oce_if.c: FreeBSD head/sys/dev/oce/oce_if.c, r246799 vs r247880
1/*-
2 * Copyright (C) 2012 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
-39/* $FreeBSD: head/sys/dev/oce/oce_if.c 246799 2013-02-14 17:34:17Z jpaetzel $ */
+40/* $FreeBSD: head/sys/dev/oce/oce_if.c 247880 2013-03-06 09:53:38Z delphij $ */
41
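/*
 * Scope of the r246799 -> r247880 change, as visible in the +/- hunks
 * below: two helpers, oce_tx_asic_stall_verify() and oce_insert_vlan_tag(),
 * are added and called from oce_tx(), so frames matching an ASIC TX-stall
 * condition get their VLAN tag inserted in software and the header WQE's
 * completion bit is driven by a new `complete' flag instead of a
 * hard-coded 1.  The fast ISR also gains a per-EQ interrupt counter,
 * oce_start() now bails out while the link is down, and an empty
 * DEV_NETMAP placeholder is dropped from oce_attach().
 */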
41#include "opt_inet6.h"
42#include "opt_inet.h"
43
44#include "oce_if.h"
45
46
47/* Driver entry points prototypes */
48static int oce_probe(device_t dev);
49static int oce_attach(device_t dev);
50static int oce_detach(device_t dev);
51static int oce_shutdown(device_t dev);
52static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
53static void oce_init(void *xsc);
54static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
55static void oce_multiq_flush(struct ifnet *ifp);
56
57/* Driver interrupt routines prototypes */
58static void oce_intr(void *arg, int pending);
59static int oce_setup_intr(POCE_SOFTC sc);
60static int oce_fast_isr(void *arg);
61static int oce_alloc_intr(POCE_SOFTC sc, int vector,
62 void (*isr) (void *arg, int pending));
63
64/* Media callbacks prototypes */
65static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
66static int oce_media_change(struct ifnet *ifp);
67
68/* Transmit routines prototypes */
69static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
70static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
71static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
72 uint32_t status);
73static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
74 struct oce_wq *wq);
75
76/* Receive routines prototypes */
77static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
78static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
79static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
80static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
81 struct oce_nic_rx_cqe *cqe);
82
83/* Helper function prototypes in this file */
84static int oce_attach_ifp(POCE_SOFTC sc);
85static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
86static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
87static int oce_vid_config(POCE_SOFTC sc);
88static void oce_mac_addr_set(POCE_SOFTC sc);
89static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
90static void oce_local_timer(void *arg);
91static void oce_if_deactivate(POCE_SOFTC sc);
92static void oce_if_activate(POCE_SOFTC sc);
93static void setup_max_queues_want(POCE_SOFTC sc);
94static void update_queues_got(POCE_SOFTC sc);
95static void process_link_state(POCE_SOFTC sc,
96 struct oce_async_cqe_link_state *acqe);
42#include "opt_inet6.h"
43#include "opt_inet.h"
44
45#include "oce_if.h"
46
47
48/* Driver entry points prototypes */
49static int oce_probe(device_t dev);
50static int oce_attach(device_t dev);
51static int oce_detach(device_t dev);
52static int oce_shutdown(device_t dev);
53static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
54static void oce_init(void *xsc);
55static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
56static void oce_multiq_flush(struct ifnet *ifp);
57
58/* Driver interrupt routines protypes */
59static void oce_intr(void *arg, int pending);
60static int oce_setup_intr(POCE_SOFTC sc);
61static int oce_fast_isr(void *arg);
62static int oce_alloc_intr(POCE_SOFTC sc, int vector,
63 void (*isr) (void *arg, int pending));
64
65/* Media callbacks prototypes */
66static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
67static int oce_media_change(struct ifnet *ifp);
68
69/* Transmit routines prototypes */
70static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
71static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
72static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
73 uint32_t status);
74static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
75 struct oce_wq *wq);
76
77/* Receive routines prototypes */
78static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
79static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
80static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
81static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
82 struct oce_nic_rx_cqe *cqe);
83
84/* Helper function prototypes in this file */
85static int oce_attach_ifp(POCE_SOFTC sc);
86static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
87static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
88static int oce_vid_config(POCE_SOFTC sc);
89static void oce_mac_addr_set(POCE_SOFTC sc);
90static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
91static void oce_local_timer(void *arg);
92static void oce_if_deactivate(POCE_SOFTC sc);
93static void oce_if_activate(POCE_SOFTC sc);
94static void setup_max_queues_want(POCE_SOFTC sc);
95static void update_queues_got(POCE_SOFTC sc);
96static void process_link_state(POCE_SOFTC sc,
97 struct oce_async_cqe_link_state *acqe);
+98static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
+99static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);

99/* IP specific */
100#if defined(INET6) || defined(INET)
101static int oce_init_lro(POCE_SOFTC sc);
102static void oce_rx_flush_lro(struct oce_rq *rq);
103static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
104#endif
105
106static device_method_t oce_dispatch[] = {
107 DEVMETHOD(device_probe, oce_probe),
108 DEVMETHOD(device_attach, oce_attach),
109 DEVMETHOD(device_detach, oce_detach),
110 DEVMETHOD(device_shutdown, oce_shutdown),
111
112 DEVMETHOD_END
113};
114
115static driver_t oce_driver = {
116 "oce",
117 oce_dispatch,
118 sizeof(OCE_SOFTC)
119};
120static devclass_t oce_devclass;
121
122
123DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
124MODULE_DEPEND(oce, pci, 1, 1, 1);
125MODULE_DEPEND(oce, ether, 1, 1, 1);
126MODULE_VERSION(oce, 1);
127
128
129/* global vars */
130const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
131
132/* Module capabilities and parameters */
133uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
134uint32_t oce_enable_rss = OCE_MODCAP_RSS;
135
136
137TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
138TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
139
140
141/* Supported devices table */
142static uint32_t supportedDevices[] = {
143 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
144 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
145 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
146 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
147 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
148};
149
150
151
152
153/*****************************************************************************
154 * Driver entry points functions *
155 *****************************************************************************/
156
157static int
158oce_probe(device_t dev)
159{
160 uint16_t vendor = 0;
161 uint16_t device = 0;
162 int i = 0;
163 char str[256] = {0};
164 POCE_SOFTC sc;
165
166 sc = device_get_softc(dev);
167 bzero(sc, sizeof(OCE_SOFTC));
168 sc->dev = dev;
169
170 vendor = pci_get_vendor(dev);
171 device = pci_get_device(dev);
172
173 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
174 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
175 if (device == (supportedDevices[i] & 0xffff)) {
176 sprintf(str, "%s:%s", "Emulex CNA NIC function",
177 component_revision);
178 device_set_desc_copy(dev, str);
179
180 switch (device) {
181 case PCI_PRODUCT_BE2:
182 sc->flags |= OCE_FLAGS_BE2;
183 break;
184 case PCI_PRODUCT_BE3:
185 sc->flags |= OCE_FLAGS_BE3;
186 break;
187 case PCI_PRODUCT_XE201:
188 case PCI_PRODUCT_XE201_VF:
189 sc->flags |= OCE_FLAGS_XE201;
190 break;
191 default:
192 return ENXIO;
193 }
194 return BUS_PROBE_DEFAULT;
195 }
196 }
197 }
198
199 return ENXIO;
200}
201
202
203static int
204oce_attach(device_t dev)
205{
206 POCE_SOFTC sc;
207 int rc = 0;
208
209 sc = device_get_softc(dev);
210
211 rc = oce_hw_pci_alloc(sc);
212 if (rc)
213 return rc;
214
215 sc->rss_enable = oce_enable_rss;
216 sc->tx_ring_size = OCE_TX_RING_SIZE;
217 sc->rx_ring_size = OCE_RX_RING_SIZE;
218 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
219 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
220 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
221
222 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
223 LOCK_CREATE(&sc->dev_lock, "Device_lock");
224
225 /* initialise the hardware */
226 rc = oce_hw_init(sc);
227 if (rc)
228 goto pci_res_free;
229
230 setup_max_queues_want(sc);
231
232 rc = oce_setup_intr(sc);
233 if (rc)
234 goto mbox_free;
235
236 rc = oce_queue_init_all(sc);
237 if (rc)
238 goto intr_free;
239
240 rc = oce_attach_ifp(sc);
241 if (rc)
242 goto queues_free;
243
244#if defined(INET6) || defined(INET)
245 rc = oce_init_lro(sc);
246 if (rc)
247 goto ifp_free;
248#endif
249
250 rc = oce_hw_start(sc);
251 if (rc)
252 goto lro_free;
253
254 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
255 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
256 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
257 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
258
259 rc = oce_stats_init(sc);
260 if (rc)
261 goto vlan_free;
262
263 oce_add_sysctls(sc);
264
265 callout_init(&sc->timer, CALLOUT_MPSAFE);
266 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
267 if (rc)
268 goto stats_free;
-269#ifdef DEV_NETMAP
-270#endif /* DEV_NETMAP */
271
272 return 0;
273
274stats_free:
275 callout_drain(&sc->timer);
276 oce_stats_free(sc);
277vlan_free:
278 if (sc->vlan_attach)
279 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
280 if (sc->vlan_detach)
281 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
282 oce_hw_intr_disable(sc);
283lro_free:
284#if defined(INET6) || defined(INET)
285 oce_free_lro(sc);
286ifp_free:
287#endif
288 ether_ifdetach(sc->ifp);
289 if_free(sc->ifp);
290queues_free:
291 oce_queue_release_all(sc);
292intr_free:
293 oce_intr_free(sc);
294mbox_free:
295 oce_dma_free(sc, &sc->bsmbx);
296pci_res_free:
297 oce_hw_pci_free(sc);
298 LOCK_DESTROY(&sc->dev_lock);
299 LOCK_DESTROY(&sc->bmbx_lock);
300 return rc;
301
302}
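/*
 * The failure labels above form a strict unwind ladder: each label
 * releases only what was acquired before the failing step, in reverse
 * order of acquisition, so a partially completed attach never leaks
 * queues, interrupts, the stats block, or the bootstrap mailbox DMA area.
 */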
303
304
305static int
306oce_detach(device_t dev)
307{
308 POCE_SOFTC sc = device_get_softc(dev);
309
310 LOCK(&sc->dev_lock);
311 oce_if_deactivate(sc);
312 UNLOCK(&sc->dev_lock);
313
314 callout_drain(&sc->timer);
315
316 if (sc->vlan_attach != NULL)
317 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
318 if (sc->vlan_detach != NULL)
319 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
320
321 ether_ifdetach(sc->ifp);
322
323 if_free(sc->ifp);
324
325 oce_hw_shutdown(sc);
326
327 bus_generic_detach(dev);
328
329 return 0;
330}
331
332
333static int
334oce_shutdown(device_t dev)
335{
336 int rc;
337
338 rc = oce_detach(dev);
339
340 return rc;
341}
342
343
344static int
345oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
346{
347 struct ifreq *ifr = (struct ifreq *)data;
348 POCE_SOFTC sc = ifp->if_softc;
349 int rc = 0;
350 uint32_t u;
351
352 switch (command) {
353
354 case SIOCGIFMEDIA:
355 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
356 break;
357
358 case SIOCSIFMTU:
359 if (ifr->ifr_mtu > OCE_MAX_MTU)
360 rc = EINVAL;
361 else
362 ifp->if_mtu = ifr->ifr_mtu;
363 break;
364
365 case SIOCSIFFLAGS:
366 if (ifp->if_flags & IFF_UP) {
367 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
368 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
369 oce_init(sc);
370 }
371 device_printf(sc->dev, "Interface Up\n");
372 } else {
373 LOCK(&sc->dev_lock);
374
375 sc->ifp->if_drv_flags &=
376 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
377 oce_if_deactivate(sc);
378
379 UNLOCK(&sc->dev_lock);
380
381 device_printf(sc->dev, "Interface Down\n");
382 }
383
384 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
385 sc->promisc = TRUE;
386 oce_rxf_set_promiscuous(sc, sc->promisc);
387 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
388 sc->promisc = FALSE;
389 oce_rxf_set_promiscuous(sc, sc->promisc);
390 }
391
392 break;
393
394 case SIOCADDMULTI:
395 case SIOCDELMULTI:
396 rc = oce_hw_update_multicast(sc);
397 if (rc)
398 device_printf(sc->dev,
399 "Update multicast address failed\n");
400 break;
401
402 case SIOCSIFCAP:
403 u = ifr->ifr_reqcap ^ ifp->if_capenable;
404
405 if (u & IFCAP_TXCSUM) {
406 ifp->if_capenable ^= IFCAP_TXCSUM;
407 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
408
409 if (IFCAP_TSO & ifp->if_capenable &&
410 !(IFCAP_TXCSUM & ifp->if_capenable)) {
411 ifp->if_capenable &= ~IFCAP_TSO;
412 ifp->if_hwassist &= ~CSUM_TSO;
413 if_printf(ifp,
414 "TSO disabled due to -txcsum.\n");
415 }
416 }
417
418 if (u & IFCAP_RXCSUM)
419 ifp->if_capenable ^= IFCAP_RXCSUM;
420
421 if (u & IFCAP_TSO4) {
422 ifp->if_capenable ^= IFCAP_TSO4;
423
424 if (IFCAP_TSO & ifp->if_capenable) {
425 if (IFCAP_TXCSUM & ifp->if_capenable)
426 ifp->if_hwassist |= CSUM_TSO;
427 else {
428 ifp->if_capenable &= ~IFCAP_TSO;
429 ifp->if_hwassist &= ~CSUM_TSO;
430 if_printf(ifp,
431 "Enable txcsum first.\n");
432 rc = EAGAIN;
433 }
434 } else
435 ifp->if_hwassist &= ~CSUM_TSO;
436 }
437
438 if (u & IFCAP_VLAN_HWTAGGING)
439 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
440
441 if (u & IFCAP_VLAN_HWFILTER) {
442 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
443 oce_vid_config(sc);
444 }
445#if defined(INET6) || defined(INET)
446 if (u & IFCAP_LRO)
447 ifp->if_capenable ^= IFCAP_LRO;
448#endif
449
450 break;
451
452 case SIOCGPRIVATE_0:
453 rc = oce_handle_passthrough(ifp, data);
454 break;
455 default:
456 rc = ether_ioctl(ifp, command, data);
457 break;
458 }
459
460 return rc;
461}
462
463
464static void
465oce_init(void *arg)
466{
467 POCE_SOFTC sc = arg;
468
469 LOCK(&sc->dev_lock);
470
471 if (sc->ifp->if_flags & IFF_UP) {
472 oce_if_deactivate(sc);
473 oce_if_activate(sc);
474 }
475
476 UNLOCK(&sc->dev_lock);
477
478}
479
480
481static int
482oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
483{
484 POCE_SOFTC sc = ifp->if_softc;
485 struct oce_wq *wq = NULL;
486 int queue_index = 0;
487 int status = 0;
488
489 if ((m->m_flags & M_FLOWID) != 0)
490 queue_index = m->m_pkthdr.flowid % sc->nwqs;
491
492 wq = sc->wq[queue_index];
493
494 if (TRY_LOCK(&wq->tx_lock)) {
495 status = oce_multiq_transmit(ifp, m, wq);
496 UNLOCK(&wq->tx_lock);
497 } else {
498 status = drbr_enqueue(ifp, wq->br, m);
499 }
500 return status;
501
502}
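/*
 * Queue selection above is a plain modulus on the stack-assigned flowid:
 * with 4 work queues, a packet carrying flowid 7 lands on wq[7 % 4], i.e.
 * wq[3], so every packet of a flow stays on one WQ and is not reordered.
 */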
503
504
505static void
506oce_multiq_flush(struct ifnet *ifp)
507{
508 POCE_SOFTC sc = ifp->if_softc;
509 struct mbuf *m;
510 int i = 0;
511
512 for (i = 0; i < sc->nwqs; i++) {
513 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
514 m_freem(m);
515 }
516 if_qflush(ifp);
517}
518
519
520
521/*****************************************************************************
522 * Driver interrupt routines functions *
523 *****************************************************************************/
524
525static void
526oce_intr(void *arg, int pending)
527{
528
529 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
530 POCE_SOFTC sc = ii->sc;
531 struct oce_eq *eq = ii->eq;
532 struct oce_eqe *eqe;
533 struct oce_cq *cq = NULL;
534 int i, num_eqes = 0;
535
536
537 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
538 BUS_DMASYNC_POSTWRITE);
539 do {
540 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
541 if (eqe->evnt == 0)
542 break;
543 eqe->evnt = 0;
544 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
545 BUS_DMASYNC_POSTWRITE);
546 RING_GET(eq->ring, 1);
547 num_eqes++;
548
549 } while (TRUE);
550
551 if (!num_eqes)
552 goto eq_arm; /* Spurious */
553
554 /* Clear EQ entries, but don't arm */
555 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
556
557 /* Process TX, RX and MCC. But don't arm CQ */
558 for (i = 0; i < eq->cq_valid; i++) {
559 cq = eq->cq[i];
560 (*cq->cq_handler)(cq->cb_arg);
561 }
562
563 /* Arm all cqs connected to this EQ */
564 for (i = 0; i < eq->cq_valid; i++) {
565 cq = eq->cq[i];
566 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
567 }
568
569eq_arm:
570 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
489 if ((m->m_flags & M_FLOWID) != 0)
490 queue_index = m->m_pkthdr.flowid % sc->nwqs;
491
492 wq = sc->wq[queue_index];
493
494 if (TRY_LOCK(&wq->tx_lock)) {
495 status = oce_multiq_transmit(ifp, m, wq);
496 UNLOCK(&wq->tx_lock);
497 } else {
498 status = drbr_enqueue(ifp, wq->br, m);
499 }
500 return status;
501
502}
503
504
505static void
506oce_multiq_flush(struct ifnet *ifp)
507{
508 POCE_SOFTC sc = ifp->if_softc;
509 struct mbuf *m;
510 int i = 0;
511
512 for (i = 0; i < sc->nwqs; i++) {
513 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
514 m_freem(m);
515 }
516 if_qflush(ifp);
517}
518
519
520
521/*****************************************************************************
522 * Driver interrupt routines functions *
523 *****************************************************************************/
524
525static void
526oce_intr(void *arg, int pending)
527{
528
529 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
530 POCE_SOFTC sc = ii->sc;
531 struct oce_eq *eq = ii->eq;
532 struct oce_eqe *eqe;
533 struct oce_cq *cq = NULL;
534 int i, num_eqes = 0;
535
536
537 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
538 BUS_DMASYNC_POSTWRITE);
539 do {
540 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
541 if (eqe->evnt == 0)
542 break;
543 eqe->evnt = 0;
544 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
545 BUS_DMASYNC_POSTWRITE);
546 RING_GET(eq->ring, 1);
547 num_eqes++;
548
549 } while (TRUE);
550
551 if (!num_eqes)
552 goto eq_arm; /* Spurious */
553
554 /* Clear EQ entries, but dont arm */
555 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
556
557 /* Process TX, RX and MCC. But dont arm CQ*/
558 for (i = 0; i < eq->cq_valid; i++) {
559 cq = eq->cq[i];
560 (*cq->cq_handler)(cq->cb_arg);
561 }
562
563 /* Arm all cqs connected to this EQ */
564 for (i = 0; i < eq->cq_valid; i++) {
565 cq = eq->cq[i];
566 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
567 }
568
569eq_arm:
570 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
571
571 return;
572}
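/*
 * Ordering note: oce_intr() first consumes every posted EQE, then runs
 * each CQ handler with the CQ left unarmed, and only afterwards re-arms
 * the CQs and the EQ, so completions that arrive mid-processing coalesce
 * into the pass already in progress instead of raising a new interrupt.
 */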
573
574
575static int
576oce_setup_intr(POCE_SOFTC sc)
577{
578 int rc = 0, use_intx = 0;
579 int vector = 0, req_vectors = 0;
580
581 if (sc->rss_enable)
582 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
583 else
584 req_vectors = 1;
585
586 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
587 sc->intr_count = req_vectors;
588 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
589 if (rc != 0) {
590 use_intx = 1;
591 pci_release_msi(sc->dev);
592 } else
593 sc->flags |= OCE_FLAGS_USING_MSIX;
594 } else
595 use_intx = 1;
596
597 if (use_intx)
598 sc->intr_count = 1;
599
600 /* Scale number of queues based on intr we got */
601 update_queues_got(sc);
602
603 if (use_intx) {
604 device_printf(sc->dev, "Using legacy interrupt\n");
605 rc = oce_alloc_intr(sc, vector, oce_intr);
606 if (rc)
607 goto error;
608 } else {
609 for (; vector < sc->intr_count; vector++) {
610 rc = oce_alloc_intr(sc, vector, oce_intr);
611 if (rc)
612 goto error;
613 }
614 }
615
616 return 0;
617error:
618 oce_intr_free(sc);
619 return rc;
620}
621
622
623static int
624oce_fast_isr(void *arg)
625{
626 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
627 POCE_SOFTC sc = ii->sc;
628
629 if (ii->eq == NULL)
630 return FILTER_STRAY;
631
632 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
633
634 taskqueue_enqueue_fast(ii->tq, &ii->task);
635
+637 ii->eq->intr++;
+638
636 return FILTER_HANDLED;
637}
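/*
 * oce_fast_isr() is registered as an interrupt filter (see the
 * bus_setup_intr() call in oce_alloc_intr() below), so it runs in primary
 * interrupt context and only acknowledges the EQ before deferring all
 * completion work to the per-vector taskqueue; the ii->eq->intr++ added
 * in this revision gives each event queue its own interrupt counter.
 */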
638
639
640static int
641oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
642{
643 POCE_INTR_INFO ii = &sc->intrs[vector];
644 int rc = 0, rr;
645
646 if (vector >= OCE_MAX_EQ)
647 return (EINVAL);
648
649 /* Set the resource id for the interrupt.
650 * MSIx is vector + 1 for the resource id,
651 * INTx is 0 for the resource id.
652 */
653 if (sc->flags & OCE_FLAGS_USING_MSIX)
654 rr = vector + 1;
655 else
656 rr = 0;
657 ii->intr_res = bus_alloc_resource_any(sc->dev,
658 SYS_RES_IRQ,
659 &rr, RF_ACTIVE|RF_SHAREABLE);
660 ii->irq_rr = rr;
661 if (ii->intr_res == NULL) {
662 device_printf(sc->dev,
663 "Could not allocate interrupt\n");
664 rc = ENXIO;
665 return rc;
666 }
667
668 TASK_INIT(&ii->task, 0, isr, ii);
669 ii->vector = vector;
670 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
671 ii->tq = taskqueue_create_fast(ii->task_name,
672 M_NOWAIT,
673 taskqueue_thread_enqueue,
674 &ii->tq);
675 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
676 device_get_nameunit(sc->dev));
677
678 ii->sc = sc;
679 rc = bus_setup_intr(sc->dev,
680 ii->intr_res,
681 INTR_TYPE_NET,
682 oce_fast_isr, NULL, ii, &ii->tag);
683 return rc;
684
685}
686
687
688void
689oce_intr_free(POCE_SOFTC sc)
690{
691 int i = 0;
692
693 for (i = 0; i < sc->intr_count; i++) {
694
695 if (sc->intrs[i].tag != NULL)
696 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
697 sc->intrs[i].tag);
698 if (sc->intrs[i].tq != NULL)
699 taskqueue_free(sc->intrs[i].tq);
700
701 if (sc->intrs[i].intr_res != NULL)
702 bus_release_resource(sc->dev, SYS_RES_IRQ,
703 sc->intrs[i].irq_rr,
704 sc->intrs[i].intr_res);
705 sc->intrs[i].tag = NULL;
706 sc->intrs[i].intr_res = NULL;
707 }
708
709 if (sc->flags & OCE_FLAGS_USING_MSIX)
710 pci_release_msi(sc->dev);
711
712}
713
714
715
716/******************************************************************************
717* Media callbacks functions *
718******************************************************************************/
719
720static void
721oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
722{
723 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
724
725
726 req->ifm_status = IFM_AVALID;
727 req->ifm_active = IFM_ETHER;
728
729 if (sc->link_status == 1)
730 req->ifm_status |= IFM_ACTIVE;
731 else
732 return;
733
734 switch (sc->link_speed) {
735 case 1: /* 10 Mbps */
736 req->ifm_active |= IFM_10_T | IFM_FDX;
737 sc->speed = 10;
738 break;
739 case 2: /* 100 Mbps */
740 req->ifm_active |= IFM_100_TX | IFM_FDX;
741 sc->speed = 100;
742 break;
743 case 3: /* 1 Gbps */
744 req->ifm_active |= IFM_1000_T | IFM_FDX;
745 sc->speed = 1000;
746 break;
747 case 4: /* 10 Gbps */
748 req->ifm_active |= IFM_10G_SR | IFM_FDX;
749 sc->speed = 10000;
750 break;
751 }
752
753 return;
754}
755
756
757int
758oce_media_change(struct ifnet *ifp)
759{
760 return 0;
761}
762
763
764
765
766/*****************************************************************************
767 * Transmit routines functions *
768 *****************************************************************************/
769
770static int
771oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
772{
773 int rc = 0, i, retry_cnt = 0;
774 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
775 struct mbuf *m, *m_temp;
776 struct oce_wq *wq = sc->wq[wq_index];
777 struct oce_packet_desc *pd;
778 uint32_t out;
779 struct oce_nic_hdr_wqe *nichdr;
780 struct oce_nic_frag_wqe *nicfrag;
781 int num_wqes;
782 uint32_t reg_value;
+786 boolean_t complete = TRUE;
783
784 m = *mpp;
785 if (!m)
786 return EINVAL;
787
788 if (!(m->m_flags & M_PKTHDR)) {
789 rc = ENXIO;
790 goto free_ret;
791 }
792
+797 if(oce_tx_asic_stall_verify(sc, m)) {
+798 m = oce_insert_vlan_tag(sc, m, &complete);
+799 if(!m) {
+800 device_printf(sc->dev, "Insertion unsuccessful\n");
+801 return 0;
+802 }
+803
+804 }
+805
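/*
 * The bodies of the two new helpers are outside this hunk.  The call site
 * above still shows their contract: oce_insert_vlan_tag() must fold the
 * out-of-band VLAN tag into the frame itself (it can fail and return
 * NULL) and may clear `complete' so the header WQE built below suppresses
 * its completion bit.  A minimal sketch of that contract, hypothetical
 * rather than the driver's actual body, could lean on the stock
 * ether_vlanencap(9) helper:
 *
 *	static struct mbuf *
 *	sketch_insert_vlan_tag(struct mbuf *m, boolean_t *complete)
 *	{
 *		uint16_t vtag;
 *
 *		if (m->m_flags & M_VLANTAG) {
 *			vtag = m->m_pkthdr.ether_vtag;
 *			m->m_flags &= ~M_VLANTAG;
 *			m = ether_vlanencap(m, vtag);
 *			if (m != NULL)
 *				*complete = FALSE;
 *		}
 *		return (m);
 *	}
 */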
793 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
794 /* consolidate packet buffers for TSO/LSO segment offload */
795#if defined(INET6) || defined(INET)
796 m = oce_tso_setup(sc, mpp);
797#else
798 m = NULL;
799#endif
800 if (m == NULL) {
801 rc = ENXIO;
802 goto free_ret;
803 }
804 }
805
806 out = wq->packets_out + 1;
807 if (out == OCE_WQ_PACKET_ARRAY_SIZE)
808 out = 0;
809 if (out == wq->packets_in)
810 return EBUSY;
811
812 pd = &wq->pckts[wq->packets_out];
813retry:
814 rc = bus_dmamap_load_mbuf_sg(wq->tag,
815 pd->map,
816 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
817 if (rc == 0) {
818 num_wqes = pd->nsegs + 1;
819 if (IS_BE(sc)) {
820 /*Dummy required only for BE3.*/
821 if (num_wqes & 1)
822 num_wqes++;
823 }
824 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
825 bus_dmamap_unload(wq->tag, pd->map);
826 return EBUSY;
827 }
828
829 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
830 pd->mbuf = m;
831 wq->packets_out = out;
832
833 nichdr =
834 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
835 nichdr->u0.dw[0] = 0;
836 nichdr->u0.dw[1] = 0;
837 nichdr->u0.dw[2] = 0;
838 nichdr->u0.dw[3] = 0;
839
806 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
807 /* consolidate packet buffers for TSO/LSO segment offload */
808#if defined(INET6) || defined(INET)
809 m = oce_tso_setup(sc, mpp);
810#else
811 m = NULL;
812#endif
813 if (m == NULL) {
814 rc = ENXIO;
815 goto free_ret;
816 }
817 }
818
819 out = wq->packets_out + 1;
820 if (out == OCE_WQ_PACKET_ARRAY_SIZE)
821 out = 0;
822 if (out == wq->packets_in)
823 return EBUSY;
824
825 pd = &wq->pckts[wq->packets_out];
826retry:
827 rc = bus_dmamap_load_mbuf_sg(wq->tag,
828 pd->map,
829 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
830 if (rc == 0) {
831 num_wqes = pd->nsegs + 1;
832 if (IS_BE(sc)) {
833 /*Dummy required only for BE3.*/
834 if (num_wqes & 1)
835 num_wqes++;
836 }
837 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
838 bus_dmamap_unload(wq->tag, pd->map);
839 return EBUSY;
840 }
841
842 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
843 pd->mbuf = m;
844 wq->packets_out = out;
845
846 nichdr =
847 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
848 nichdr->u0.dw[0] = 0;
849 nichdr->u0.dw[1] = 0;
850 nichdr->u0.dw[2] = 0;
851 nichdr->u0.dw[3] = 0;
852
-840 nichdr->u0.s.complete = 1;
+853 nichdr->u0.s.complete = complete;
841 nichdr->u0.s.event = 1;
842 nichdr->u0.s.crc = 1;
843 nichdr->u0.s.forward = 0;
844 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
845 nichdr->u0.s.udpcs =
846 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
859 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
847 nichdr->u0.s.tcpcs =
848 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
861 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
849 nichdr->u0.s.num_wqe = num_wqes;
850 nichdr->u0.s.total_length = m->m_pkthdr.len;
851 if (m->m_flags & M_VLANTAG) {
852 nichdr->u0.s.vlan = 1; /*Vlan present*/
853 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
854 }
855 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
856 if (m->m_pkthdr.tso_segsz) {
857 nichdr->u0.s.lso = 1;
858 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
859 }
860 if (!IS_BE(sc))
861 nichdr->u0.s.ipcs = 1;
862 }
863
864 RING_PUT(wq->ring, 1);
865 wq->ring->num_used++;
866
867 for (i = 0; i < pd->nsegs; i++) {
868 nicfrag =
869 RING_GET_PRODUCER_ITEM_VA(wq->ring,
870 struct oce_nic_frag_wqe);
871 nicfrag->u0.s.rsvd0 = 0;
872 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
873 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
874 nicfrag->u0.s.frag_len = segs[i].ds_len;
875 pd->wqe_idx = wq->ring->pidx;
876 RING_PUT(wq->ring, 1);
877 wq->ring->num_used++;
878 }
879 if (num_wqes > (pd->nsegs + 1)) {
880 nicfrag =
881 RING_GET_PRODUCER_ITEM_VA(wq->ring,
882 struct oce_nic_frag_wqe);
883 nicfrag->u0.dw[0] = 0;
884 nicfrag->u0.dw[1] = 0;
885 nicfrag->u0.dw[2] = 0;
886 nicfrag->u0.dw[3] = 0;
887 pd->wqe_idx = wq->ring->pidx;
888 RING_PUT(wq->ring, 1);
889 wq->ring->num_used++;
890 pd->nsegs++;
891 }
892
893 sc->ifp->if_opackets++;
894 wq->tx_stats.tx_reqs++;
895 wq->tx_stats.tx_wrbs += num_wqes;
896 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
897 wq->tx_stats.tx_pkts++;
898
899 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
900 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
901 reg_value = (num_wqes << 16) | wq->wq_id;
902 OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);
903
904 } else if (rc == EFBIG) {
905 if (retry_cnt == 0) {
906 m_temp = m_defrag(m, M_NOWAIT);
907 if (m_temp == NULL)
908 goto free_ret;
909 m = m_temp;
910 *mpp = m_temp;
911 retry_cnt = retry_cnt + 1;
912 goto retry;
913 } else
914 goto free_ret;
915 } else if (rc == ENOMEM)
916 return rc;
917 else
918 goto free_ret;
919
920 return 0;
921
922free_ret:
923 m_freem(*mpp);
924 *mpp = NULL;
925 return rc;
926}
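/*
 * Error handling in oce_tx(): bus_dmamap_load_mbuf_sg() reports EFBIG
 * when the chain maps to more scatter/gather segments than the tag allows
 * (segs[] holds OCE_MAX_TX_ELEMENTS entries); one m_defrag() pass is
 * tried to linearize the chain before giving up.  ENOMEM is the only
 * error returned without freeing the mbuf, which lets the caller requeue
 * the untouched packet (see IFQ_DRV_PREPEND() in oce_start() below).
 */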
927
928
929static void
930oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
931{
932 uint32_t in;
933 struct oce_packet_desc *pd;
934 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
935 struct mbuf *m;
936
937 if (wq->packets_out == wq->packets_in)
938 device_printf(sc->dev, "WQ transmit descriptor missing\n");
939
940 in = wq->packets_in + 1;
941 if (in == OCE_WQ_PACKET_ARRAY_SIZE)
942 in = 0;
943
944 pd = &wq->pckts[wq->packets_in];
945 wq->packets_in = in;
946 wq->ring->num_used -= (pd->nsegs + 1);
947 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
948 bus_dmamap_unload(wq->tag, pd->map);
949
950 m = pd->mbuf;
951 m_freem(m);
952 pd->mbuf = NULL;
953
954 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
955 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
956 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
957 oce_tx_restart(sc, wq);
958 }
959 }
960}
961
962
963static void
964oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
965{
966
967 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
968 return;
969
970#if __FreeBSD_version >= 800000
971 if (!drbr_empty(sc->ifp, wq->br))
972#else
973 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
974#endif
975 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
976
977}
978
979
980#if defined(INET6) || defined(INET)
981static struct mbuf *
982oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
983{
984 struct mbuf *m;
985#ifdef INET
986 struct ip *ip;
987#endif
988#ifdef INET6
989 struct ip6_hdr *ip6;
990#endif
991 struct ether_vlan_header *eh;
992 struct tcphdr *th;
993 uint16_t etype;
994 int total_len = 0, ehdrlen = 0;
995
996 m = *mpp;
997
998 if (M_WRITABLE(m) == 0) {
999 m = m_dup(*mpp, M_NOWAIT);
1000 if (!m)
1001 return NULL;
1002 m_freem(*mpp);
1003 *mpp = m;
1004 }
1005
1006 eh = mtod(m, struct ether_vlan_header *);
1007 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1008 etype = ntohs(eh->evl_proto);
1009 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1010 } else {
1011 etype = ntohs(eh->evl_encap_proto);
1012 ehdrlen = ETHER_HDR_LEN;
1013 }
1014
1015 switch (etype) {
1016#ifdef INET
1017 case ETHERTYPE_IP:
1018 ip = (struct ip *)(m->m_data + ehdrlen);
1019 if (ip->ip_p != IPPROTO_TCP)
1020 return NULL;
1021 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1022
1023 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1024 break;
1025#endif
1026#ifdef INET6
1027 case ETHERTYPE_IPV6:
1028 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1029 if (ip6->ip6_nxt != IPPROTO_TCP)
1030 return NULL;
1031 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1032
1033 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1034 break;
1035#endif
1036 default:
1037 return NULL;
1038 }
1039
1040 m = m_pullup(m, total_len);
1041 if (!m)
1042 return NULL;
1043 *mpp = m;
1044 return m;
1045
1046}
1047#endif /* INET6 || INET */
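/*
 * total_len in oce_tso_setup() is just the number of header bytes the
 * driver wants contiguous: for an untagged IPv4/TCP frame with no options
 * that is ETHER_HDR_LEN + (ip_hl << 2) + (th_off << 2) = 14 + 20 + 20 =
 * 54 bytes, which m_pullup() guarantees live in the first mbuf.
 */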
1048
1049void
1050oce_tx_task(void *arg, int npending)
1051{
1052 struct oce_wq *wq = arg;
1053 POCE_SOFTC sc = wq->parent;
1054 struct ifnet *ifp = sc->ifp;
1055 int rc = 0;
1056
1057#if __FreeBSD_version >= 800000
1058 if (TRY_LOCK(&wq->tx_lock)) {
1059 rc = oce_multiq_transmit(ifp, NULL, wq);
1060 if (rc) {
1061 device_printf(sc->dev,
1062 "TX[%d] restart failed\n", wq->queue_index);
1063 }
1064 UNLOCK(&wq->tx_lock);
1065 }
1066#else
1067 oce_start(ifp);
1068#endif
1069
1070}
1071
1072
1073void
1074oce_start(struct ifnet *ifp)
1075{
1076 POCE_SOFTC sc = ifp->if_softc;
1077 struct mbuf *m;
1078 int rc = 0;
1079 int def_q = 0; /* Default tx queue is 0 */
1080
1081 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1082 IFF_DRV_RUNNING)
1083 return;
+1097
+1098 if (!sc->link_status)
+1099 return;
1084
1085 do {
1086 IF_DEQUEUE(&sc->ifp->if_snd, m);
1087 if (m == NULL)
1088 break;
1089
1090 LOCK(&sc->wq[def_q]->tx_lock);
1091 rc = oce_tx(sc, &m, def_q);
1092 UNLOCK(&sc->wq[def_q]->tx_lock);
1093 if (rc) {
1094 if (m != NULL) {
1095 sc->wq[def_q]->tx_stats.tx_stops ++;
1096 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1097 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1098 m = NULL;
1099 }
1100 break;
1101 }
1102 if (m != NULL)
1103 ETHER_BPF_MTAP(ifp, m);
1104
1105 } while (TRUE);
1106
1107 return;
1108}
1109
1110
1111/* Handle the Completion Queue for transmit */
1112uint16_t
1113oce_wq_handler(void *arg)
1114{
1115 struct oce_wq *wq = (struct oce_wq *)arg;
1116 POCE_SOFTC sc = wq->parent;
1117 struct oce_cq *cq = wq->cq;
1118 struct oce_nic_tx_cqe *cqe;
1119 int num_cqes = 0;
1120
1121 LOCK(&wq->tx_lock);
1122 bus_dmamap_sync(cq->ring->dma.tag,
1123 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1124 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1125 while (cqe->u0.dw[3]) {
1126 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1127
1128 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1129 if (wq->ring->cidx >= wq->ring->num_items)
1130 wq->ring->cidx -= wq->ring->num_items;
1131
1132 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1133 wq->tx_stats.tx_compl++;
1134 cqe->u0.dw[3] = 0;
1135 RING_GET(cq->ring, 1);
1136 bus_dmamap_sync(cq->ring->dma.tag,
1137 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1138 cqe =
1139 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1140 num_cqes++;
1141 }
1142
1143 if (num_cqes)
1144 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1145 UNLOCK(&wq->tx_lock);
1146
1147 return 0;
1148}
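/*
 * In oce_wq_handler() a nonzero fourth dword marks a valid TX completion;
 * zeroing it hands the slot back so the same entry reads as empty on the
 * next pass, and the final oce_arm_cq() call returns the consumed-entry
 * credit to the CQ doorbell without re-arming it (the rearm is done from
 * the EQ path in oce_intr()).
 */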
1149
1150
1151static int
1152oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1153{
1154 POCE_SOFTC sc = ifp->if_softc;
1155 int status = 0, queue_index = 0;
1156 struct mbuf *next = NULL;
1157 struct buf_ring *br = NULL;
1158
1159 br = wq->br;
1160 queue_index = wq->queue_index;
1161
1162 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1163 IFF_DRV_RUNNING) {
1164 if (m != NULL)
1165 status = drbr_enqueue(ifp, br, m);
1166 return status;
1167 }
1168
1169 if (m != NULL) {
1170 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1171 return status;
1172 }
1173 while ((next = drbr_peek(ifp, br)) != NULL) {
1174 if (oce_tx(sc, &next, queue_index)) {
1175 if (next == NULL) {
1176 drbr_advance(ifp, br);
1177 } else {
1178 drbr_putback(ifp, br, next);
1179 wq->tx_stats.tx_stops ++;
1180 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1181 status = drbr_enqueue(ifp, br, next);
1182 }
1183 break;
1184 }
1185 drbr_advance(ifp, br);
1186 ifp->if_obytes += next->m_pkthdr.len;
1187 if (next->m_flags & M_MCAST)
1188 ifp->if_omcasts++;
1189 ETHER_BPF_MTAP(ifp, next);
1190 }
1191
1192 return status;
1193}
1194
1195
1196
1197
1198/*****************************************************************************
1199 * Receive routines functions *
1200 *****************************************************************************/
1201
1202static void
1203oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1204{
1205 uint32_t out;
1206 struct oce_packet_desc *pd;
1207 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1208 int i, len, frag_len;
1209 struct mbuf *m = NULL, *tail = NULL;
1210 uint16_t vtag;
1211
1212 len = cqe->u0.s.pkt_size;
1213 if (!len) {
1214 /*partial DMA workaround for Lancer*/
1215 oce_discard_rx_comp(rq, cqe);
1216 goto exit;
1217 }
1218
1219 /* Get vlan_tag value */
1220 if(IS_BE(sc))
1221 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1222 else
1223 vtag = cqe->u0.s.vlan_tag;
1224
1225
1226 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1227
1228 if (rq->packets_out == rq->packets_in) {
1229 device_printf(sc->dev,
1230 "RQ transmit descriptor missing\n");
1231 }
1232 out = rq->packets_out + 1;
1233 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1234 out = 0;
1235 pd = &rq->pckts[rq->packets_out];
1236 rq->packets_out = out;
1237
1238 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1239 bus_dmamap_unload(rq->tag, pd->map);
1240 rq->pending--;
1241
1242 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1243 pd->mbuf->m_len = frag_len;
1244
1245 if (tail != NULL) {
1246 /* additional fragments */
1247 pd->mbuf->m_flags &= ~M_PKTHDR;
1248 tail->m_next = pd->mbuf;
1249 tail = pd->mbuf;
1250 } else {
1251 /* first fragment, fill out much of the packet header */
1252 pd->mbuf->m_pkthdr.len = len;
1253 pd->mbuf->m_pkthdr.csum_flags = 0;
1254 if (IF_CSUM_ENABLED(sc)) {
1255 if (cqe->u0.s.l4_cksum_pass) {
1256 pd->mbuf->m_pkthdr.csum_flags |=
1257 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1258 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1259 }
1260 if (cqe->u0.s.ip_cksum_pass) {
1261 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1262 pd->mbuf->m_pkthdr.csum_flags |=
1263 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1264 }
1265 }
1266 }
1267 m = tail = pd->mbuf;
1268 }
1269 pd->mbuf = NULL;
1270 len -= frag_len;
1271 }
1272
1273 if (m) {
1274 if (!oce_cqe_portid_valid(sc, cqe)) {
1275 m_freem(m);
1276 goto exit;
1277 }
1278
1279 m->m_pkthdr.rcvif = sc->ifp;
1280#if __FreeBSD_version >= 800000
1281 m->m_pkthdr.flowid = rq->queue_index;
1282 m->m_flags |= M_FLOWID;
1283#endif
1284 /* This deternies if vlan tag is Valid */
1285 if (oce_cqe_vtp_valid(sc, cqe)) {
1286 if (sc->function_mode & FNM_FLEX10_MODE) {
1287 /* FLEX10. If QnQ is not set, neglect VLAN */
1288 if (cqe->u0.s.qnq) {
1289 m->m_pkthdr.ether_vtag = vtag;
1290 m->m_flags |= M_VLANTAG;
1291 }
1292 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1293 /* In UMC mode generally pvid will be striped by
1294 hw. But in some cases we have seen it comes
1295 with pvid. So if pvid == vlan, neglect vlan.
1296 */
1297 m->m_pkthdr.ether_vtag = vtag;
1298 m->m_flags |= M_VLANTAG;
1299 }
1300 }
1301
1302 sc->ifp->if_ipackets++;
1303#if defined(INET6) || defined(INET)
1304 /* Try to queue to LRO */
1305 if (IF_LRO_ENABLED(sc) &&
1100
1101 do {
1102 IF_DEQUEUE(&sc->ifp->if_snd, m);
1103 if (m == NULL)
1104 break;
1105
1106 LOCK(&sc->wq[def_q]->tx_lock);
1107 rc = oce_tx(sc, &m, def_q);
1108 UNLOCK(&sc->wq[def_q]->tx_lock);
1109 if (rc) {
1110 if (m != NULL) {
1111 sc->wq[def_q]->tx_stats.tx_stops ++;
1112 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1113 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1114 m = NULL;
1115 }
1116 break;
1117 }
1118 if (m != NULL)
1119 ETHER_BPF_MTAP(ifp, m);
1120
1121 } while (TRUE);
1122
1123 return;
1124}
1125
1126
1127/* Handle the Completion Queue for transmit */
1128uint16_t
1129oce_wq_handler(void *arg)
1130{
1131 struct oce_wq *wq = (struct oce_wq *)arg;
1132 POCE_SOFTC sc = wq->parent;
1133 struct oce_cq *cq = wq->cq;
1134 struct oce_nic_tx_cqe *cqe;
1135 int num_cqes = 0;
1136
1137 LOCK(&wq->tx_lock);
1138 bus_dmamap_sync(cq->ring->dma.tag,
1139 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1140 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1141 while (cqe->u0.dw[3]) {
1142 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1143
1144 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1145 if (wq->ring->cidx >= wq->ring->num_items)
1146 wq->ring->cidx -= wq->ring->num_items;
1147
1148 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1149 wq->tx_stats.tx_compl++;
1150 cqe->u0.dw[3] = 0;
1151 RING_GET(cq->ring, 1);
1152 bus_dmamap_sync(cq->ring->dma.tag,
1153 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1154 cqe =
1155 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1156 num_cqes++;
1157 }
1158
1159 if (num_cqes)
1160 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1161 UNLOCK(&wq->tx_lock);
1162
1163 return 0;
1164}
1165
1166
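/* Enqueue the mbuf (if any) on the WQ's buf_ring, then drain the ring into
 * the hardware; on a full ring the packet is put back and the interface is
 * marked OACTIVE.
 */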
1167static int
1168oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1169{
1170 POCE_SOFTC sc = ifp->if_softc;
1171 int status = 0, queue_index = 0;
1172 struct mbuf *next = NULL;
1173 struct buf_ring *br = NULL;
1174
1175 br = wq->br;
1176 queue_index = wq->queue_index;
1177
1178 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1179 IFF_DRV_RUNNING) {
1180 if (m != NULL)
1181 status = drbr_enqueue(ifp, br, m);
1182 return status;
1183 }
1184
1185 if (m != NULL) {
1186 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1187 return status;
1188 }
1189 while ((next = drbr_peek(ifp, br)) != NULL) {
1190 if (oce_tx(sc, &next, queue_index)) {
1191 if (next == NULL) {
1192 drbr_advance(ifp, br);
1193 } else {
1194 drbr_putback(ifp, br, next);
1195 wq->tx_stats.tx_stops ++;
1196 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1197 status = drbr_enqueue(ifp, br, next);
1198 }
1199 break;
1200 }
1201 drbr_advance(ifp, br);
1202 ifp->if_obytes += next->m_pkthdr.len;
1203 if (next->m_flags & M_MCAST)
1204 ifp->if_omcasts++;
1205 ETHER_BPF_MTAP(ifp, next);
1206 }
1207
1208 return status;
1209}
1210
1211
1212
1213
1214/*****************************************************************************
1215 * Receive routines						      *
1216 *****************************************************************************/
1217
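/* Assemble a received frame from its RQ fragments into an mbuf chain, apply
 * the checksum and VLAN results carried in the CQE, and hand the packet to
 * LRO or directly to the stack.
 */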
1218static void
1219oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1220{
1221 uint32_t out;
1222 struct oce_packet_desc *pd;
1223 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1224 int i, len, frag_len;
1225 struct mbuf *m = NULL, *tail = NULL;
1226 uint16_t vtag;
1227
1228 len = cqe->u0.s.pkt_size;
1229 if (!len) {
1230		/* Partial DMA workaround for Lancer */
1231 oce_discard_rx_comp(rq, cqe);
1232 goto exit;
1233 }
1234
1235 /* Get vlan_tag value */
1236 if(IS_BE(sc))
1237 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1238 else
1239 vtag = cqe->u0.s.vlan_tag;
1240
1241
1242 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1243
1244 if (rq->packets_out == rq->packets_in) {
1245 device_printf(sc->dev,
1246			    "RQ descriptor missing\n");
1247 }
1248 out = rq->packets_out + 1;
1249 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1250 out = 0;
1251 pd = &rq->pckts[rq->packets_out];
1252 rq->packets_out = out;
1253
1254 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1255 bus_dmamap_unload(rq->tag, pd->map);
1256 rq->pending--;
1257
1258 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1259 pd->mbuf->m_len = frag_len;
1260
1261 if (tail != NULL) {
1262 /* additional fragments */
1263 pd->mbuf->m_flags &= ~M_PKTHDR;
1264 tail->m_next = pd->mbuf;
1265 tail = pd->mbuf;
1266 } else {
1267 /* first fragment, fill out much of the packet header */
1268 pd->mbuf->m_pkthdr.len = len;
1269 pd->mbuf->m_pkthdr.csum_flags = 0;
1270 if (IF_CSUM_ENABLED(sc)) {
1271 if (cqe->u0.s.l4_cksum_pass) {
1272 pd->mbuf->m_pkthdr.csum_flags |=
1273 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1274 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1275 }
1276 if (cqe->u0.s.ip_cksum_pass) {
1277 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1278 pd->mbuf->m_pkthdr.csum_flags |=
1279 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1280 }
1281 }
1282 }
1283 m = tail = pd->mbuf;
1284 }
1285 pd->mbuf = NULL;
1286 len -= frag_len;
1287 }
1288
1289 if (m) {
1290 if (!oce_cqe_portid_valid(sc, cqe)) {
1291 m_freem(m);
1292 goto exit;
1293 }
1294
1295 m->m_pkthdr.rcvif = sc->ifp;
1296#if __FreeBSD_version >= 800000
1297 m->m_pkthdr.flowid = rq->queue_index;
1298 m->m_flags |= M_FLOWID;
1299#endif
1300		/* This determines if the vlan tag is valid */
1301 if (oce_cqe_vtp_valid(sc, cqe)) {
1302 if (sc->function_mode & FNM_FLEX10_MODE) {
1303 /* FLEX10. If QnQ is not set, neglect VLAN */
1304 if (cqe->u0.s.qnq) {
1305 m->m_pkthdr.ether_vtag = vtag;
1306 m->m_flags |= M_VLANTAG;
1307 }
1308 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1309				/* In UMC mode the pvid is generally stripped by
1310				   hw, but in some cases frames still arrive with
1311				   it. So if pvid == vlan, ignore the vlan tag.
1312 */
1313 m->m_pkthdr.ether_vtag = vtag;
1314 m->m_flags |= M_VLANTAG;
1315 }
1316 }
1317
1318 sc->ifp->if_ipackets++;
1319#if defined(INET6) || defined(INET)
1320 /* Try to queue to LRO */
1321 if (IF_LRO_ENABLED(sc) &&
1322 (cqe->u0.s.ip_cksum_pass) &&
1323 (cqe->u0.s.l4_cksum_pass) &&
1324 (!cqe->u0.s.ip_ver) &&
1325 (rq->lro.lro_cnt != 0)) {
1326
1327 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1328 rq->lro_pkts_queued ++;
1329 goto post_done;
1330 }
1331 /* If LRO posting fails then try to post to STACK */
1332 }
1333#endif
1334
1335 (*sc->ifp->if_input) (sc->ifp, m);
1336#if defined(INET6) || defined(INET)
1337post_done:
1338#endif
1339 /* Update rx stats per queue */
1340 rq->rx_stats.rx_pkts++;
1341 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1342 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1343 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1344 rq->rx_stats.rx_mcast_pkts++;
1345 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1346 rq->rx_stats.rx_ucast_pkts++;
1347 }
1348exit:
1349 return;
1350}
1351
1352
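/* Discard a receive completion: unmap and free every RQ buffer belonging to
 * the fragments it describes.
 */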
1353static void
1354oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1355{
1356 uint32_t out, i = 0;
1357 struct oce_packet_desc *pd;
1358 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1359 int num_frags = cqe->u0.s.num_fragments;
1360
1361 for (i = 0; i < num_frags; i++) {
1362 if (rq->packets_out == rq->packets_in) {
1363 device_printf(sc->dev,
1364			    "RQ descriptor missing\n");
1365 }
1366 out = rq->packets_out + 1;
1367 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1368 out = 0;
1369 pd = &rq->pckts[rq->packets_out];
1370 rq->packets_out = out;
1371
1372 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1373 bus_dmamap_unload(rq->tag, pd->map);
1374 rq->pending--;
1375 m_freem(pd->mbuf);
1376 }
1377
1378}
1379
1380
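/* Return the vlan-tag-present bit of the CQE, which sits at a different
 * offset in the v1 layout used by BE3-native adapters.
 */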
1381static int
1382oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1383{
1384 struct oce_nic_rx_cqe_v1 *cqe_v1;
1385 int vtp = 0;
1386
1387 if (sc->be3_native) {
1388 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1389 vtp = cqe_v1->u0.s.vlan_tag_present;
1390 } else
1391 vtp = cqe->u0.s.vlan_tag_present;
1392
1393 return vtp;
1394
1395}
1396
1397
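/* On BE3-native adapters, check that the CQE's port id matches this
 * interface; other chips do not carry the field and always pass.
 */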
1398static int
1399oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1400{
1401 struct oce_nic_rx_cqe_v1 *cqe_v1;
1402 int port_id = 0;
1403
1404 if (sc->be3_native && IS_BE(sc)) {
1405 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1406 port_id = cqe_v1->u0.s.port;
1407 if (sc->port_id != port_id)
1408 return 0;
1409 } else
1410 ;/* For BE3 legacy and Lancer this is dummy */
1411
1412 return 1;
1413
1414}
1415
1416#if defined(INET6) || defined(INET)
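/* Flush all active LRO sessions on this RQ up to the stack. */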
1417static void
1418oce_rx_flush_lro(struct oce_rq *rq)
1419{
1420 struct lro_ctrl *lro = &rq->lro;
1421 struct lro_entry *queued;
1422 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1423
1424 if (!IF_LRO_ENABLED(sc))
1425 return;
1426
1427 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1428 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1429 tcp_lro_flush(lro, queued);
1430 }
1431 rq->lro_pkts_queued = 0;
1432
1433 return;
1434}
1435
1436
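/* Initialize an LRO context for every receive queue. */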
1437static int
1438oce_init_lro(POCE_SOFTC sc)
1439{
1440 struct lro_ctrl *lro = NULL;
1441 int i = 0, rc = 0;
1442
1443 for (i = 0; i < sc->nrqs; i++) {
1444 lro = &sc->rq[i]->lro;
1445 rc = tcp_lro_init(lro);
1446 if (rc != 0) {
1447 device_printf(sc->dev, "LRO init failed\n");
1448 return rc;
1449 }
1450 lro->ifp = sc->ifp;
1451 }
1452
1453 return rc;
1454}
1455
1456
1457void
1458oce_free_lro(POCE_SOFTC sc)
1459{
1460 struct lro_ctrl *lro = NULL;
1461 int i = 0;
1462
1463 for (i = 0; i < sc->nrqs; i++) {
1464 lro = &sc->rq[i]->lro;
1465 if (lro)
1466 tcp_lro_free(lro);
1467 }
1468}
1469#endif /* INET6 || INET */
1470
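/* Post up to 'count' fresh mbuf clusters to the receive ring, ringing the
 * RX doorbell in batches of at most OCE_MAX_RQ_POSTS buffers.
 */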
1471int
1472oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1473{
1474 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1475 int i, in, rc;
1476 struct oce_packet_desc *pd;
1477 bus_dma_segment_t segs[6];
1478 int nsegs, added = 0;
1479 struct oce_nic_rqe *rqe;
1480 pd_rxulp_db_t rxdb_reg;
1481
1482 bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1483 for (i = 0; i < count; i++) {
1484 in = rq->packets_in + 1;
1485 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1486 in = 0;
1487 if (in == rq->packets_out)
1488 break; /* no more room */
1489
1490 pd = &rq->pckts[rq->packets_in];
1491 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1492 if (pd->mbuf == NULL)
1493 break;
1494
1495 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1496 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1497 pd->map,
1498 pd->mbuf,
1499 segs, &nsegs, BUS_DMA_NOWAIT);
1500 if (rc) {
1501 m_free(pd->mbuf);
1502 break;
1503 }
1504
1505 if (nsegs != 1) {
1506 i--;
1507 continue;
1508 }
1509
1510 rq->packets_in = in;
1511 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1512
1513 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1514 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1515 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1516 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1517 RING_PUT(rq->ring, 1);
1518 added++;
1519 rq->pending++;
1520 }
1521 if (added != 0) {
1522 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1523 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1524 rxdb_reg.bits.qid = rq->rq_id;
1525 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1526 added -= OCE_MAX_RQ_POSTS;
1527 }
1528 if (added > 0) {
1529 rxdb_reg.bits.qid = rq->rq_id;
1530 rxdb_reg.bits.num_posted = added;
1531 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1532 }
1533 }
1534
1535 return 0;
1536}
1537
1538
1539/* Handle the Completion Queue for receive */
1540uint16_t
1541oce_rq_handler(void *arg)
1542{
1543 struct oce_rq *rq = (struct oce_rq *)arg;
1544 struct oce_cq *cq = rq->cq;
1545 POCE_SOFTC sc = rq->parent;
1546 struct oce_nic_rx_cqe *cqe;
1547 int num_cqes = 0, rq_buffers_used = 0;
1548
1549
1550 LOCK(&rq->rx_lock);
1551 bus_dmamap_sync(cq->ring->dma.tag,
1552 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1553 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1554 while (cqe->u0.dw[2]) {
1555 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1556
1557 RING_GET(rq->ring, 1);
1558 if (cqe->u0.s.error == 0) {
1559 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1560 } else {
1561 rq->rx_stats.rxcp_err++;
1562 sc->ifp->if_ierrors++;
1563 /* Post L3/L4 errors to stack.*/
1564 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1565 }
1566 rq->rx_stats.rx_compl++;
1567 cqe->u0.dw[2] = 0;
1568
1569#if defined(INET6) || defined(INET)
1570 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1571 oce_rx_flush_lro(rq);
1572 }
1573#endif
1574
1575 RING_GET(cq->ring, 1);
1576 bus_dmamap_sync(cq->ring->dma.tag,
1577 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1578 cqe =
1579 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1580 num_cqes++;
1581 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1582 break;
1583 }
1584
1585#if defined(INET6) || defined(INET)
1586 if (IF_LRO_ENABLED(sc))
1587 oce_rx_flush_lro(rq);
1588#endif
1589
1590 if (num_cqes) {
1591 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1592 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1593 if (rq_buffers_used > 1)
1594 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1595 }
1596
1597 UNLOCK(&rq->rx_lock);
1598
1599 return 0;
1600
1601}
1602
1603
1604
1605
1606/*****************************************************************************
1607 * Helper functions						     *
1608 *****************************************************************************/
1609
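/* Allocate the ifnet, initialize its methods, capabilities and media, and
 * attach it to the ethernet layer.
 */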
1610static int
1611oce_attach_ifp(POCE_SOFTC sc)
1612{
1613
1614 sc->ifp = if_alloc(IFT_ETHER);
1615 if (!sc->ifp)
1616 return ENOMEM;
1617
1618 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1619 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1620 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1621
1622 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1623 sc->ifp->if_ioctl = oce_ioctl;
1624 sc->ifp->if_start = oce_start;
1625 sc->ifp->if_init = oce_init;
1626 sc->ifp->if_mtu = ETHERMTU;
1627 sc->ifp->if_softc = sc;
1628#if __FreeBSD_version >= 800000
1629 sc->ifp->if_transmit = oce_multiq_start;
1630 sc->ifp->if_qflush = oce_multiq_flush;
1631#endif
1632
1633 if_initname(sc->ifp,
1634 device_get_name(sc->dev), device_get_unit(sc->dev));
1635
1636 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1637 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1638 IFQ_SET_READY(&sc->ifp->if_snd);
1639
1640 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1641 sc->ifp->if_hwassist |= CSUM_TSO;
1642 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1643
1644 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1645 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1646 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1647
1648#if defined(INET6) || defined(INET)
1649 sc->ifp->if_capabilities |= IFCAP_TSO;
1650 sc->ifp->if_capabilities |= IFCAP_LRO;
1651 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1652#endif
1653
1654 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1655 if_initbaudrate(sc->ifp, IF_Gbps(10));
1656
1657 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1658
1659 return 0;
1660}
1661
1662
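/* VLAN registration callbacks: record the tag in the softc and reprogram
 * the hardware VLAN filter.
 */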
1663static void
1664oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1665{
1666 POCE_SOFTC sc = ifp->if_softc;
1667
1668 if (ifp->if_softc != arg)
1669 return;
1670 if ((vtag == 0) || (vtag > 4095))
1671 return;
1672
1673 sc->vlan_tag[vtag] = 1;
1674 sc->vlans_added++;
1675 oce_vid_config(sc);
1676}
1677
1678
1679static void
1680oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1681{
1682 POCE_SOFTC sc = ifp->if_softc;
1683
1684 if (ifp->if_softc != arg)
1685 return;
1686 if ((vtag == 0) || (vtag > 4095))
1687 return;
1688
1689 sc->vlan_tag[vtag] = 0;
1690 sc->vlans_added--;
1691 oce_vid_config(sc);
1692}
1693
1694
1695/*
1696 * A max of 64 vlans can be configured in BE. If the user configures
1697 * more, place the card in vlan promiscuous mode.
1698 */
1699static int
1700oce_vid_config(POCE_SOFTC sc)
1701{
1702 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1703 uint16_t ntags = 0, i;
1704 int status = 0;
1705
1706 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1707 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1708 for (i = 0; i < MAX_VLANS; i++) {
1709 if (sc->vlan_tag[i]) {
1710 vtags[ntags].vtag = i;
1711 ntags++;
1712 }
1713 }
1714 if (ntags)
1715 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1716 vtags, ntags, 1, 0);
1717 } else
1718 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1719 NULL, 0, 1, 1);
1720 return status;
1721}
1722
1723
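/* If the interface's MAC address was changed administratively, program the
 * new address into the adapter and delete the old entry.
 */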
1724static void
1725oce_mac_addr_set(POCE_SOFTC sc)
1726{
1727 uint32_t old_pmac_id = sc->pmac_id;
1728 int status = 0;
1729
1730
1731 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1732 sc->macaddr.size_of_struct);
1733 if (!status)
1734 return;
1735
1736 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1737 sc->if_id, &sc->pmac_id);
1738 if (!status) {
1739 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1740 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1741 sc->macaddr.size_of_struct);
1742 }
1743 if (status)
1744		device_printf(sc->dev, "Failed to update MAC address\n");
1745
1746}
1747
1748
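/* ioctl passthrough: validate the user cookie, copy the mailbox command
 * into DMA memory, execute it against the firmware and copy the response
 * back to user space.
 */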
1749static int
1750oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1751{
1752 POCE_SOFTC sc = ifp->if_softc;
1753 struct ifreq *ifr = (struct ifreq *)data;
1754 int rc = ENXIO;
1755 char cookie[32] = {0};
1756 void *priv_data = (void *)ifr->ifr_data;
1757 void *ioctl_ptr;
1758 uint32_t req_size;
1759 struct mbx_hdr req;
1760 OCE_DMA_MEM dma_mem;
1761 struct mbx_common_get_cntl_attr *fw_cmd;
1762
1763 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1764 return EFAULT;
1765
1766 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1767 return EINVAL;
1768
1769 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1770 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1771 return EFAULT;
1772
1773 req_size = le32toh(req.u0.req.request_length);
1774 if (req_size > 65536)
1775 return EINVAL;
1776
1777 req_size += sizeof(struct mbx_hdr);
1778 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1779 if (rc)
1780 return ENOMEM;
1781
1782 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1783 rc = EFAULT;
1784 goto dma_free;
1785 }
1786
1787 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1788 if (rc) {
1789 rc = EIO;
1790 goto dma_free;
1791 }
1792
1793 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1794 rc = EFAULT;
1795
1796 /*
1797	   Firmware fills in all the attributes for this ioctl except
1798	   the driver version, so fill it in here.
1799 */
1800 if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1801 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1802 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1803 COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1804 }
1805
1806dma_free:
1807 oce_dma_free(sc, &dma_mem);
1808 return rc;
1809
1810}
1811
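/* Adaptive interrupt coalescing: sample each EQ's interrupt rate since the
 * last tick and raise or lower its delay multiplier to keep the rate between
 * INTR_RATE_LWM and INTR_RATE_HWM.
 */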
1812static void
1813oce_eqd_set_periodic(POCE_SOFTC sc)
1814{
1815 struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1816 struct oce_aic_obj *aic;
1817 struct oce_eq *eqo;
1818 uint64_t now = 0, delta;
1819 int eqd, i, num = 0;
1820 uint32_t ips = 0;
1821 int tps;
1822
1823 for (i = 0 ; i < sc->neqs; i++) {
1824 eqo = sc->eq[i];
1825 aic = &sc->aic_obj[i];
1826		/* Adaptive coalescing disabled: use the static EQ delay set from user space */
1827 if (!aic->enable) {
1828 eqd = aic->et_eqd;
1829 goto modify_eqd;
1830 }
1831
1832 now = ticks;
1833
1834		/* Overflow check */
1835 if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1836 goto done;
1837
1838 delta = now - aic->ticks;
1839 tps = delta/hz;
1840
1841 /* Interrupt rate based on elapsed ticks */
1842 if(tps)
1843 ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1844
1845 if (ips > INTR_RATE_HWM)
1846 eqd = aic->cur_eqd + 20;
1847 else if (ips < INTR_RATE_LWM)
1848 eqd = aic->cur_eqd / 2;
1849 else
1850 goto done;
1851
1852 if (eqd < 10)
1853 eqd = 0;
1854
1855 /* Make sure that the eq delay is in the known range */
1856 eqd = min(eqd, aic->max_eqd);
1857 eqd = max(eqd, aic->min_eqd);
1858
1859modify_eqd:
1860 if (eqd != aic->cur_eqd) {
1861 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1862 set_eqd[num].eq_id = eqo->eq_id;
1863 aic->cur_eqd = eqd;
1864 num++;
1865 }
1866done:
1867 aic->intr_prev = eqo->intr;
1868 aic->ticks = now;
1869 }
1870
1871	/* Is there at least one EQ that needs to be modified? */
1872 if(num)
1873 oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1874
1875}
1876
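/* Per-second housekeeping: refresh statistics, sync the MAC address, kick
 * stalled TX queues and, on BE adapters, retune the EQ delays.
 */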
1877static void
1878oce_local_timer(void *arg)
1879{
1880 POCE_SOFTC sc = arg;
1881 int i = 0;
1882
1883 oce_refresh_nic_stats(sc);
1884 oce_refresh_queue_stats(sc);
1885 oce_mac_addr_set(sc);
1886
1887 /* TX Watch Dog*/
1888 for (i = 0; i < sc->nwqs; i++)
1889 oce_tx_restart(sc, sc->wq[i]);
1890
1891 /* calculate and set the eq delay for optimal interrupt rate */
1892 if (IS_BE(sc))
1893 oce_eqd_set_periodic(sc);
1894
1895 callout_reset(&sc->timer, hz, oce_local_timer, sc);
1896}
1897
1898
1899/* NOTE : This should only be called holding
1900 * DEVICE_LOCK.
1901*/
1902static void
1903oce_if_deactivate(POCE_SOFTC sc)
1904{
1905 int i, mtime = 0;
1906 int wait_req = 0;
1907 struct oce_rq *rq;
1908 struct oce_wq *wq;
1909 struct oce_eq *eq;
1910
1911 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1912
1913	/* Wait a maximum of 400ms for TX completions to be done */
1914 while (mtime < 400) {
1915 wait_req = 0;
1916 for_all_wq_queues(sc, wq, i) {
1917 if (wq->ring->num_used) {
1918 wait_req = 1;
1919 DELAY(1);
1920 break;
1921 }
1922 }
1923 mtime += 1;
1924 if (!wait_req)
1925 break;
1926 }
1927
1928 /* Stop intrs and finish any bottom halves pending */
1929 oce_hw_intr_disable(sc);
1930
1931	/* Since taskqueue_drain takes the Giant lock, we should not hold
1932	   any other lock. So unlock the device lock and reacquire it after
1933	   completing taskqueue_drain.
1934 */
1935 UNLOCK(&sc->dev_lock);
1936 for (i = 0; i < sc->intr_count; i++) {
1937 if (sc->intrs[i].tq != NULL) {
1938 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
1939 }
1940 }
1941 LOCK(&sc->dev_lock);
1942
1943 /* Delete RX queue in card with flush param */
1944 oce_stop_rx(sc);
1945
1946 /* Invalidate any pending cq and eq entries*/
1947 for_all_evnt_queues(sc, eq, i)
1948 oce_drain_eq(eq);
1949 for_all_rq_queues(sc, rq, i)
1950 oce_drain_rq_cq(rq);
1951 for_all_wq_queues(sc, wq, i)
1952 oce_drain_wq_cq(wq);
1953
1954	/* But we still need to get MCC async events.
1955	   So enable intrs and also arm the first EQ
1956 */
1957 oce_hw_intr_enable(sc);
1958 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
1959
1960 DELAY(10);
1961}
1962
1963
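/* Bring the interface back up: restart the RX and TX queues, rearm every
 * event queue and re-enable interrupts.
 */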
1964static void
1965oce_if_activate(POCE_SOFTC sc)
1966{
1967 struct oce_eq *eq;
1968 struct oce_rq *rq;
1969 struct oce_wq *wq;
1970 int i, rc = 0;
1971
1972 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1973
1974 oce_hw_intr_disable(sc);
1975
1976 oce_start_rx(sc);
1977
1978 for_all_rq_queues(sc, rq, i) {
1979 rc = oce_start_rq(rq);
1980 if (rc)
1981 device_printf(sc->dev, "Unable to start RX\n");
1982 }
1983
1984 for_all_wq_queues(sc, wq, i) {
1985 rc = oce_start_wq(wq);
1986 if (rc)
1987 device_printf(sc->dev, "Unable to start TX\n");
1988 }
1989
1990
1991 for_all_evnt_queues(sc, eq, i)
1992 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
1993
1994 oce_hw_intr_enable(sc);
1995
1996}
1997
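/* Record the link state and speed reported by an async CQE and notify the
 * network stack.
 */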
1998static void
1999process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2000{
2001 /* Update Link status */
2002 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2003 ASYNC_EVENT_LINK_UP) {
2004 sc->link_status = ASYNC_EVENT_LINK_UP;
2005 if_link_state_change(sc->ifp, LINK_STATE_UP);
2006 } else {
2007 sc->link_status = ASYNC_EVENT_LINK_DOWN;
2008 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2009 }
2010
2011 /* Update speed */
2012 sc->link_speed = acqe->u0.s.speed;
2013 sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
2014
2015}
2016
2017
2018/* Handle the Completion Queue for the Mailbox/Async notifications */
2019uint16_t
2020oce_mq_handler(void *arg)
2021{
2022 struct oce_mq *mq = (struct oce_mq *)arg;
2023 POCE_SOFTC sc = mq->parent;
2024 struct oce_cq *cq = mq->cq;
2025 int num_cqes = 0, evt_type = 0, optype = 0;
2026 struct oce_mq_cqe *cqe;
2027 struct oce_async_cqe_link_state *acqe;
2028 struct oce_async_event_grp5_pvid_state *gcqe;
2029 struct oce_async_event_qnq *dbgcqe;
2030
2031
2032 bus_dmamap_sync(cq->ring->dma.tag,
2033 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2034 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2035
2036 while (cqe->u0.dw[3]) {
2037 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2038 if (cqe->u0.s.async_event) {
2039 evt_type = cqe->u0.s.event_type;
2040 optype = cqe->u0.s.async_type;
2041 if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
2042 /* Link status evt */
2043 acqe = (struct oce_async_cqe_link_state *)cqe;
2044 process_link_state(sc, acqe);
2045 } else if ((evt_type == ASYNC_EVENT_GRP5) &&
2046 (optype == ASYNC_EVENT_PVID_STATE)) {
2047 /* GRP5 PVID */
2048 gcqe =
2049 (struct oce_async_event_grp5_pvid_state *)cqe;
2050 if (gcqe->enabled)
2051 sc->pvid = gcqe->tag & VLAN_VID_MASK;
2052 else
2053 sc->pvid = 0;
2054
2055 }
2056 else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
2057 optype == ASYNC_EVENT_DEBUG_QNQ) {
2058 dbgcqe =
2059 (struct oce_async_event_qnq *)cqe;
2060 if(dbgcqe->valid)
2061 sc->qnqid = dbgcqe->vlan_tag;
2062 sc->qnq_debug_event = TRUE;
2063 }
2064 }
2065 cqe->u0.dw[3] = 0;
2066 RING_GET(cq->ring, 1);
2067 bus_dmamap_sync(cq->ring->dma.tag,
2068 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2069 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2070 num_cqes++;
2071 }
2072
2073 if (num_cqes)
2074 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2075
2076 return 0;
2077}
2078
2079
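/* Decide how many RX/TX queues to request; RSS stays disabled on
 * FLEX10/UMC/VNIC functions and on BE2 adapters.
 */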
2080static void
2081setup_max_queues_want(POCE_SOFTC sc)
2082{
2083 int max_rss = 0;
2084
2085	/* Check if it is a FLEX machine. If so, don't use RSS */
2086 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2087 (sc->function_mode & FNM_UMC_MODE) ||
2088 (sc->function_mode & FNM_VNIC_MODE) ||
2089 (!sc->rss_enable) ||
2090 (sc->flags & OCE_FLAGS_BE2)) {
2091 sc->nrqs = 1;
2092 sc->nwqs = 1;
2093 sc->rss_enable = 0;
2094 } else {
2095		/* For multiq, our design is to have as many TX rings as
2096		   RSS rings, so that one RSS ring and one TX ring can be
2097		   paired on a single intr, which improves CPU cache efficiency.
2098 */
2099 if (IS_BE(sc) && (!sc->be3_native))
2100 max_rss = OCE_LEGACY_MODE_RSS;
2101 else
2102 max_rss = OCE_MAX_RSS;
2103
2104 sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
2105 sc->nwqs = MIN(OCE_NCPUS, max_rss);
2106 }
2107
2108}
2109
2110
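/* Clamp the RX/TX queue counts to the number of interrupt vectors actually
 * obtained.
 */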
2111static void
2112update_queues_got(POCE_SOFTC sc)
2113{
2114 if (sc->rss_enable) {
2115 sc->nrqs = sc->intr_count + 1;
2116 sc->nwqs = sc->intr_count;
2117 } else {
2118 sc->nrqs = 1;
2119 sc->nwqs = 1;
2120 }
2121}
2122
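/* Return TRUE for IPv6 packets whose next header is neither TCP nor UDP
 * and whose first extension header reports a length of 0xff; such frames
 * need the TX stall workaround below.
 */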
2123static int
2124oce_check_ipv6_ext_hdr(struct mbuf *m)
2125{
2126 struct ether_header *eh = mtod(m, struct ether_header *);
2127 caddr_t m_datatemp = m->m_data;
2128
2129 if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2130 m->m_data += sizeof(struct ether_header);
2131 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2132
2133 if((ip6->ip6_nxt != IPPROTO_TCP) && \
2134 (ip6->ip6_nxt != IPPROTO_UDP)){
2135 struct ip6_ext *ip6e = NULL;
2136 m->m_data += sizeof(struct ip6_hdr);
2137
2138 ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2139 if(ip6e->ip6e_len == 0xff) {
2140 m->m_data = m_datatemp;
2141 return TRUE;
2142 }
2143 }
2144 m->m_data = m_datatemp;
2145 }
2146 return FALSE;
2147}
2148
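/* TRUE on BE3 silicon with an ASIC revision below 2, i.e. the A0/A1
 * steppings.
 */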
2149static int
2150is_be3_a1(POCE_SOFTC sc)
2151{
2152 if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2153 return TRUE;
2154 }
2155 return FALSE;
2156}
2157
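/* Insert the VLAN tag (or the UMC pvid / QnQ outer tag) into the frame in
 * software; *complete is cleared when the hardware must not add a tag of
 * its own.
 */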
2158static struct mbuf *
2159oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2160{
2161 uint16_t vlan_tag = 0;
2162
2163 if(!M_WRITABLE(m))
2164 return NULL;
2165
2166 /* Embed vlan tag in the packet if it is not part of it */
2167 if(m->m_flags & M_VLANTAG) {
2168 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2169 m->m_flags &= ~M_VLANTAG;
2170 }
2171
2172 /* if UMC, ignore vlan tag insertion and instead insert pvid */
2173 if(sc->pvid) {
2174 if(!vlan_tag)
2175 vlan_tag = sc->pvid;
2176 *complete = FALSE;
2177 }
2178
2179 if(vlan_tag) {
2180 m = ether_vlanencap(m, vlan_tag);
2181 }
2182
2183 if(sc->qnqid) {
2184 m = ether_vlanencap(m, sc->qnqid);
2185 *complete = FALSE;
2186 }
2187 return m;
2188}
2189
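/* Check whether a packet could trigger the BE3 A0/A1 TX stall: a QnQ or
 * UMC function transmitting an IPv6 packet with extension headers.
 */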
2190static int
2191oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2192{
2193 if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2194 oce_check_ipv6_ext_hdr(m)) {
2195 return TRUE;
2196 }
2197 return FALSE;
2198}