head/sys/dev/oce/oce_if.c: r252869 → r257007
1/*-
2 * Copyright (C) 2013 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
- 40/* $FreeBSD: head/sys/dev/oce/oce_if.c 252869 2013-07-06 08:30:45Z delphij $ */
+ 39/* $FreeBSD: head/sys/dev/oce/oce_if.c 257007 2013-10-23 18:58:38Z delphij $ */
 40
41#include "opt_inet6.h"
42#include "opt_inet.h"
43
44#include "oce_if.h"
45
46/* UE Status Low CSR */
47static char *ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81
82/* UE Status High CSR */
83static char *ue_status_hi_desc[] = {
84 "LPCMEMHOST",
85 "MGMT_MAC",
86 "PCS0ONLINE",
87 "MPU_IRAM",
88 "PCS1ONLINE",
89 "PCTL0",
90 "PCTL1",
91 "PMEM",
92 "RR",
93 "TXPB",
94 "RXPP",
95 "XAUI",
96 "TXP",
97 "ARM",
98 "IPC",
99 "HOST2",
100 "HOST3",
101 "HOST4",
102 "HOST5",
103 "HOST6",
104 "HOST7",
105 "HOST8",
106 "HOST9",
107 "NETC",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown"
116};
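/* Note: the two tables above map bit positions in the Unrecoverable Error
 * (UE) status low/high CSRs to hardware block names, presumably for
 * diagnostic logging when a UE condition is detected; they closely match
 * the tables carried by the Linux be2net driver. */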
117
118
119/* Driver entry points prototypes */
120static int oce_probe(device_t dev);
121static int oce_attach(device_t dev);
122static int oce_detach(device_t dev);
123static int oce_shutdown(device_t dev);
124static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
125static void oce_init(void *xsc);
126static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
127static void oce_multiq_flush(struct ifnet *ifp);
128
129/* Driver interrupt routines prototypes */
130static void oce_intr(void *arg, int pending);
131static int oce_setup_intr(POCE_SOFTC sc);
132static int oce_fast_isr(void *arg);
133static int oce_alloc_intr(POCE_SOFTC sc, int vector,
134 void (*isr) (void *arg, int pending));
135
136/* Media callbacks prototypes */
137static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
138static int oce_media_change(struct ifnet *ifp);
139
140/* Transmit routines prototypes */
141static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
142static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
143static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
144 uint32_t status);
145static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
146 struct oce_wq *wq);
147
148/* Receive routines prototypes */
149static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
150static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
151static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
152static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
153 struct oce_nic_rx_cqe *cqe);
154
155/* Helper function prototypes in this file */
156static int oce_attach_ifp(POCE_SOFTC sc);
157static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
158static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
159static int oce_vid_config(POCE_SOFTC sc);
160static void oce_mac_addr_set(POCE_SOFTC sc);
161static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
162static void oce_local_timer(void *arg);
163static void oce_if_deactivate(POCE_SOFTC sc);
164static void oce_if_activate(POCE_SOFTC sc);
165static void setup_max_queues_want(POCE_SOFTC sc);
166static void update_queues_got(POCE_SOFTC sc);
167static void process_link_state(POCE_SOFTC sc,
168 struct oce_async_cqe_link_state *acqe);
169static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
170static void oce_get_config(POCE_SOFTC sc);
171static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
172
173/* IP specific */
174#if defined(INET6) || defined(INET)
175static int oce_init_lro(POCE_SOFTC sc);
176static void oce_rx_flush_lro(struct oce_rq *rq);
177static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
178#endif
179
180static device_method_t oce_dispatch[] = {
181 DEVMETHOD(device_probe, oce_probe),
182 DEVMETHOD(device_attach, oce_attach),
183 DEVMETHOD(device_detach, oce_detach),
184 DEVMETHOD(device_shutdown, oce_shutdown),
185
186 DEVMETHOD_END
187};
188
189static driver_t oce_driver = {
190 "oce",
191 oce_dispatch,
192 sizeof(OCE_SOFTC)
193};
194static devclass_t oce_devclass;
195
196
197DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
198MODULE_DEPEND(oce, pci, 1, 1, 1);
199MODULE_DEPEND(oce, ether, 1, 1, 1);
200MODULE_VERSION(oce, 1);
201
202
203/* global vars */
204const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
205
 206/* Module capabilities and parameters */
207uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
208uint32_t oce_enable_rss = OCE_MODCAP_RSS;
209
210
211TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
212TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
213
214
215/* Supported devices table */
216static uint32_t supportedDevices[] = {
217 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
218 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
219 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
220 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
221 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
222 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
223};
224
225
226
227
228/*****************************************************************************
229 * Driver entry points functions *
230 *****************************************************************************/
231
232static int
233oce_probe(device_t dev)
234{
235 uint16_t vendor = 0;
236 uint16_t device = 0;
237 int i = 0;
238 char str[256] = {0};
239 POCE_SOFTC sc;
240
241 sc = device_get_softc(dev);
242 bzero(sc, sizeof(OCE_SOFTC));
243 sc->dev = dev;
244
245 vendor = pci_get_vendor(dev);
246 device = pci_get_device(dev);
247
248 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
249 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
250 if (device == (supportedDevices[i] & 0xffff)) {
251 sprintf(str, "%s:%s", "Emulex CNA NIC function",
252 component_revision);
253 device_set_desc_copy(dev, str);
254
255 switch (device) {
256 case PCI_PRODUCT_BE2:
257 sc->flags |= OCE_FLAGS_BE2;
258 break;
259 case PCI_PRODUCT_BE3:
260 sc->flags |= OCE_FLAGS_BE3;
261 break;
262 case PCI_PRODUCT_XE201:
263 case PCI_PRODUCT_XE201_VF:
264 sc->flags |= OCE_FLAGS_XE201;
265 break;
266 case PCI_PRODUCT_SH:
267 sc->flags |= OCE_FLAGS_SH;
268 break;
269 default:
270 return ENXIO;
271 }
272 return BUS_PROBE_DEFAULT;
273 }
274 }
275 }
276
277 return ENXIO;
278}
279
280
281static int
282oce_attach(device_t dev)
283{
284 POCE_SOFTC sc;
285 int rc = 0;
286
287 sc = device_get_softc(dev);
288
289 rc = oce_hw_pci_alloc(sc);
290 if (rc)
291 return rc;
292
293 sc->tx_ring_size = OCE_TX_RING_SIZE;
294 sc->rx_ring_size = OCE_RX_RING_SIZE;
295 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
296 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
297 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
298
299 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
300 LOCK_CREATE(&sc->dev_lock, "Device_lock");
301
302 /* initialise the hardware */
303 rc = oce_hw_init(sc);
304 if (rc)
305 goto pci_res_free;
306
307 oce_get_config(sc);
308
309 setup_max_queues_want(sc);
310
311 rc = oce_setup_intr(sc);
312 if (rc)
313 goto mbox_free;
314
315 rc = oce_queue_init_all(sc);
316 if (rc)
317 goto intr_free;
318
319 rc = oce_attach_ifp(sc);
320 if (rc)
321 goto queues_free;
322
323#if defined(INET6) || defined(INET)
324 rc = oce_init_lro(sc);
325 if (rc)
326 goto ifp_free;
327#endif
328
329 rc = oce_hw_start(sc);
330 if (rc)
331 goto lro_free;
332
333 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
334 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
335 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
336 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
337
338 rc = oce_stats_init(sc);
339 if (rc)
340 goto vlan_free;
341
342 oce_add_sysctls(sc);
343
344 callout_init(&sc->timer, CALLOUT_MPSAFE);
345 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
346 if (rc)
347 goto stats_free;
348
349 return 0;
350
351stats_free:
352 callout_drain(&sc->timer);
353 oce_stats_free(sc);
354vlan_free:
355 if (sc->vlan_attach)
356 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
357 if (sc->vlan_detach)
358 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
359 oce_hw_intr_disable(sc);
360lro_free:
361#if defined(INET6) || defined(INET)
362 oce_free_lro(sc);
363ifp_free:
364#endif
365 ether_ifdetach(sc->ifp);
366 if_free(sc->ifp);
367queues_free:
368 oce_queue_release_all(sc);
369intr_free:
370 oce_intr_free(sc);
371mbox_free:
372 oce_dma_free(sc, &sc->bsmbx);
373pci_res_free:
374 oce_hw_pci_free(sc);
375 LOCK_DESTROY(&sc->dev_lock);
376 LOCK_DESTROY(&sc->bmbx_lock);
377 return rc;
378
379}
380
381
382static int
383oce_detach(device_t dev)
384{
385 POCE_SOFTC sc = device_get_softc(dev);
386
387 LOCK(&sc->dev_lock);
388 oce_if_deactivate(sc);
389 UNLOCK(&sc->dev_lock);
390
391 callout_drain(&sc->timer);
392
393 if (sc->vlan_attach != NULL)
394 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
395 if (sc->vlan_detach != NULL)
396 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
397
398 ether_ifdetach(sc->ifp);
399
400 if_free(sc->ifp);
401
402 oce_hw_shutdown(sc);
403
404 bus_generic_detach(dev);
405
406 return 0;
407}
408
409
410static int
411oce_shutdown(device_t dev)
412{
413 int rc;
414
415 rc = oce_detach(dev);
416
417 return rc;
418}
419
420
421static int
422oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
423{
424 struct ifreq *ifr = (struct ifreq *)data;
425 POCE_SOFTC sc = ifp->if_softc;
426 int rc = 0;
427 uint32_t u;
428
429 switch (command) {
430
431 case SIOCGIFMEDIA:
432 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
433 break;
434
435 case SIOCSIFMTU:
436 if (ifr->ifr_mtu > OCE_MAX_MTU)
437 rc = EINVAL;
438 else
439 ifp->if_mtu = ifr->ifr_mtu;
440 break;
441
442 case SIOCSIFFLAGS:
443 if (ifp->if_flags & IFF_UP) {
444 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
445 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
446 oce_init(sc);
447 }
448 device_printf(sc->dev, "Interface Up\n");
449 } else {
450 LOCK(&sc->dev_lock);
451
452 sc->ifp->if_drv_flags &=
453 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
454 oce_if_deactivate(sc);
455
456 UNLOCK(&sc->dev_lock);
457
458 device_printf(sc->dev, "Interface Down\n");
459 }
460
461 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
- 391 sc->promisc = TRUE;
- 392 oce_rxf_set_promiscuous(sc, sc->promisc);
+ 462 if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
+ 463 sc->promisc = TRUE;
 464 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
- 394 sc->promisc = FALSE;
- 395 oce_rxf_set_promiscuous(sc, sc->promisc);
+ 465 if (!oce_rxf_set_promiscuous(sc, 0))
+ 466 sc->promisc = FALSE;
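 /* r257007 passes a bit mask to oce_rxf_set_promiscuous(): the value
  * (1 | (1 << 1)) appears to request unicast- plus VLAN-promiscuous
  * filtering (an interpretation of the mask, not taken from a data sheet),
  * and sc->promisc is now updated only when the firmware command
  * succeeds (returns 0). */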
467 }
468
469 break;
470
471 case SIOCADDMULTI:
472 case SIOCDELMULTI:
473 rc = oce_hw_update_multicast(sc);
474 if (rc)
475 device_printf(sc->dev,
476 "Update multicast address failed\n");
477 break;
478
479 case SIOCSIFCAP:
480 u = ifr->ifr_reqcap ^ ifp->if_capenable;
481
482 if (u & IFCAP_TXCSUM) {
483 ifp->if_capenable ^= IFCAP_TXCSUM;
484 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
485
486 if (IFCAP_TSO & ifp->if_capenable &&
487 !(IFCAP_TXCSUM & ifp->if_capenable)) {
488 ifp->if_capenable &= ~IFCAP_TSO;
489 ifp->if_hwassist &= ~CSUM_TSO;
490 if_printf(ifp,
491 "TSO disabled due to -txcsum.\n");
492 }
493 }
494
495 if (u & IFCAP_RXCSUM)
496 ifp->if_capenable ^= IFCAP_RXCSUM;
497
498 if (u & IFCAP_TSO4) {
499 ifp->if_capenable ^= IFCAP_TSO4;
500
501 if (IFCAP_TSO & ifp->if_capenable) {
502 if (IFCAP_TXCSUM & ifp->if_capenable)
503 ifp->if_hwassist |= CSUM_TSO;
504 else {
505 ifp->if_capenable &= ~IFCAP_TSO;
506 ifp->if_hwassist &= ~CSUM_TSO;
507 if_printf(ifp,
508 "Enable txcsum first.\n");
509 rc = EAGAIN;
510 }
511 } else
512 ifp->if_hwassist &= ~CSUM_TSO;
513 }
514
515 if (u & IFCAP_VLAN_HWTAGGING)
516 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
517
518 if (u & IFCAP_VLAN_HWFILTER) {
519 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
520 oce_vid_config(sc);
521 }
522#if defined(INET6) || defined(INET)
523 if (u & IFCAP_LRO)
524 ifp->if_capenable ^= IFCAP_LRO;
525#endif
526
527 break;
528
529 case SIOCGPRIVATE_0:
530 rc = oce_handle_passthrough(ifp, data);
531 break;
532 default:
533 rc = ether_ioctl(ifp, command, data);
534 break;
535 }
536
537 return rc;
538}
539
540
541static void
542oce_init(void *arg)
543{
544 POCE_SOFTC sc = arg;
545
546 LOCK(&sc->dev_lock);
547
548 if (sc->ifp->if_flags & IFF_UP) {
549 oce_if_deactivate(sc);
550 oce_if_activate(sc);
551 }
552
553 UNLOCK(&sc->dev_lock);
554
555}
556
557
558static int
559oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
560{
561 POCE_SOFTC sc = ifp->if_softc;
562 struct oce_wq *wq = NULL;
563 int queue_index = 0;
564 int status = 0;
565
566 if (!sc->link_status)
567 return ENXIO;
568
569 if ((m->m_flags & M_FLOWID) != 0)
570 queue_index = m->m_pkthdr.flowid % sc->nwqs;
571
572 wq = sc->wq[queue_index];
573
574 LOCK(&wq->tx_lock);
575 status = oce_multiq_transmit(ifp, m, wq);
576 UNLOCK(&wq->tx_lock);
577
578 return status;
579
580}
581
582
583static void
584oce_multiq_flush(struct ifnet *ifp)
585{
586 POCE_SOFTC sc = ifp->if_softc;
587 struct mbuf *m;
588 int i = 0;
589
590 for (i = 0; i < sc->nwqs; i++) {
591 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
592 m_freem(m);
593 }
594 if_qflush(ifp);
595}
596
597
598
599/*****************************************************************************
600 * Driver interrupt routines functions *
601 *****************************************************************************/
602
603static void
604oce_intr(void *arg, int pending)
605{
606
607 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
608 POCE_SOFTC sc = ii->sc;
609 struct oce_eq *eq = ii->eq;
610 struct oce_eqe *eqe;
611 struct oce_cq *cq = NULL;
612 int i, num_eqes = 0;
613
614
615 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
616 BUS_DMASYNC_POSTWRITE);
617 do {
618 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
619 if (eqe->evnt == 0)
620 break;
621 eqe->evnt = 0;
622 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
623 BUS_DMASYNC_POSTWRITE);
624 RING_GET(eq->ring, 1);
625 num_eqes++;
626
627 } while (TRUE);
628
629 if (!num_eqes)
630 goto eq_arm; /* Spurious */
631
 632 /* Clear EQ entries, but don't arm */
633 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
634
 635 /* Process TX, RX and MCC, but don't arm the CQ */
636 for (i = 0; i < eq->cq_valid; i++) {
637 cq = eq->cq[i];
638 (*cq->cq_handler)(cq->cb_arg);
639 }
640
641 /* Arm all cqs connected to this EQ */
642 for (i = 0; i < eq->cq_valid; i++) {
643 cq = eq->cq[i];
644 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
645 }
646
647eq_arm:
648 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
649
650 return;
651}
652
653
654static int
655oce_setup_intr(POCE_SOFTC sc)
656{
657 int rc = 0, use_intx = 0;
658 int vector = 0, req_vectors = 0;
659
660 if (is_rss_enabled(sc))
661 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
662 else
663 req_vectors = 1;
664
665 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
666 sc->intr_count = req_vectors;
667 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
668 if (rc != 0) {
669 use_intx = 1;
670 pci_release_msi(sc->dev);
671 } else
672 sc->flags |= OCE_FLAGS_USING_MSIX;
673 } else
674 use_intx = 1;
675
676 if (use_intx)
677 sc->intr_count = 1;
678
679 /* Scale number of queues based on intr we got */
680 update_queues_got(sc);
681
682 if (use_intx) {
683 device_printf(sc->dev, "Using legacy interrupt\n");
684 rc = oce_alloc_intr(sc, vector, oce_intr);
685 if (rc)
686 goto error;
687 } else {
688 for (; vector < sc->intr_count; vector++) {
689 rc = oce_alloc_intr(sc, vector, oce_intr);
690 if (rc)
691 goto error;
692 }
693 }
694
695 return 0;
696error:
697 oce_intr_free(sc);
698 return rc;
699}
700
701
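/* Fast interrupt filter: runs in interrupt context, acknowledges the EQ
 * without re-arming it, and defers the real work to the per-vector
 * taskqueue, whose task runs oce_intr() in thread context (see the
 * TASK_INIT() call in oce_alloc_intr()). */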
702static int
703oce_fast_isr(void *arg)
704{
705 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
706 POCE_SOFTC sc = ii->sc;
707
708 if (ii->eq == NULL)
709 return FILTER_STRAY;
710
711 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
712
713 taskqueue_enqueue_fast(ii->tq, &ii->task);
714
715 ii->eq->intr++;
716
717 return FILTER_HANDLED;
718}
719
720
721static int
722oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
723{
724 POCE_INTR_INFO ii = &sc->intrs[vector];
725 int rc = 0, rr;
726
727 if (vector >= OCE_MAX_EQ)
728 return (EINVAL);
729
730 /* Set the resource id for the interrupt.
731 * MSIx is vector + 1 for the resource id,
732 * INTx is 0 for the resource id.
733 */
734 if (sc->flags & OCE_FLAGS_USING_MSIX)
735 rr = vector + 1;
736 else
737 rr = 0;
738 ii->intr_res = bus_alloc_resource_any(sc->dev,
739 SYS_RES_IRQ,
740 &rr, RF_ACTIVE|RF_SHAREABLE);
741 ii->irq_rr = rr;
742 if (ii->intr_res == NULL) {
743 device_printf(sc->dev,
744 "Could not allocate interrupt\n");
745 rc = ENXIO;
746 return rc;
747 }
748
749 TASK_INIT(&ii->task, 0, isr, ii);
750 ii->vector = vector;
751 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
752 ii->tq = taskqueue_create_fast(ii->task_name,
753 M_NOWAIT,
754 taskqueue_thread_enqueue,
755 &ii->tq);
756 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
757 device_get_nameunit(sc->dev));
758
759 ii->sc = sc;
760 rc = bus_setup_intr(sc->dev,
761 ii->intr_res,
762 INTR_TYPE_NET,
763 oce_fast_isr, NULL, ii, &ii->tag);
764 return rc;
765
766}
767
768
769void
770oce_intr_free(POCE_SOFTC sc)
771{
772 int i = 0;
773
774 for (i = 0; i < sc->intr_count; i++) {
775
776 if (sc->intrs[i].tag != NULL)
777 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
778 sc->intrs[i].tag);
779 if (sc->intrs[i].tq != NULL)
780 taskqueue_free(sc->intrs[i].tq);
781
782 if (sc->intrs[i].intr_res != NULL)
783 bus_release_resource(sc->dev, SYS_RES_IRQ,
784 sc->intrs[i].irq_rr,
785 sc->intrs[i].intr_res);
786 sc->intrs[i].tag = NULL;
787 sc->intrs[i].intr_res = NULL;
788 }
789
790 if (sc->flags & OCE_FLAGS_USING_MSIX)
791 pci_release_msi(sc->dev);
792
793}
794
795
796
797/******************************************************************************
798* Media callbacks functions *
799******************************************************************************/
800
801static void
802oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
803{
804 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
805
806
807 req->ifm_status = IFM_AVALID;
808 req->ifm_active = IFM_ETHER;
809
810 if (sc->link_status == 1)
811 req->ifm_status |= IFM_ACTIVE;
812 else
813 return;
814
815 switch (sc->link_speed) {
816 case 1: /* 10 Mbps */
817 req->ifm_active |= IFM_10_T | IFM_FDX;
818 sc->speed = 10;
819 break;
820 case 2: /* 100 Mbps */
821 req->ifm_active |= IFM_100_TX | IFM_FDX;
822 sc->speed = 100;
823 break;
824 case 3: /* 1 Gbps */
825 req->ifm_active |= IFM_1000_T | IFM_FDX;
826 sc->speed = 1000;
827 break;
828 case 4: /* 10 Gbps */
829 req->ifm_active |= IFM_10G_SR | IFM_FDX;
830 sc->speed = 10000;
831 break;
832 }
833
834 return;
835}
836
837
838int
839oce_media_change(struct ifnet *ifp)
840{
841 return 0;
842}
843
844
845
846
847/*****************************************************************************
848 * Transmit routines functions *
849 *****************************************************************************/
850
851static int
852oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
853{
854 int rc = 0, i, retry_cnt = 0;
855 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
856 struct mbuf *m, *m_temp;
857 struct oce_wq *wq = sc->wq[wq_index];
858 struct oce_packet_desc *pd;
859 struct oce_nic_hdr_wqe *nichdr;
860 struct oce_nic_frag_wqe *nicfrag;
861 int num_wqes;
862 uint32_t reg_value;
863 boolean_t complete = TRUE;
864
865 m = *mpp;
866 if (!m)
867 return EINVAL;
868
869 if (!(m->m_flags & M_PKTHDR)) {
870 rc = ENXIO;
871 goto free_ret;
872 }
873
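 /* Hardware workaround: if oce_tx_asic_stall_verify() flags this frame
  * as one that could stall the transmit ASIC, a VLAN tag is inserted
  * into the packet before it is posted; the exact trigger condition
  * lives in oce_tx_asic_stall_verify(), not shown in this section. */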
 874 if (oce_tx_asic_stall_verify(sc, m)) {
 875 m = oce_insert_vlan_tag(sc, m, &complete);
 876 if (!m) {
877 device_printf(sc->dev, "Insertion unsuccessful\n");
878 return 0;
879 }
880
881 }
882
883 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
884 /* consolidate packet buffers for TSO/LSO segment offload */
885#if defined(INET6) || defined(INET)
886 m = oce_tso_setup(sc, mpp);
887#else
888 m = NULL;
889#endif
890 if (m == NULL) {
891 rc = ENXIO;
892 goto free_ret;
893 }
894 }
895
896 pd = &wq->pckts[wq->pkt_desc_head];
897retry:
898 rc = bus_dmamap_load_mbuf_sg(wq->tag,
899 pd->map,
900 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
901 if (rc == 0) {
902 num_wqes = pd->nsegs + 1;
903 if (IS_BE(sc) || IS_SH(sc)) {
 904 /* Dummy WQE to keep the per-packet WQE count even on BE and SH ASICs. */
905 if (num_wqes & 1)
906 num_wqes++;
907 }
908 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
909 bus_dmamap_unload(wq->tag, pd->map);
910 return EBUSY;
911 }
912 atomic_store_rel_int(&wq->pkt_desc_head,
913 (wq->pkt_desc_head + 1) % \
914 OCE_WQ_PACKET_ARRAY_SIZE);
915 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
916 pd->mbuf = m;
917
918 nichdr =
919 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
920 nichdr->u0.dw[0] = 0;
921 nichdr->u0.dw[1] = 0;
922 nichdr->u0.dw[2] = 0;
923 nichdr->u0.dw[3] = 0;
924
925 nichdr->u0.s.complete = complete;
926 nichdr->u0.s.event = 1;
927 nichdr->u0.s.crc = 1;
928 nichdr->u0.s.forward = 0;
929 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
930 nichdr->u0.s.udpcs =
931 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
932 nichdr->u0.s.tcpcs =
933 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
934 nichdr->u0.s.num_wqe = num_wqes;
935 nichdr->u0.s.total_length = m->m_pkthdr.len;
936
937 if (m->m_flags & M_VLANTAG) {
 938 nichdr->u0.s.vlan = 1; /* VLAN present */
939 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
940 }
941
942 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
943 if (m->m_pkthdr.tso_segsz) {
944 nichdr->u0.s.lso = 1;
945 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
946 }
947 if (!IS_BE(sc) || !IS_SH(sc))
948 nichdr->u0.s.ipcs = 1;
949 }
950
951 RING_PUT(wq->ring, 1);
952 atomic_add_int(&wq->ring->num_used, 1);
953
954 for (i = 0; i < pd->nsegs; i++) {
955 nicfrag =
956 RING_GET_PRODUCER_ITEM_VA(wq->ring,
957 struct oce_nic_frag_wqe);
958 nicfrag->u0.s.rsvd0 = 0;
959 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
960 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
961 nicfrag->u0.s.frag_len = segs[i].ds_len;
962 pd->wqe_idx = wq->ring->pidx;
963 RING_PUT(wq->ring, 1);
964 atomic_add_int(&wq->ring->num_used, 1);
965 }
966 if (num_wqes > (pd->nsegs + 1)) {
967 nicfrag =
968 RING_GET_PRODUCER_ITEM_VA(wq->ring,
969 struct oce_nic_frag_wqe);
970 nicfrag->u0.dw[0] = 0;
971 nicfrag->u0.dw[1] = 0;
972 nicfrag->u0.dw[2] = 0;
973 nicfrag->u0.dw[3] = 0;
974 pd->wqe_idx = wq->ring->pidx;
975 RING_PUT(wq->ring, 1);
976 atomic_add_int(&wq->ring->num_used, 1);
977 pd->nsegs++;
978 }
979
980 sc->ifp->if_opackets++;
981 wq->tx_stats.tx_reqs++;
982 wq->tx_stats.tx_wrbs += num_wqes;
983 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
984 wq->tx_stats.tx_pkts++;
985
986 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
987 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
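 /* Ring the TX doorbell: reg_value packs the number of WQEs just posted
  * into bits 31:16 and the work queue id into bits 15:0. */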
988 reg_value = (num_wqes << 16) | wq->wq_id;
989 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
990
991 } else if (rc == EFBIG) {
992 if (retry_cnt == 0) {
993 m_temp = m_defrag(m, M_NOWAIT);
994 if (m_temp == NULL)
995 goto free_ret;
996 m = m_temp;
997 *mpp = m_temp;
998 retry_cnt = retry_cnt + 1;
999 goto retry;
1000 } else
1001 goto free_ret;
1002 } else if (rc == ENOMEM)
1003 return rc;
1004 else
1005 goto free_ret;
1006
1007 return 0;
1008
1009free_ret:
1010 m_freem(*mpp);
1011 *mpp = NULL;
1012 return rc;
1013}
1014
1015
1016static void
1017oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
1018{
1019 struct oce_packet_desc *pd;
1020 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1021 struct mbuf *m;
1022
1023 pd = &wq->pckts[wq->pkt_desc_tail];
1024 atomic_store_rel_int(&wq->pkt_desc_tail,
1025 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1026 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1027 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1028 bus_dmamap_unload(wq->tag, pd->map);
1029
1030 m = pd->mbuf;
1031 m_freem(m);
1032 pd->mbuf = NULL;
1033
1034
1035 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1036 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1037 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1038 oce_tx_restart(sc, wq);
1039 }
1040 }
1041}
1042
1043
1044static void
1045oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1046{
1047
1048 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1049 return;
1050
1051#if __FreeBSD_version >= 800000
1052 if (!drbr_empty(sc->ifp, wq->br))
1053#else
1054 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1055#endif
1056 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
1057
1058}
1059
1060
1061#if defined(INET6) || defined(INET)
1062static struct mbuf *
1063oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1064{
1065 struct mbuf *m;
1066#ifdef INET
1067 struct ip *ip;
1068#endif
1069#ifdef INET6
1070 struct ip6_hdr *ip6;
1071#endif
1072 struct ether_vlan_header *eh;
1073 struct tcphdr *th;
1074 uint16_t etype;
1075 int total_len = 0, ehdrlen = 0;
1076
1077 m = *mpp;
1078
1079 if (M_WRITABLE(m) == 0) {
1080 m = m_dup(*mpp, M_NOWAIT);
1081 if (!m)
1082 return NULL;
1083 m_freem(*mpp);
1084 *mpp = m;
1085 }
1086
1087 eh = mtod(m, struct ether_vlan_header *);
1088 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1089 etype = ntohs(eh->evl_proto);
1090 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1091 } else {
1092 etype = ntohs(eh->evl_encap_proto);
1093 ehdrlen = ETHER_HDR_LEN;
1094 }
1095
1096 switch (etype) {
1097#ifdef INET
1098 case ETHERTYPE_IP:
1099 ip = (struct ip *)(m->m_data + ehdrlen);
1100 if (ip->ip_p != IPPROTO_TCP)
1101 return NULL;
1102 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1103
1104 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1105 break;
1106#endif
1107#ifdef INET6
1108 case ETHERTYPE_IPV6:
1109 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1110 if (ip6->ip6_nxt != IPPROTO_TCP)
1111 return NULL;
1112 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1113
1114 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1115 break;
1116#endif
1117 default:
1118 return NULL;
1119 }
1120
1121 m = m_pullup(m, total_len);
1122 if (!m)
1123 return NULL;
1124 *mpp = m;
1125 return m;
1126
1127}
1128#endif /* INET6 || INET */
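/*
 * oce_tso_setup() guarantees the L2/L3/L4 headers are writable and
 * contiguous in the first mbuf (m_dup() for read-only mbufs, m_pullup()
 * to total_len) before a TSO WQE is built.  Header arithmetic, assuming
 * an untagged IPv4/TCP frame with no options:
 *
 *	ehdrlen   = ETHER_HDR_LEN		14 bytes
 *	ip hlen   = ip->ip_hl << 2		20 bytes
 *	tcp hlen  = th->th_off << 2		20 bytes
 *	total_len = 14 + 20 + 20		54 bytes pulled up
 */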
1129
1130void
1131oce_tx_task(void *arg, int npending)
1132{
1133 struct oce_wq *wq = arg;
1134 POCE_SOFTC sc = wq->parent;
1135 struct ifnet *ifp = sc->ifp;
1136 int rc = 0;
1137
1138#if __FreeBSD_version >= 800000
1139 LOCK(&wq->tx_lock);
1140 rc = oce_multiq_transmit(ifp, NULL, wq);
1141 if (rc) {
1142 device_printf(sc->dev,
1143 "TX[%d] restart failed\n", wq->queue_index);
1144 }
1145 UNLOCK(&wq->tx_lock);
1146#else
1147 oce_start(ifp);
1148#endif
1149
1150}
1151
1152
1153void
1154oce_start(struct ifnet *ifp)
1155{
1156 POCE_SOFTC sc = ifp->if_softc;
1157 struct mbuf *m;
1158 int rc = 0;
1159	int def_q = 0; /* Default TX queue is 0 */
1160
1161 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1162 IFF_DRV_RUNNING)
1163 return;
1164
1165 if (!sc->link_status)
1166 return;
1167
1168 do {
1169 IF_DEQUEUE(&sc->ifp->if_snd, m);
1170 if (m == NULL)
1171 break;
1172
1173 LOCK(&sc->wq[def_q]->tx_lock);
1174 rc = oce_tx(sc, &m, def_q);
1175 UNLOCK(&sc->wq[def_q]->tx_lock);
1176 if (rc) {
1177 if (m != NULL) {
1178 sc->wq[def_q]->tx_stats.tx_stops ++;
1179 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1180 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1181 m = NULL;
1182 }
1183 break;
1184 }
1185 if (m != NULL)
1186 ETHER_BPF_MTAP(ifp, m);
1187
1188 } while (TRUE);
1189
1190 return;
1191}
1192
1193
1194/* Handle the Completion Queue for transmit */
1195uint16_t
1196oce_wq_handler(void *arg)
1197{
1198 struct oce_wq *wq = (struct oce_wq *)arg;
1199 POCE_SOFTC sc = wq->parent;
1200 struct oce_cq *cq = wq->cq;
1201 struct oce_nic_tx_cqe *cqe;
1202 int num_cqes = 0;
1203
1204 bus_dmamap_sync(cq->ring->dma.tag,
1205 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1206 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1207 while (cqe->u0.dw[3]) {
1208 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1209
1210 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1211 if (wq->ring->cidx >= wq->ring->num_items)
1212 wq->ring->cidx -= wq->ring->num_items;
1213
1214 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1215 wq->tx_stats.tx_compl++;
1216 cqe->u0.dw[3] = 0;
1217 RING_GET(cq->ring, 1);
1218 bus_dmamap_sync(cq->ring->dma.tag,
1219 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1220 cqe =
1221 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1222 num_cqes++;
1223 }
1224
1225 if (num_cqes)
1226 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1227
1228 return 0;
1229}
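/*
 * Consumer-index note for oce_wq_handler() above: cidx is set to
 * wqe_index + 1 and wrapped with a single conditional subtraction, which
 * is sufficient because wqe_index is always below num_items.
 */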
1230
1231
1232#if __FreeBSD_version >= 1000000
1233static __inline void
1234drbr_stats_update(struct ifnet *ifp, int len, int mflags)
1235{
1236#ifndef NO_SLOW_STATS
1237 ifp->if_obytes += len;
1238 if (mflags & M_MCAST)
1239 ifp->if_omcasts++;
1240#endif
1241}
1242#endif
1243
1244static int
1245oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1246{
1247 POCE_SOFTC sc = ifp->if_softc;
1248 int status = 0, queue_index = 0;
1249 struct mbuf *next = NULL;
1250 struct buf_ring *br = NULL;
1251
1252 br = wq->br;
1253 queue_index = wq->queue_index;
1254
1255 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1256 IFF_DRV_RUNNING) {
1257 if (m != NULL)
1258 status = drbr_enqueue(ifp, br, m);
1259 return status;
1260 }
1261
1262 if (m != NULL) {
1263 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1264 return status;
1265 }
1266 while ((next = drbr_peek(ifp, br)) != NULL) {
1267 if (oce_tx(sc, &next, queue_index)) {
1268 if (next == NULL) {
1269 drbr_advance(ifp, br);
1270 } else {
1271 drbr_putback(ifp, br, next);
1272 wq->tx_stats.tx_stops ++;
1273 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1274 status = drbr_enqueue(ifp, br, next);
1275 }
1276 break;
1277 }
1278 drbr_advance(ifp, br);
1279 ifp->if_obytes += next->m_pkthdr.len;
1280 if (next->m_flags & M_MCAST)
1281 ifp->if_omcasts++;
1282 ETHER_BPF_MTAP(ifp, next);
1283 }
1284
1285 return status;
1286}
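/*
 * oce_multiq_transmit() uses the standard drbr peek/advance/putback
 * pattern: drbr_peek() leaves the mbuf on the ring; if oce_tx() fails
 * after consuming the mbuf it is simply advanced past, otherwise it is
 * put back at the head (preserving packet order) and the queue is marked
 * OACTIVE until completions free up ring space.
 */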
1287
1288
1289
1290
1291/*****************************************************************************
1292 *			    Receive  routines				     *
1293 *****************************************************************************/
1294
1295static void
1296oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1297{
1298 uint32_t out;
1299 struct oce_packet_desc *pd;
1300 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1301 int i, len, frag_len;
1302 struct mbuf *m = NULL, *tail = NULL;
1303 uint16_t vtag;
1304
1305 len = cqe->u0.s.pkt_size;
1306 if (!len) {
1307		/* partial DMA workaround for Lancer */
1308 oce_discard_rx_comp(rq, cqe);
1309 goto exit;
1310 }
1311
1312 /* Get vlan_tag value */
1313 if(IS_BE(sc) || IS_SH(sc))
1314 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1315 else
1316 vtag = cqe->u0.s.vlan_tag;
1317
1318
1319 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1320
1321 if (rq->packets_out == rq->packets_in) {
1322 device_printf(sc->dev,
1323 "RQ transmit descriptor missing\n");
1324 }
1325 out = rq->packets_out + 1;
1326 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1327 out = 0;
1328 pd = &rq->pckts[rq->packets_out];
1329 rq->packets_out = out;
1330
1331 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1332 bus_dmamap_unload(rq->tag, pd->map);
1333 rq->pending--;
1334
1335 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1336 pd->mbuf->m_len = frag_len;
1337
1338 if (tail != NULL) {
1339 /* additional fragments */
1340 pd->mbuf->m_flags &= ~M_PKTHDR;
1341 tail->m_next = pd->mbuf;
1342 tail = pd->mbuf;
1343 } else {
1344 /* first fragment, fill out much of the packet header */
1345 pd->mbuf->m_pkthdr.len = len;
1346 pd->mbuf->m_pkthdr.csum_flags = 0;
1347 if (IF_CSUM_ENABLED(sc)) {
1348 if (cqe->u0.s.l4_cksum_pass) {
1349 pd->mbuf->m_pkthdr.csum_flags |=
1350 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1351 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1352 }
1353 if (cqe->u0.s.ip_cksum_pass) {
1354 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1355 pd->mbuf->m_pkthdr.csum_flags |=
1356 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1357 }
1358 }
1359 }
1360 m = tail = pd->mbuf;
1361 }
1362 pd->mbuf = NULL;
1363 len -= frag_len;
1364 }
1365
1366 if (m) {
1367 if (!oce_cqe_portid_valid(sc, cqe)) {
1368 m_freem(m);
1369 goto exit;
1370 }
1371
1372 m->m_pkthdr.rcvif = sc->ifp;
1373#if __FreeBSD_version >= 800000
1374 if (rq->queue_index)
1375 m->m_pkthdr.flowid = (rq->queue_index - 1);
1376 else
1377 m->m_pkthdr.flowid = rq->queue_index;
1378 m->m_flags |= M_FLOWID;
1379#endif
1380		/* This determines if the vlan tag is valid */
1381 if (oce_cqe_vtp_valid(sc, cqe)) {
1382 if (sc->function_mode & FNM_FLEX10_MODE) {
1383 /* FLEX10. If QnQ is not set, neglect VLAN */
1384 if (cqe->u0.s.qnq) {
1385 m->m_pkthdr.ether_vtag = vtag;
1386 m->m_flags |= M_VLANTAG;
1387 }
1388 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1389				/* In UMC mode the pvid is generally stripped by
1390				   hw, but in some cases we have seen frames still
1391				   arrive carrying it. So if pvid == vlan, ignore the vlan.
1392 */
1393 m->m_pkthdr.ether_vtag = vtag;
1394 m->m_flags |= M_VLANTAG;
1395 }
1396 }
1397
1398 sc->ifp->if_ipackets++;
1399#if defined(INET6) || defined(INET)
1400 /* Try to queue to LRO */
1401 if (IF_LRO_ENABLED(sc) &&
1402 (cqe->u0.s.ip_cksum_pass) &&
1403 (cqe->u0.s.l4_cksum_pass) &&
1404 (!cqe->u0.s.ip_ver) &&
1405 (rq->lro.lro_cnt != 0)) {
1406
1407 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1408 rq->lro_pkts_queued ++;
1409 goto post_done;
1410 }
1411 /* If LRO posting fails then try to post to STACK */
1412 }
1413#endif
1414
1415 (*sc->ifp->if_input) (sc->ifp, m);
1416#if defined(INET6) || defined(INET)
1417post_done:
1418#endif
1419 /* Update rx stats per queue */
1420 rq->rx_stats.rx_pkts++;
1421 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1422 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1423 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1424 rq->rx_stats.rx_mcast_pkts++;
1425 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1426 rq->rx_stats.rx_ucast_pkts++;
1427 }
1428exit:
1429 return;
1430}
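/*
 * Reassembly in oce_rx(): a frame may span up to num_fragments posted
 * clusters of at most frag_size bytes each.  The first fragment keeps
 * M_PKTHDR and carries the total pkt_size plus checksum/VLAN state;
 * later fragments are linked through m_next with M_PKTHDR cleared.
 */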
1431
1432
1433static void
1434oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1435{
1436 uint32_t out, i = 0;
1437 struct oce_packet_desc *pd;
1438 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1439 int num_frags = cqe->u0.s.num_fragments;
1440
1441 for (i = 0; i < num_frags; i++) {
1442 if (rq->packets_out == rq->packets_in) {
1443 device_printf(sc->dev,
1444 "RQ transmit descriptor missing\n");
1445 }
1446 out = rq->packets_out + 1;
1447 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1448 out = 0;
1449 pd = &rq->pckts[rq->packets_out];
1450 rq->packets_out = out;
1451
1452 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1453 bus_dmamap_unload(rq->tag, pd->map);
1454 rq->pending--;
1455 m_freem(pd->mbuf);
1456 }
1457
1458}
1459
1460
1461static int
1462oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1463{
1464 struct oce_nic_rx_cqe_v1 *cqe_v1;
1465 int vtp = 0;
1466
1467 if (sc->be3_native) {
1468 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1469 vtp = cqe_v1->u0.s.vlan_tag_present;
1470 } else
1471 vtp = cqe->u0.s.vlan_tag_present;
1472
1473 return vtp;
1474
1475}
1476
1477
1478static int
1479oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1480{
1481 struct oce_nic_rx_cqe_v1 *cqe_v1;
1482 int port_id = 0;
1483
1484 if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1485 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1486 port_id = cqe_v1->u0.s.port;
1487 if (sc->port_id != port_id)
1488 return 0;
1489 } else
1490		; /* For BE3 legacy and Lancer this is a no-op */
1491
1492 return 1;
1493
1494}
1495
1496#if defined(INET6) || defined(INET)
1497static void
1498oce_rx_flush_lro(struct oce_rq *rq)
1499{
1500 struct lro_ctrl *lro = &rq->lro;
1501 struct lro_entry *queued;
1502 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1503
1504 if (!IF_LRO_ENABLED(sc))
1505 return;
1506
1507 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1508 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1509 tcp_lro_flush(lro, queued);
1510 }
1511 rq->lro_pkts_queued = 0;
1512
1513 return;
1514}
1515
1516
1517static int
1518oce_init_lro(POCE_SOFTC sc)
1519{
1520 struct lro_ctrl *lro = NULL;
1521 int i = 0, rc = 0;
1522
1523 for (i = 0; i < sc->nrqs; i++) {
1524 lro = &sc->rq[i]->lro;
1525 rc = tcp_lro_init(lro);
1526 if (rc != 0) {
1527 device_printf(sc->dev, "LRO init failed\n");
1528 return rc;
1529 }
1530 lro->ifp = sc->ifp;
1531 }
1532
1533 return rc;
1534}
1535
1536
1537void
1538oce_free_lro(POCE_SOFTC sc)
1539{
1540 struct lro_ctrl *lro = NULL;
1541 int i = 0;
1542
1543 for (i = 0; i < sc->nrqs; i++) {
1544 lro = &sc->rq[i]->lro;
1545 if (lro)
1546 tcp_lro_free(lro);
1547 }
1548}
1549#endif
1550
1551int
1552oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1553{
1554 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1555 int i, in, rc;
1556 struct oce_packet_desc *pd;
1557 bus_dma_segment_t segs[6];
1558 int nsegs, added = 0;
1559 struct oce_nic_rqe *rqe;
1560 pd_rxulp_db_t rxdb_reg;
1561
1562 bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1563 for (i = 0; i < count; i++) {
1564 in = rq->packets_in + 1;
1565 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1566 in = 0;
1567 if (in == rq->packets_out)
1568 break; /* no more room */
1569
1570 pd = &rq->pckts[rq->packets_in];
1571 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1572 if (pd->mbuf == NULL)
1573 break;
1574
1575 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1576 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1577 pd->map,
1578 pd->mbuf,
1579 segs, &nsegs, BUS_DMA_NOWAIT);
1580 if (rc) {
1581 m_free(pd->mbuf);
1582 break;
1583 }
1584
1585 if (nsegs != 1) {
1586 i--;
1587 continue;
1588 }
1589
1590 rq->packets_in = in;
1591 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1592
1593 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1594 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1595 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1596 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1597 RING_PUT(rq->ring, 1);
1598 added++;
1599 rq->pending++;
1600 }
1601 if (added != 0) {
1602 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1603 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1604 rxdb_reg.bits.qid = rq->rq_id;
1605 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1606 added -= OCE_MAX_RQ_POSTS;
1607 }
1608 if (added > 0) {
1609 rxdb_reg.bits.qid = rq->rq_id;
1610 rxdb_reg.bits.num_posted = added;
1611 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1612 }
1613 }
1614
1615 return 0;
1616}
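/*
 * RX posting is batched because the RXULP doorbell can report at most
 * OCE_MAX_RQ_POSTS buffers per write.  Illustrative example (assuming a
 * hypothetical OCE_MAX_RQ_POSTS of 255): replenishing added == 600
 * buffers issues doorbells of 255, 255 and finally 90 posted buffers.
 */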
1617
1618
1619/* Handle the Completion Queue for receive */
1620uint16_t
1621oce_rq_handler(void *arg)
1622{
1623 struct oce_rq *rq = (struct oce_rq *)arg;
1624 struct oce_cq *cq = rq->cq;
1625 POCE_SOFTC sc = rq->parent;
1626 struct oce_nic_rx_cqe *cqe;
1627 int num_cqes = 0, rq_buffers_used = 0;
1628
1629
1630 bus_dmamap_sync(cq->ring->dma.tag,
1631 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1632 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1633 while (cqe->u0.dw[2]) {
1634 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1635
1636 RING_GET(rq->ring, 1);
1637 if (cqe->u0.s.error == 0) {
1638 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1639 } else {
1640 rq->rx_stats.rxcp_err++;
1641 sc->ifp->if_ierrors++;
1642 /* Post L3/L4 errors to stack.*/
1643 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1644 }
1645 rq->rx_stats.rx_compl++;
1646 cqe->u0.dw[2] = 0;
1647
1648#if defined(INET6) || defined(INET)
1649 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1650 oce_rx_flush_lro(rq);
1651 }
1652#endif
1653
1654 RING_GET(cq->ring, 1);
1655 bus_dmamap_sync(cq->ring->dma.tag,
1656 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1657 cqe =
1658 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1659 num_cqes++;
1660 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1661 break;
1662 }
1663
1664#if defined(INET6) || defined(INET)
1665 if (IF_LRO_ENABLED(sc))
1666 oce_rx_flush_lro(rq);
1667#endif
1668
1669 if (num_cqes) {
1670 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1671 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1672 if (rq_buffers_used > 1)
1673 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1674 }
1675
1676 return 0;
1677
1678}
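/*
 * oce_rq_handler() is budgeted: at most oce_max_rsp_handled completions
 * (8 on XE201) are processed per call so one busy RQ cannot monopolize
 * the CPU.  On exit the CQ is re-armed and consumed buffers are
 * replenished, leaving one descriptor slot unused so a full ring can be
 * told apart from an empty one.
 */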
1679
1680
1681
1682
1683/*****************************************************************************
1684 *			  Helper functions in this file			     *
1685 *****************************************************************************/
1686
1687static int
1688oce_attach_ifp(POCE_SOFTC sc)
1689{
1690
1691 sc->ifp = if_alloc(IFT_ETHER);
1692 if (!sc->ifp)
1693 return ENOMEM;
1694
1695 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1696 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1697 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1698
1699 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1700 sc->ifp->if_ioctl = oce_ioctl;
1701 sc->ifp->if_start = oce_start;
1702 sc->ifp->if_init = oce_init;
1703 sc->ifp->if_mtu = ETHERMTU;
1704 sc->ifp->if_softc = sc;
1705#if __FreeBSD_version >= 800000
1706 sc->ifp->if_transmit = oce_multiq_start;
1707 sc->ifp->if_qflush = oce_multiq_flush;
1708#endif
1709
1710 if_initname(sc->ifp,
1711 device_get_name(sc->dev), device_get_unit(sc->dev));
1712
1713 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1714 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1715 IFQ_SET_READY(&sc->ifp->if_snd);
1716
1717 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1718 sc->ifp->if_hwassist |= CSUM_TSO;
1719 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1720
1721 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1722 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1723 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1724
1725#if defined(INET6) || defined(INET)
1726 sc->ifp->if_capabilities |= IFCAP_TSO;
1727 sc->ifp->if_capabilities |= IFCAP_LRO;
1728 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1729#endif
1730
1731 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1732 if_initbaudrate(sc->ifp, IF_Gbps(10));
1733
1734#if __FreeBSD_version >= 1000000
1735 sc->ifp->if_hw_tsomax = OCE_MAX_TSO_SIZE;
1736#endif
1737
1738 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1739
1740 return 0;
1741}
1742
1743
1744static void
1745oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1746{
1747 POCE_SOFTC sc = ifp->if_softc;
1748
1749 if (ifp->if_softc != arg)
1750 return;
1751 if ((vtag == 0) || (vtag > 4095))
1752 return;
1753
1754 sc->vlan_tag[vtag] = 1;
1755 sc->vlans_added++;
1756 if (sc->vlans_added <= (sc->max_vlans + 1))
1757 oce_vid_config(sc);
1758}
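/*
 * oce_add_vlan() stops calling oce_vid_config() once vlans_added passes
 * max_vlans + 1; by that point the filter table is exhausted and the
 * card has presumably already been switched to vlan-promiscuous mode,
 * so further per-tag mailbox updates would be redundant.
 */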
1759
1760
1761static void
1762oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1763{
1764 POCE_SOFTC sc = ifp->if_softc;
1765
1766 if (ifp->if_softc != arg)
1767 return;
1768 if ((vtag == 0) || (vtag > 4095))
1769 return;
1770
1771 sc->vlan_tag[vtag] = 0;
1772 sc->vlans_added--;
1773 oce_vid_config(sc);
1774}
1775
1776
1777/*
1778 * A max of 64 vlans can be configured in BE. If the user configures
1779 * more, place the card in vlan promiscuous mode.
1780 */
1781static int
1782oce_vid_config(POCE_SOFTC sc)
1783{
1784 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1785 uint16_t ntags = 0, i;
1786 int status = 0;
1787
1788 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1789 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1790 for (i = 0; i < MAX_VLANS; i++) {
1791 if (sc->vlan_tag[i]) {
1792 vtags[ntags].vtag = i;
1793 ntags++;
1794 }
1795 }
1796 if (ntags)
1797 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1798 vtags, ntags, 1, 0);
1799 } else
1800 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1801 NULL, 0, 1, 1);
1802 return status;
1803}
1804
1805
1806static void
1807oce_mac_addr_set(POCE_SOFTC sc)
1808{
1809 uint32_t old_pmac_id = sc->pmac_id;
1810 int status = 0;
1811
1812
1813 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1814 sc->macaddr.size_of_struct);
1815 if (!status)
1816 return;
1817
1818 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1819 sc->if_id, &sc->pmac_id);
1820 if (!status) {
1821 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1822 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1823 sc->macaddr.size_of_struct);
1824 }
1825 if (status)
1826 device_printf(sc->dev, "Failed update macaddress\n");
1827
1828}
1829
1830
1831static int
1832oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1833{
1834 POCE_SOFTC sc = ifp->if_softc;
1835 struct ifreq *ifr = (struct ifreq *)data;
1836 int rc = ENXIO;
1837 char cookie[32] = {0};
1838 void *priv_data = (void *)ifr->ifr_data;
1839 void *ioctl_ptr;
1840 uint32_t req_size;
1841 struct mbx_hdr req;
1842 OCE_DMA_MEM dma_mem;
1843 struct mbx_common_get_cntl_attr *fw_cmd;
1844
1845 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1846 return EFAULT;
1847
1848 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1849 return EINVAL;
1850
1851 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1852 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1853 return EFAULT;
1854
1855 req_size = le32toh(req.u0.req.request_length);
1856 if (req_size > 65536)
1857 return EINVAL;
1858
1859 req_size += sizeof(struct mbx_hdr);
1860 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1861 if (rc)
1862 return ENOMEM;
1863
1864 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1865 rc = EFAULT;
1866 goto dma_free;
1867 }
1868
1869 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1870 if (rc) {
1871 rc = EIO;
1872 goto dma_free;
1873 }
1874
1875 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1876 rc = EFAULT;
1877
1878 /*
1879	   The firmware fills in all the attributes for this ioctl except
1880	   the driver version, so fill it in here.
1881 */
1882 if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1883 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1884 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1885 COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1886 }
1887
1888dma_free:
1889 oce_dma_free(sc, &dma_mem);
1890 return rc;
1891
1892}
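/*
 * Layout of the user buffer parsed by oce_handle_passthrough(), as
 * reconstructed from the copyin() offsets above (illustrative):
 *
 *	+--------------+----------------+------------------------------+
 *	| IOCTL_COOKIE | struct mbx_hdr | command payload              |
 *	+--------------+----------------+------------------------------+
 *	               |< request_length + sizeof(struct mbx_hdr) ---->|
 *
 * request_length is capped at 64KB before the DMA buffer is allocated,
 * and the whole request is bounced through that buffer to the mailbox.
 */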
1893
1894static void
1895oce_eqd_set_periodic(POCE_SOFTC sc)
1896{
1897 struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1898 struct oce_aic_obj *aic;
1899 struct oce_eq *eqo;
1900 uint64_t now = 0, delta;
1901 int eqd, i, num = 0;
1902 uint32_t ips = 0;
1903 int tps;
1904
1905 for (i = 0 ; i < sc->neqs; i++) {
1906 eqo = sc->eq[i];
1907 aic = &sc->aic_obj[i];
1908 /* When setting the static eq delay from the user space */
1909 if (!aic->enable) {
1910 eqd = aic->et_eqd;
1911 goto modify_eqd;
1912 }
1913
1914 now = ticks;
1915
1916		/* Overflow check */
1917 if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1918 goto done;
1919
1920 delta = now - aic->ticks;
1921 tps = delta/hz;
1922
1923 /* Interrupt rate based on elapsed ticks */
1924 if(tps)
1925 ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1926
1927 if (ips > INTR_RATE_HWM)
1928 eqd = aic->cur_eqd + 20;
1929 else if (ips < INTR_RATE_LWM)
1930 eqd = aic->cur_eqd / 2;
1931 else
1932 goto done;
1933
1934 if (eqd < 10)
1935 eqd = 0;
1936
1937 /* Make sure that the eq delay is in the known range */
1938 eqd = min(eqd, aic->max_eqd);
1939 eqd = max(eqd, aic->min_eqd);
1940
1941modify_eqd:
1942 if (eqd != aic->cur_eqd) {
1943 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1944 set_eqd[num].eq_id = eqo->eq_id;
1945 aic->cur_eqd = eqd;
1946 num++;
1947 }
1948done:
1949 aic->intr_prev = eqo->intr;
1950 aic->ticks = now;
1951 }
1952
1953	/* Is there at least one EQ that needs to be modified? */
1954 if(num)
1955 oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1956
1957}
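/*
 * Worked example for the adaptive EQ-delay loop above (hypothetical
 * numbers): with tps == 1 and 50000 new interrupts in that interval,
 * ips == 50000.  Above INTR_RATE_HWM the delay grows by 20; below
 * INTR_RATE_LWM it is halved; any result under 10 is forced to 0
 * (coalescing off) before being clamped to [min_eqd, max_eqd] and
 * scaled for firmware as delay_multiplier = (eqd * 65) / 100.
 */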
1958
1959static void oce_detect_hw_error(POCE_SOFTC sc)
1960{
1961
1962 uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
1963 uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1964 uint32_t i;
1965
1966 if (sc->hw_error)
1967 return;
1968
1969 if (IS_XE201(sc)) {
1970 sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
1971 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1972 sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
1973 sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
1974 }
1975 } else {
1976 ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
1977 ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
1978 ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
1979 ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
1980
1981 ue_low = (ue_low & ~ue_low_mask);
1982 ue_high = (ue_high & ~ue_high_mask);
1983 }
1984
1985	/* On certain platforms BE hardware can indicate spurious UEs.
1986	 * Let the h/w stop working completely on its own in case of a
1987	 * real UE; hence hw_error is not set for UE detection.
1988 */
1989 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1990 sc->hw_error = TRUE;
1991 device_printf(sc->dev, "Error detected in the card\n");
1992 }
1993
1994 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1995 device_printf(sc->dev,
1996 "ERR: sliport status 0x%x\n", sliport_status);
1997 device_printf(sc->dev,
1998 "ERR: sliport error1 0x%x\n", sliport_err1);
1999 device_printf(sc->dev,
2000 "ERR: sliport error2 0x%x\n", sliport_err2);
2001 }
2002
2003 if (ue_low) {
2004 for (i = 0; ue_low; ue_low >>= 1, i++) {
2005 if (ue_low & 1)
2006 device_printf(sc->dev, "UE: %s bit set\n",
2007 ue_status_low_desc[i]);
2008 }
2009 }
2010
2011 if (ue_high) {
2012 for (i = 0; ue_high; ue_high >>= 1, i++) {
2013 if (ue_high & 1)
2014 device_printf(sc->dev, "UE: %s bit set\n",
2015 ue_status_hi_desc[i]);
2016 }
2017 }
2018
2019}
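/*
 * oce_detect_hw_error() covers two error-reporting schemes: Lancer
 * (XE201) parts expose fatal state via the SLIPORT status/error
 * registers, while BE parts report unrecoverable-error (UE) bits in
 * PCICFG space, masked here with their corresponding *_MASK registers.
 * Only the SLIPORT path latches sc->hw_error, since BE UE bits can be
 * spurious on some platforms (see the comment above).
 */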
2020
2021
2022static void
2023oce_local_timer(void *arg)
2024{
2025 POCE_SOFTC sc = arg;
2026 int i = 0;
2027
2028 oce_detect_hw_error(sc);
2029 oce_refresh_nic_stats(sc);
2030 oce_refresh_queue_stats(sc);
2031 oce_mac_addr_set(sc);
2032
2033 /* TX Watch Dog*/
2034 for (i = 0; i < sc->nwqs; i++)
2035 oce_tx_restart(sc, sc->wq[i]);
2036
2037 /* calculate and set the eq delay for optimal interrupt rate */
2038 if (IS_BE(sc) || IS_SH(sc))
2039 oce_eqd_set_periodic(sc);
2040
2041 callout_reset(&sc->timer, hz, oce_local_timer, sc);
2042}
2043
2044
2045/* NOTE : This should only be called holding
2046 * DEVICE_LOCK.
2047 */
2048static void
2049oce_if_deactivate(POCE_SOFTC sc)
2050{
2051 int i, mtime = 0;
2052 int wait_req = 0;
2053 struct oce_rq *rq;
2054 struct oce_wq *wq;
2055 struct oce_eq *eq;
2056
2057 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2058
2059	/* Wait a maximum of 400ms for TX completions to be done */
2060 while (mtime < 400) {
2061 wait_req = 0;
2062 for_all_wq_queues(sc, wq, i) {
2063 if (wq->ring->num_used) {
2064 wait_req = 1;
2065 DELAY(1);
2066 break;
2067 }
2068 }
2069 mtime += 1;
2070 if (!wait_req)
2071 break;
2072 }
2073
2074 /* Stop intrs and finish any bottom halves pending */
2075 oce_hw_intr_disable(sc);
2076
2077	/* Since taskqueue_drain takes the Giant Lock, we should not hold
2078	   any other lock. So unlock the device lock and reacquire it after
2079	   completing taskqueue_drain.
2080 */
2081 UNLOCK(&sc->dev_lock);
2082 for (i = 0; i < sc->intr_count; i++) {
2083 if (sc->intrs[i].tq != NULL) {
2084 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2085 }
2086 }
2087 LOCK(&sc->dev_lock);
2088
2089 /* Delete RX queue in card with flush param */
2090 oce_stop_rx(sc);
2091
2092 /* Invalidate any pending cq and eq entries*/
2093 for_all_evnt_queues(sc, eq, i)
2094 oce_drain_eq(eq);
2095 for_all_rq_queues(sc, rq, i)
2096 oce_drain_rq_cq(rq);
2097 for_all_wq_queues(sc, wq, i)
2098 oce_drain_wq_cq(wq);
2099
2100	/* But we still need to get MCC async events.
2101	   So enable intrs and also arm the first EQ.
2102 */
2103 oce_hw_intr_enable(sc);
2104 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2105
2106 DELAY(10);
2107}
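/*
 * The shutdown ordering in oce_if_deactivate() matters: TX rings get up
 * to ~400ms to drain, interrupts are masked before the per-vector
 * taskqueues are drained (with the device lock dropped to avoid a
 * lock-order reversal against Giant), and only then are RX queues
 * deleted and stale CQ/EQ entries invalidated.  Interrupts are finally
 * re-enabled solely so MCC async events such as link state keep
 * arriving while the interface is down.
 */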
2108
2109
2110static void
2111oce_if_activate(POCE_SOFTC sc)
2112{
2113 struct oce_eq *eq;
2114 struct oce_rq *rq;
2115 struct oce_wq *wq;
2116 int i, rc = 0;
2117
2118 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2119
2120 oce_hw_intr_disable(sc);
2121
2122 oce_start_rx(sc);
2123
2124 for_all_rq_queues(sc, rq, i) {
2125 rc = oce_start_rq(rq);
2126 if (rc)
2127 device_printf(sc->dev, "Unable to start RX\n");
2128 }
2129
2130 for_all_wq_queues(sc, wq, i) {
2131 rc = oce_start_wq(wq);
2132 if (rc)
2133 device_printf(sc->dev, "Unable to start TX\n");
2134 }
2135
2136
2137 for_all_evnt_queues(sc, eq, i)
2138 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2139
2140 oce_hw_intr_enable(sc);
2141
2142}
2143
2144static void
2145process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2146{
2147 /* Update Link status */
2148 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2149 ASYNC_EVENT_LINK_UP) {
2150 sc->link_status = ASYNC_EVENT_LINK_UP;
2151 if_link_state_change(sc->ifp, LINK_STATE_UP);
2152 } else {
2153 sc->link_status = ASYNC_EVENT_LINK_DOWN;
2154 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2155 }
2156
2157 /* Update speed */
2158 sc->link_speed = acqe->u0.s.speed;
2159 sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
2160
2161}
2162
2163
2164/* Handle the Completion Queue for the Mailbox/Async notifications */
2165uint16_t
2166oce_mq_handler(void *arg)
2167{
2168 struct oce_mq *mq = (struct oce_mq *)arg;
2169 POCE_SOFTC sc = mq->parent;
2170 struct oce_cq *cq = mq->cq;
2171 int num_cqes = 0, evt_type = 0, optype = 0;
2172 struct oce_mq_cqe *cqe;
2173 struct oce_async_cqe_link_state *acqe;
2174 struct oce_async_event_grp5_pvid_state *gcqe;
2175 struct oce_async_event_qnq *dbgcqe;
2176
2177
2178 bus_dmamap_sync(cq->ring->dma.tag,
2179 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2180 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2181
2182 while (cqe->u0.dw[3]) {
2183 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2184 if (cqe->u0.s.async_event) {
2185 evt_type = cqe->u0.s.event_type;
2186 optype = cqe->u0.s.async_type;
2187 if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
2188 /* Link status evt */
2189 acqe = (struct oce_async_cqe_link_state *)cqe;
2190 process_link_state(sc, acqe);
2191 } else if ((evt_type == ASYNC_EVENT_GRP5) &&
2192 (optype == ASYNC_EVENT_PVID_STATE)) {
2193 /* GRP5 PVID */
2194 gcqe =
2195 (struct oce_async_event_grp5_pvid_state *)cqe;
2196 if (gcqe->enabled)
2197 sc->pvid = gcqe->tag & VLAN_VID_MASK;
2198 else
2199 sc->pvid = 0;
2200
2201 }
2202 else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
2203 optype == ASYNC_EVENT_DEBUG_QNQ) {
2204 dbgcqe =
2205 (struct oce_async_event_qnq *)cqe;
2206 if(dbgcqe->valid)
2207 sc->qnqid = dbgcqe->vlan_tag;
2208 sc->qnq_debug_event = TRUE;
2209 }
2210 }
2211 cqe->u0.dw[3] = 0;
2212 RING_GET(cq->ring, 1);
2213 bus_dmamap_sync(cq->ring->dma.tag,
2214 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2215 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2216 num_cqes++;
2217 }
2218
2219 if (num_cqes)
2220 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2221
2222 return 0;
2223}
2224
2225
2226static void
2227setup_max_queues_want(POCE_SOFTC sc)
2228{
2229	/* Check if it is a FLEX machine. If so, don't use RSS */
2230 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2231 (sc->function_mode & FNM_UMC_MODE) ||
2232 (sc->function_mode & FNM_VNIC_MODE) ||
2233 (!is_rss_enabled(sc)) ||
2234 (sc->flags & OCE_FLAGS_BE2)) {
2235 sc->nrqs = 1;
2236 sc->nwqs = 1;
2237 } else {
2238 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2239 sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2240 }
2241}
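/*
 * Queue-count sketch for the RSS case above: one WQ and one RSS RQ per
 * CPU, capped by the adapter's nrssqs, plus one extra default (non-RSS)
 * RQ.  Hypothetical example: 8 CPUs with nrssqs == 4 gives nwqs == 4
 * and nrqs == 5.  update_queues_got() later trims these to the number
 * of interrupt vectors actually granted.
 */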
2242
2243
2244static void
2245update_queues_got(POCE_SOFTC sc)
2246{
2247 if (is_rss_enabled(sc)) {
2248 sc->nrqs = sc->intr_count + 1;
2249 sc->nwqs = sc->intr_count;
2250 } else {
2251 sc->nrqs = 1;
2252 sc->nwqs = 1;
2253 }
2254}
2255
2256static int
2257oce_check_ipv6_ext_hdr(struct mbuf *m)
2258{
2259 struct ether_header *eh = mtod(m, struct ether_header *);
2260 caddr_t m_datatemp = m->m_data;
2261
2262 if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2263 m->m_data += sizeof(struct ether_header);
2264 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2265
2266 if((ip6->ip6_nxt != IPPROTO_TCP) && \
2267 (ip6->ip6_nxt != IPPROTO_UDP)){
2268 struct ip6_ext *ip6e = NULL;
2269 m->m_data += sizeof(struct ip6_hdr);
2270
2271 ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2272 if(ip6e->ip6e_len == 0xff) {
2273 m->m_data = m_datatemp;
2274 return TRUE;
2275 }
2276 }
2277 m->m_data = m_datatemp;
2278 }
2279 return FALSE;
2280}
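/*
 * oce_check_ipv6_ext_hdr() flags IPv6 frames whose next header is
 * neither TCP nor UDP and whose first extension header reports an
 * ip6e_len of 0xff.  oce_tx_asic_stall_verify() combines this with
 * is_be3_a1() and IS_QNQ_OR_UMC() to spot the packet shape that can
 * stall the BE3 A1 TX engine, so the TX path can handle such frames
 * specially instead of posting them as-is.
 */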
2281
2282static int
2283is_be3_a1(POCE_SOFTC sc)
2284{
2285 if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2286 return TRUE;
2287 }
2288 return FALSE;
2289}
2290
2291static struct mbuf *
2292oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2293{
2294 uint16_t vlan_tag = 0;
2295
2296 if(!M_WRITABLE(m))
2297 return NULL;
2298
2299 /* Embed vlan tag in the packet if it is not part of it */
2300 if(m->m_flags & M_VLANTAG) {
2301 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2302 m->m_flags &= ~M_VLANTAG;
2303 }
2304
2305 /* if UMC, ignore vlan tag insertion and instead insert pvid */
2306 if(sc->pvid) {
2307 if(!vlan_tag)
2308 vlan_tag = sc->pvid;
2309 *complete = FALSE;
2310 }
2311
2312 if(vlan_tag) {
2313 m = ether_vlanencap(m, vlan_tag);
2314 }
2315
2316 if(sc->qnqid) {
2317 m = ether_vlanencap(m, sc->qnqid);
2318 *complete = FALSE;
2319 }
2320 return m;
2321}
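/*
 * oce_insert_vlan_tag() encapsulates tags in software when hardware
 * insertion cannot be used: the frame's own tag (or the port pvid in
 * UMC mode) goes innermost, and in QnQ mode a second ether_vlanencap()
 * stacks the outer qnqid tag.  *complete is cleared whenever the
 * hardware should not additionally insert a tag of its own.
 */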
2322
2323static int
2324oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2325{
2326 if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2327 oce_check_ipv6_ext_hdr(m)) {
2328 return TRUE;
2329 }
2330 return FALSE;
2331}
2332
2333static void
2334oce_get_config(POCE_SOFTC sc)
2335{
2336 int rc = 0;
2337 uint32_t max_rss = 0;
2338
2339 if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2340 max_rss = OCE_LEGACY_MODE_RSS;
2341 else
2342 max_rss = OCE_MAX_RSS;
2343
2344 if (!IS_BE(sc)) {
2345 rc = oce_get_func_config(sc);
2346 if (rc) {
2347 sc->nwqs = OCE_MAX_WQ;
2348 sc->nrssqs = max_rss;
2349 sc->nrqs = sc->nrssqs + 1;
2350 }
2351 }
2352 else {
2353 rc = oce_get_profile_config(sc);
2354 sc->nrssqs = max_rss;
2355 sc->nrqs = sc->nrssqs + 1;
2356 if (rc)
2357 sc->nwqs = OCE_MAX_WQ;
2358 }
2359}