1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 220649 2011-04-15 03:09:27Z np $");
30
31#include "opt_inet.h"
32
33#include <sys/param.h>
34#include <sys/conf.h>
35#include <sys/priv.h>
36#include <sys/kernel.h>
37#include <sys/bus.h>
38#include <sys/module.h>
39#include <sys/malloc.h>
40#include <sys/queue.h>
41#include <sys/taskqueue.h>
42#include <sys/pciio.h>
43#include <dev/pci/pcireg.h>
44#include <dev/pci/pcivar.h>
45#include <dev/pci/pci_private.h>
46#include <sys/firmware.h>
47#include <sys/sbuf.h>
48#include <sys/smp.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52#include <net/ethernet.h>
53#include <net/if.h>
54#include <net/if_types.h>
55#include <net/if_dl.h>
56
57#include "common/t4_hw.h"
58#include "common/common.h"
59#include "common/t4_regs.h"
60#include "common/t4_regs_values.h"
61#include "common/t4fw_interface.h"
62#include "t4_ioctl.h"
63
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus methods for the t4nex (adapter nexus) device. */
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	{ 0, 0 }
};
/* Driver for the adapter itself; softc is the full struct adapter. */
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
84
85
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
/* newbus methods for each cxgbe child device (one per port). */
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),
	{ 0, 0 }
};
/* Per-port driver; softc is struct port_info. */
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/* Character device (/dev/t4nexN) used for driver ioctls, e.g. regdump. */
static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};
114
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static void cxgbe_start(struct ifnet *);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

/* malloc(9) type used for all of this driver's allocations. */
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
125
/*
 * Tunables.  All of these are loader tunables (CTLFLAG_RDTUN) under the
 * hw.cxgbe sysctl node; they must be set before the driver attaches.
 *
 * NOTE(review): several of the backing variables are signed int (e.g.
 * pktc_idx_10g/pktc_idx_1g, intr_types, intr_fwd) but are exported via
 * SYSCTL_UINT.  Harmless for display, but SYSCTL_INT would match the
 * declared types -- confirm intent.
 */
SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe driver parameters");

/* Non-zero forces a firmware download on every attach. */
static int force_firmware_install = 0;
TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN,
    &force_firmware_install, 0, "install firmware on every attach.");

/*
 * Holdoff timer and packet counter values.
 */
static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */

/*
 * Max # of tx and rx queues to use for each 10G and 1G port.
 */
static unsigned int max_ntxq_10g = 8;
TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN,
    &max_ntxq_10g, 0, "maximum number of tx queues per 10G port.");

static unsigned int max_nrxq_10g = 8;
TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN,
    &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port).");

static unsigned int max_ntxq_1g = 2;
TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN,
    &max_ntxq_1g, 0, "maximum number of tx queues per 1G port.");

static unsigned int max_nrxq_1g = 2;
TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN,
    &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port).");

/*
 * Holdoff parameters for 10G and 1G ports.  The indices select entries
 * from intr_timer[]/intr_pktcount[] above.
 */
static unsigned int tmr_idx_10g = 1;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN,
    &tmr_idx_10g, 0,
    "default timer index for interrupt holdoff (10G ports).");

static int pktc_idx_10g = 2;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN,
    &pktc_idx_10g, 0,
    "default pkt counter index for interrupt holdoff (10G ports).");

static unsigned int tmr_idx_1g = 1;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN,
    &tmr_idx_1g, 0,
    "default timer index for interrupt holdoff (1G ports).");

static int pktc_idx_1g = 2;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN,
    &pktc_idx_1g, 0,
    "default pkt counter index for interrupt holdoff (1G ports).");

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN,
    &qsize_txq, 0, "default queue size of NIC tx queues.");

static unsigned int qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN,
    &qsize_rxq, 0, "default queue size of NIC rx queues.");

/*
 * Interrupt types allowed (bitmask of INTR_* values).
 */
static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
    "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");

/*
 * Force the driver to use interrupt forwarding.
 */
static int intr_fwd = 0;
TUNABLE_INT("hw.cxgbe.interrupt_forwarding", &intr_fwd);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_forwarding, CTLFLAG_RDTUN,
    &intr_fwd, 0, "always use forwarded interrupts");
220
/* Interrupt/queue layout computed by cfg_itype_and_nqueues() at attach. */
struct intrs_and_queues {
	int intr_type;	/* INTx, MSI, or MSI-X */
	int nirq;	/* Number of vectors */
	int intr_fwd;	/* Interrupts forwarded */
	int ntxq10g;	/* # of NIC txq's for each 10G port */
	int nrxq10g;	/* # of NIC rxq's for each 10G port */
	int ntxq1g;	/* # of NIC txq's for each 1G port */
	int nrxq1g;	/* # of NIC rxq's for each 1G port */
};

/*
 * PCIe memory access windows: offset within BAR0 and aperture size for
 * each of the three windows programmed by setup_memwin().
 */
enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE = 0x28000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE = 0x30000,
};

/* Bit flags telling update_mac_settings() which MAC settings to apply. */
enum {
	XGMAC_MTU = (1 << 0),
	XGMAC_PROMISC = (1 << 1),
	XGMAC_ALLMULTI = (1 << 2),
	XGMAC_VLANEX = (1 << 3),
	XGMAC_UCADDR = (1 << 4),
	XGMAC_MCADDRS = (1 << 5),

	XGMAC_ALL = 0xffff
};
250
/* Adapter setup helpers. */
static int map_bars(struct adapter *);
static void setup_memwin(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
static int get_params(struct adapter *, struct fw_caps_config_cmd *);
static void t4_set_desc(struct adapter *);
/* Port bring-up/tear-down helpers. */
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_locked(struct port_info *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_locked(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int first_port_up(struct adapter *);
static int last_port_down(struct adapter *);
/* Interrupt and register-dump helpers. */
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    iq_intr_handler_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
/* sysctl plumbing. */
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static inline void txq_start(struct ifnet *, struct sge_txq *);
static int t4_mod_event(module_t, int, void *);
284
/*
 * PCI IDs recognized by t4_probe().  "mpf" is the PCI function number the
 * driver attaches to (compared against pci_get_function() in t4_probe);
 * presumably the "master physical function" -- 4 on production cards,
 * 0 on the FPGA.
 */
struct t4_pciids {
	uint16_t device;	/* PCI device id */
	uint8_t mpf;		/* PCI function to attach to */
	char *desc;		/* probe description */
} t4_pciids[] = {
	{0xa000, 0, "Chelsio Terminator 4 FPGA"},
	{0x4400, 4, "Chelsio T440-dbg"},
	{0x4401, 4, "Chelsio T420-CR"},
	{0x4402, 4, "Chelsio T422-CR"},
	{0x4403, 4, "Chelsio T440-CR"},
	{0x4404, 4, "Chelsio T420-BCH"},
	{0x4405, 4, "Chelsio T440-BCH"},
	{0x4406, 4, "Chelsio T440-CH"},
	{0x4407, 4, "Chelsio T420-SO"},
	{0x4408, 4, "Chelsio T420-CX"},
	{0x4409, 4, "Chelsio T420-BT"},
	{0x440a, 4, "Chelsio T404-BT"},
};
303
304static int
305t4_probe(device_t dev)
306{
307 int i;
308 uint16_t v = pci_get_vendor(dev);
309 uint16_t d = pci_get_device(dev);
310
311 if (v != PCI_VENDOR_ID_CHELSIO)
312 return (ENXIO);
313
314 for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
315 if (d == t4_pciids[i].device &&
316 pci_get_function(dev) == t4_pciids[i].mpf) {
317 device_set_desc(dev, t4_pciids[i].desc);
318 return (BUS_PROBE_DEFAULT);
319 }
320 }
321
322 return (ENXIO);
323}
324
/*
 * Attach the adapter: map BARs, prepare the chip and firmware, size the
 * interrupt/queue configuration, create one cxgbe child per usable port,
 * and attach the children.  On any failure t4_detach() is invoked to
 * undo whatever was done (t4_detach is idempotent).
 */
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct fw_caps_config_cmd caps;
	uint32_t p, v;
	struct intrs_and_queues iaq;
	struct sge *s;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pf = pci_get_function(dev);
	sc->mbox = sc->pf;	/* mailbox to use == our PCI function */

	pci_enable_busmaster(dev);
	pci_set_max_read_req(dev, 4096);
	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);

	rc = map_bars(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/* Do this really early */
	sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
	    GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
	sc->cdev->si_drv1 = sc;

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Get device capabilities and select which ones we'll use */
	rc = get_capabilities(sc, &caps);
	if (rc != 0) {
		device_printf(dev,
		    "failed to initialize adapter capabilities: %d.\n", rc);
		goto done;
	}

	/* Choose the global RSS mode. */
	rc = -t4_config_glbl_rss(sc, sc->mbox,
	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
	    F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
	    F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (rc != 0) {
		device_printf(dev,
		    "failed to select global RSS mode: %d.\n", rc);
		goto done;
	}

	/* These are total (sum of all ports) limits for a bus driver */
	rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0,
	    64,		/* max # of egress queues */
	    64,		/* max # of egress Ethernet or control queues */
	    64,		/* max # of ingress queues with fl/interrupt */
	    0,		/* max # of ingress queues without interrupt */
	    0,		/* PCIe traffic class */
	    4,		/* max # of virtual interfaces */
	    M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16,
	    FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (rc != 0) {
		device_printf(dev,
		    "failed to configure pf/vf resources: %d.\n", rc);
		goto done;
	}

	/* Need this before sge_init */
	for (i = 0; i < SGE_NTIMERS; i++)
		sc->sge.timer_val[i] = min(intr_timer[i], 200U);
	for (i = 0; i < SGE_NCOUNTERS; i++)
		sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0);

	/* Also need the cooked value of cclk before sge_init */
	p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to obtain core clock value: %d.\n", rc);
		goto done;
	}
	sc->params.vpd.cclk = v;

	t4_sge_init(sc);

	/*
	 * XXX: This is the place to call t4_set_filter_mode()
	 */

	/* get basic stuff going */
	rc = -t4_early_init(sc, sc->mbox);
	if (rc != 0) {
		device_printf(dev, "early init failed: %d.\n", rc);
		goto done;
	}

	rc = get_params(sc, &caps);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* These are finalized by FW initialization, load their values now */
	v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	sc->params.tp.tre = G_TIMERRESOLUTION(v);
	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);

	/* tweak some settings */
	t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
	    V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
	    V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

	setup_memwin(sc);

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;	/* indicates init failed */
			continue;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		if (is_10G_port(pi)) {
			n10g++;
			pi->tmr_idx = tmr_idx_10g;
			pi->pktc_idx = pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = tmr_idx_1g;
			pi->pktc_idx = pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;

		/* rx queue size must be a multiple of 8; enforce a 128 floor */
		pi->qsize_rxq = max(qsize_rxq, 128);
		while (pi->qsize_rxq & 7)
			pi->qsize_rxq++;
		pi->qsize_txq = max(qsize_txq, 128);

		if (pi->qsize_rxq != qsize_rxq) {
			device_printf(dev,
			    "using %d instead of %d as the rx queue size.\n",
			    pi->qsize_rxq, qsize_rxq);
		}
		if (pi->qsize_txq != qsize_txq) {
			device_printf(dev,
			    "using %d instead of %d as the tx queue size.\n",
			    pi->qsize_txq, qsize_txq);
		}

		pi->dev = device_add_child(dev, "cxgbe", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);

		setbit(&sc->registered_device_map, i);
	}

	if (sc->registered_device_map == 0) {
		device_printf(dev, "no usable ports\n");
		rc = ENXIO;
		goto done;
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	/* Size and allocate the SGE queue arrays and the iq/eq maps. */
	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the fl in an rxq is an eq */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
	if (iaq.intr_fwd) {
		sc->flags |= INTR_FWD;
		s->niq += NFIQ(sc);	/* forwarded interrupt queues */
		s->fiq = malloc(NFIQ(sc) * sizeof(struct sge_iq), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_sysctls(sc);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;

		pi->first_txq = tqidx;
		pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

#ifdef INVARIANTS
	device_printf(dev,
	    "%p, %d ports (0x%x), %d intr_type, %d intr_count\n",
	    sc, sc->params.nports, sc->params.portvec,
	    sc->intr_type, sc->intr_count);
#endif
	t4_set_desc(sc);

done:
	if (rc != 0)
		t4_detach(dev);	/* idempotent cleanup of partial attach */

	return (rc);
}
610
/*
 * Idempotent.  Tears down everything t4_attach() may have set up, in
 * reverse order, and is safe to call on a partially-attached adapter
 * (each step checks whether its resource exists).  Ends by zeroing the
 * softc so a repeat call is a no-op.
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i;

	sc = device_get_softc(dev);

	if (sc->cdev)
		destroy_dev(sc->cdev);

	/* Detach and delete the cxgbe children, then free their VIs. */
	bus_generic_detach(dev);
	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	/* free(9) tolerates NULL, so these are safe on partial attach. */
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.fiq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	t4_destroy_dma_tag(sc);
	mtx_destroy(&sc->sc_lock);

	bzero(sc, sizeof(*sc));	/* makes a second detach a no-op */

	return (0);
}
666
667
668static int
669cxgbe_probe(device_t dev)
670{
671 char buf[128];
672 struct port_info *pi = device_get_softc(dev);
673
674 snprintf(buf, sizeof(buf), "Port %d", pi->port_id);
675 device_set_desc_copy(dev, buf);
676
677 return (BUS_PROBE_DEFAULT);
678}
679
/*
 * All interface capabilities a cxgbe port advertises, and the subset
 * enabled by default (everything except TSO6 at this point).
 */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO)
#define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6)
684
/*
 * Attach a cxgbe port: allocate and configure its ifnet, create the
 * per-port taskqueue, set up ifmedia, and register with ether_ifattach.
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);
	pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &pi->tq);
	if (pi->tq == NULL) {
		device_printf(dev, "failed to allocate port task queue\n");
		if_free(pi->ifp);
		return (ENOMEM);
	}
	taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_start = cxgbe_start;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_snd.ifq_drv_maxlen = 1024;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = T4_CAP;
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef INVARIANTS
	device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq);
#endif

	cxgbe_sysctls(pi);

	return (0);
}
743
/*
 * Detach a cxgbe port.  Marks the port DOOMED and waits for any in-flight
 * ioctl/init (BUSY) to drain before quiescing the port and releasing its
 * ifnet/media/taskqueue resources.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);	/* we own the adapter for the rest of the teardown */
	ADAPTER_UNLOCK(sc);

	rc = cxgbe_uninit_synchronized(pi);
	if (rc != 0)
		device_printf(dev, "port uninit failed: %d.\n", rc);

	taskqueue_free(pi->tq);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
777
778static void
779cxgbe_init(void *arg)
780{
781 struct port_info *pi = arg;
782 struct adapter *sc = pi->adapter;
783
784 ADAPTER_LOCK(sc);
785 cxgbe_init_locked(pi); /* releases adapter lock */
786 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
787}
788
/*
 * ioctl handler for a cxgbe port.  Most cases take the adapter lock and
 * bail out with ENXIO/EBUSY if the port is being detached (DOOMED) or
 * another operation is in progress (BUSY).
 *
 * Note: the "fail:" label lives inside the SIOCSIFMTU case but is the
 * shared unlock-and-return path for all cases (jumping between switch
 * cases is legal C).
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc) {
fail:
			ADAPTER_UNLOCK(sc);
			return (rc);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			rc = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				t4_update_fl_bufsize(ifp);
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_MTU);
				PORT_UNLOCK(pi);
			}
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(pi)) {
			rc = ENXIO;
			goto fail;
		}
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Up and running: only react to promisc/
				 * allmulti changes. */
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						rc = EBUSY;
						goto fail;
					}
					PORT_LOCK(pi);
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
					PORT_UNLOCK(pi);
				}
				ADAPTER_UNLOCK(sc);
			} else
				rc = cxgbe_init_locked(pi);
			/* NOTE(review): this snapshot of if_flags happens
			 * after the adapter lock was dropped on both paths
			 * above -- confirm that's intentional. */
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_locked(pi);
		else
			ADAPTER_UNLOCK(sc);

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(pi);
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
			PORT_UNLOCK(pi);
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO requires tx checksumming; drop it if txcsum
			 * was just turned off. */
			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
				    "tso disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_LRO) {
#ifdef INET
			int i;
			struct sge_rxq *rxq;

			/* Propagate the LRO setting to every rx queue. */
			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->flags |= RXQ_LRO_ENABLED;
				else
					rxq->flags &= ~RXQ_LRO_ENABLED;
			}
#endif
		}
#ifndef TCP_OFFLOAD_DISABLE
		if (mask & IFCAP_TOE4) {
			rc = EOPNOTSUPP;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_VLANEX);
				PORT_UNLOCK(pi);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
961
962static void
963cxgbe_start(struct ifnet *ifp)
964{
965 struct port_info *pi = ifp->if_softc;
966 struct sge_txq *txq;
967 int i;
968
969 for_each_txq(pi, i, txq) {
970 if (TXQ_TRYLOCK(txq)) {
971 txq_start(ifp, txq);
972 TXQ_UNLOCK(txq);
973 }
974 }
975}
976
/*
 * if_transmit handler.  Picks a tx queue (by flowid when present), and
 * either transmits directly (when the queue lock is available and nothing
 * is already queued) or enqueues on the queue's buf_ring.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);	/* not running; drop silently */
		return (0);
	}

	/* Spread flows across this port's tx queues. */
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->eq.br;

	if (TXQ_TRYLOCK(txq) == 0) {
		/*
		 * XXX: make sure that this packet really is sent out.  There is
		 * a small race where t4_eth_tx may stop draining the drbr and
		 * goes away, just before we enqueued this mbuf.
		 */

		return (drbr_enqueue(ifp, br, m));
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		/* Drain in order: held mbuf first, then the ring. */
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1036
1037static void
1038cxgbe_qflush(struct ifnet *ifp)
1039{
1040 struct port_info *pi = ifp->if_softc;
1041 struct sge_txq *txq;
1042 int i;
1043 struct mbuf *m;
1044
1045 /* queues do not exist if !IFF_DRV_RUNNING. */
1046 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1047 for_each_txq(pi, i, txq) {
1048 TXQ_LOCK(txq);
1049 m_freem(txq->m);
1050 while ((m = buf_ring_dequeue_sc(txq->eq.br)) != NULL)
1051 m_freem(m);
1052 TXQ_UNLOCK(txq);
1053 }
1054 }
1055 if_qflush(ifp);
1056}
1057
1058static int
1059cxgbe_media_change(struct ifnet *ifp)
1060{
1061 struct port_info *pi = ifp->if_softc;
1062
1063 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1064
1065 return (EOPNOTSUPP);
1066}
1067
1068static void
1069cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1070{
1071 struct port_info *pi = ifp->if_softc;
1072 struct ifmedia_entry *cur = pi->media.ifm_cur;
1073 int speed = pi->link_cfg.speed;
1074 int data = (pi->port_type << 8) | pi->mod_type;
1075
1076 if (cur->ifm_data != data) {
1077 build_medialist(pi);
1078 cur = pi->media.ifm_cur;
1079 }
1080
1081 ifmr->ifm_status = IFM_AVALID;
1082 if (!pi->link_cfg.link_ok)
1083 return;
1084
1085 ifmr->ifm_status |= IFM_ACTIVE;
1086
1087 /* active and current will differ iff current media is autoselect. */
1088 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1089 return;
1090
1091 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1092 if (speed == SPEED_10000)
1093 ifmr->ifm_active |= IFM_10G_T;
1094 else if (speed == SPEED_1000)
1095 ifmr->ifm_active |= IFM_1000_T;
1096 else if (speed == SPEED_100)
1097 ifmr->ifm_active |= IFM_100_TX;
1098 else if (speed == SPEED_10)
1099 ifmr->ifm_active |= IFM_10_T;
1100 else
1101 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1102 speed));
1103}
1104
/*
 * Stop the adapter after a fatal error: clear the SGE GLOBALENABLE bit,
 * mask all interrupts, and log an emergency message.  No recovery is
 * attempted here; the adapter is left stopped.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1113
/*
 * Map the PCI BARs used by the driver: BAR0 (the register file; its bus
 * tag, handle, and size are cached in the softc) and BAR4 (MSI-X).
 * Returns 0 or ENXIO.
 *
 * NOTE(review): on failure any resource already allocated here is not
 * released locally — presumably the detach/teardown path does it; confirm.
 */
static int
map_bars(struct adapter *sc)
{
	/* BAR0: main register window. */
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	/* BAR4: MSI-X table/PBA. */
	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1138
1139static void
1140setup_memwin(struct adapter *sc)
1141{
1142 u_long bar0;
1143
1144 bar0 = rman_get_start(sc->regs_res);
1145
1146 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1147 (bar0 + MEMWIN0_BASE) | V_BIR(0) |
1148 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1149
1150 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1151 (bar0 + MEMWIN1_BASE) | V_BIR(0) |
1152 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1153
1154 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1155 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1156 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1157}
1158
1159static int
1160cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1161 struct intrs_and_queues *iaq)
1162{
1163 int rc, itype, navail, nc, nrxq10g, nrxq1g;
1164
1165 bzero(iaq, sizeof(*iaq));
1166 nc = mp_ncpus; /* our snapshot of the number of CPUs */
1167
1168 for (itype = INTR_MSIX; itype; itype >>= 1) {
1169
1170 if ((itype & intr_types) == 0)
1171 continue; /* not allowed */
1172
1173 if (itype == INTR_MSIX)
1174 navail = pci_msix_count(sc->dev);
1175 else if (itype == INTR_MSI)
1176 navail = pci_msi_count(sc->dev);
1177 else
1178 navail = 1;
1179
1180 if (navail == 0)
1181 continue;
1182
1183 iaq->intr_type = itype;
1184
1185 iaq->ntxq10g = min(nc, max_ntxq_10g);
1186 iaq->ntxq1g = min(nc, max_ntxq_1g);
1187
1188 nrxq10g = min(nc, max_nrxq_10g);
1189 nrxq1g = min(nc, max_nrxq_1g);
1190
1191 /* Extra 2 is for a) error interrupt b) firmware event */
1192 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + 2;
1193 if (iaq->nirq <= navail && intr_fwd == 0) {
1194
1195 if (itype == INTR_MSI && !powerof2(iaq->nirq))
1196 goto fwd;
1197
1198 /* One for err, one for fwq, and one for each rxq */
1199
1200 iaq->intr_fwd = 0;
1201 iaq->nrxq10g = nrxq10g;
1202 iaq->nrxq1g = nrxq1g;
1203
1204 } else {
1205fwd:
1206 iaq->intr_fwd = 1;
1207
1208 if (navail > nc) {
1209 if (itype == INTR_MSIX)
1210 navail = nc + 1;
1211
1212 /* navail is and must remain a pow2 for MSI */
1213 if (itype == INTR_MSI) {
1214 KASSERT(powerof2(navail),
1215 ("%d not power of 2", navail));
1216
1217 while (navail / 2 > nc)
1218 navail /= 2;
1219 }
1220 }
1221 iaq->nirq = navail; /* total # of interrupts */
1222
1223 /*
1224 * If we have multiple vectors available reserve one
1225 * exclusively for errors. The rest will be shared by
1226 * the fwq and data.
1227 */
1228 if (navail > 1)
1229 navail--;
1230 iaq->nrxq10g = min(nrxq10g, navail);
1231 iaq->nrxq1g = min(nrxq1g, navail);
1232 }
1233
1234 navail = iaq->nirq;
1235 rc = 0;
1236 if (itype == INTR_MSIX)
1237 rc = pci_alloc_msix(sc->dev, &navail);
1238 else if (itype == INTR_MSI)
1239 rc = pci_alloc_msi(sc->dev, &navail);
1240
1241 if (rc == 0) {
1242 if (navail == iaq->nirq)
1243 return (0);
1244
1245 /*
1246 * Didn't get the number requested. Use whatever number
1247 * the kernel is willing to allocate (it's in navail).
1248 */
1249 pci_release_msi(sc->dev);
1250 goto fwd;
1251 }
1252
1253 device_printf(sc->dev,
1254 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1255 itype, rc, iaq->nirq, navail);
1256 }
1257
1258 device_printf(sc->dev,
1259 "failed to find a usable interrupt type. "
1260 "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types,
1261 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1262
1263 return (ENXIO);
1264}
1265
1266/*
1267 * Install a compatible firmware (if required), establish contact with it,
1268 * become the master, and reset the device.
1269 */
1270static int
1271prep_firmware(struct adapter *sc)
1272{
1273 const struct firmware *fw;
1274 int rc;
1275 enum dev_state state;
1276
1277 /* Check firmware version and install a different one if necessary */
1278 rc = t4_check_fw_version(sc);
1279 if (rc != 0 || force_firmware_install) {
1280 uint32_t v = 0;
1281
1282 fw = firmware_get(T4_FWNAME);
1283 if (fw != NULL) {
1284 const struct fw_hdr *hdr = (const void *)fw->data;
1285
1286 v = ntohl(hdr->fw_ver);
1287
1288 /*
1289 * The firmware module will not be used if it isn't the
1290 * same major version as what the driver was compiled
1291 * with. This check trumps force_firmware_install.
1292 */
1293 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1294 device_printf(sc->dev,
1295 "Found firmware image but version %d "
1296 "can not be used with this driver (%d)\n",
1297 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1298
1299 firmware_put(fw, FIRMWARE_UNLOAD);
1300 fw = NULL;
1301 }
1302 }
1303
1304 if (fw == NULL && (rc < 0 || force_firmware_install)) {
1305 device_printf(sc->dev, "No usable firmware. "
1306 "card has %d.%d.%d, driver compiled with %d.%d.%d, "
1307 "force_firmware_install%s set",
1308 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1309 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1310 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1311 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1312 FW_VERSION_MICRO,
1313 force_firmware_install ? "" : " not");
1314 return (EAGAIN);
1315 }
1316
1317 /*
1318 * Always upgrade, even for minor/micro/build mismatches.
1319 * Downgrade only for a major version mismatch or if
1320 * force_firmware_install was specified.
1321 */
1322 if (fw != NULL && (rc < 0 || force_firmware_install ||
1323 v > sc->params.fw_vers)) {
1324 device_printf(sc->dev,
1325 "installing firmware %d.%d.%d.%d on card.\n",
1326 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1327 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1328
1329 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1330 if (rc != 0) {
1331 device_printf(sc->dev,
1332 "failed to install firmware: %d\n", rc);
1333 firmware_put(fw, FIRMWARE_UNLOAD);
1334 return (rc);
1335 } else {
1336 /* refresh */
1337 (void) t4_check_fw_version(sc);
1338 }
1339 }
1340
1341 if (fw != NULL)
1342 firmware_put(fw, FIRMWARE_UNLOAD);
1343 }
1344
1345 /* Contact firmware, request master */
1346 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1347 if (rc < 0) {
1348 rc = -rc;
1349 device_printf(sc->dev,
1350 "failed to connect to the firmware: %d.\n", rc);
1351 return (rc);
1352 }
1353
1354 /* Reset device */
1355 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1356 if (rc != 0) {
1357 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1358 if (rc != ETIMEDOUT && rc != EIO)
1359 t4_fw_bye(sc, sc->mbox);
1360 return (rc);
1361 }
1362
1363 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1364 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1365 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1366 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1367 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1368 sc->flags |= FW_OK;
1369
1370 return (0);
1371}
1372
1373static int
1374get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
1375{
1376 int rc;
1377
1378 bzero(caps, sizeof(*caps));
1379 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1380 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1381 caps->retval_len16 = htobe32(FW_LEN16(*caps));
1382
1383 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps);
1384 if (rc != 0)
1385 return (rc);
1386
1387 if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM))
1388 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM);
1389
1390 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1391 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1392 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL);
1393
1394 return (rc);
1395}
1396
/*
 * Query device/function parameters from the firmware and record them in
 * the softc: port vector, ingress/egress queue start ids, filter range,
 * and — when the corresponding capability is present in *caps — the TOE,
 * RDMA, and iSCSI resource ranges.  Returns 0 or a positive errno.
 */
static int
get_params(struct adapter *sc, struct fw_caps_config_cmd *caps)
{
	int rc;
	uint32_t params[7], val[7];

#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(IQFLINT_START);
	params[2] = FW_PARAM_PFVF(EQ_START);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters: %d.\n", rc);
		goto done;
	}

	sc->params.portvec = val[0];
	/* nports = popcount(portvec): each loop clears the lowest set bit. */
	sc->params.nports = 0;
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->sge.iq_start = val[1];
	sc->sge.eq_start = val[2];
	sc->tids.ftid_base = val[3];
	sc->tids.nftids = val[4] - val[3] + 1;	/* range is inclusive */

	if (caps->toecaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			goto done;
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (caps->rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters: %d.\n", rc);
			goto done;
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;
	}
	if (caps->iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			goto done;
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

done:
	return (rc);
}
1495
1496static void
1497t4_set_desc(struct adapter *sc)
1498{
1499 char buf[128];
1500 struct adapter_params *p = &sc->params;
1501
1502 snprintf(buf, sizeof(buf),
1503 "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s",
1504 p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "",
1505 p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1506 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec);
1507
1508 device_set_desc_copy(sc->dev, buf);
1509}
1510
/*
 * (Re)build the ifmedia list for a port from its port type and transceiver
 * module type.  The (port_type << 8) | mod_type value is stashed in each
 * entry's ifm_data so cxgbe_media_status() can detect when the list is
 * stale and rebuild it.  Called with the port lock taken here.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	ifmedia_removeall(media);

	m = IFM_ETHER | IFM_FDX;
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* Pluggable media: go by what module is inserted. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* No module inserted. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
	case FW_PORT_TYPE_KR:
	default:
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
1597
1598/*
1599 * Program the port's XGMAC based on parameters in ifnet. The caller also
1600 * indicates which parameters should be programmed (the rest are left alone).
1601 */
1602static int
1603update_mac_settings(struct port_info *pi, int flags)
1604{
1605 int rc;
1606 struct ifnet *ifp = pi->ifp;
1607 struct adapter *sc = pi->adapter;
1608 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
1609
1610 PORT_LOCK_ASSERT_OWNED(pi);
1611 KASSERT(flags, ("%s: not told what to update.", __func__));
1612
1613 if (flags & XGMAC_MTU)
1614 mtu = ifp->if_mtu;
1615
1616 if (flags & XGMAC_PROMISC)
1617 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
1618
1619 if (flags & XGMAC_ALLMULTI)
1620 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
1621
1622 if (flags & XGMAC_VLANEX)
1623 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
1624
1625 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
1626 vlanex, false);
1627 if (rc) {
1628 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
1629 return (rc);
1630 }
1631
1632 if (flags & XGMAC_UCADDR) {
1633 uint8_t ucaddr[ETHER_ADDR_LEN];
1634
1635 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
1636 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
1637 ucaddr, true, true);
1638 if (rc < 0) {
1639 rc = -rc;
1640 if_printf(ifp, "change_mac failed: %d\n", rc);
1641 return (rc);
1642 } else {
1643 pi->xact_addr_filt = rc;
1644 rc = 0;
1645 }
1646 }
1647
1648 if (flags & XGMAC_MCADDRS) {
1649 const uint8_t *mcaddr;
1650 int del = 1;
1651 uint64_t hash = 0;
1652 struct ifmultiaddr *ifma;
1653
1654 if_maddr_rlock(ifp);
1655 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1656 if (ifma->ifma_addr->sa_family != AF_LINK)
1657 continue;
1658 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1659
1660 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
1661 &mcaddr, NULL, &hash, 0);
1662 if (rc < 0) {
1663 rc = -rc;
1664 if_printf(ifp, "failed to add mc address"
1665 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
1666 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
1667 mcaddr[4], mcaddr[5], rc);
1668 goto mcfail;
1669 }
1670 del = 0;
1671 }
1672
1673 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
1674 if (rc != 0)
1675 if_printf(ifp, "failed to set mc address hash: %d", rc);
1676mcfail:
1677 if_maddr_runlock(ifp);
1678 }
1679
1680 return (rc);
1681}
1682
1683static int
1684cxgbe_init_locked(struct port_info *pi)
1685{
1686 struct adapter *sc = pi->adapter;
1687 int rc = 0;
1688
1689 ADAPTER_LOCK_ASSERT_OWNED(sc);
1690
1691 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
1692 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
1693 rc = EINTR;
1694 goto done;
1695 }
1696 }
1697 if (IS_DOOMED(pi)) {
1698 rc = ENXIO;
1699 goto done;
1700 }
1701 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1702
1703 /* Give up the adapter lock, port init code can sleep. */
1704 SET_BUSY(sc);
1705 ADAPTER_UNLOCK(sc);
1706
1707 rc = cxgbe_init_synchronized(pi);
1708
1709done:
1710 ADAPTER_LOCK(sc);
1711 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1712 CLR_BUSY(sc);
1713 wakeup_one(&sc->flags);
1714 ADAPTER_UNLOCK(sc);
1715 return (rc);
1716}
1717
/*
 * Bring a port up: allocate its queues, program RSS, push the MAC settings,
 * start the link, and enable the virtual interface.  Adapter-wide resources
 * are set up first (first_port_up) if this is the first port to open.
 * Must be called without the adapter lock (the work here can sleep); the
 * caller (cxgbe_init_locked) holds the adapter "busy" flag instead.
 * On any failure the partial setup is unwound via
 * cxgbe_uninit_synchronized(), which is idempotent.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0, i;
	uint16_t *rss;
	struct sge_rxq *rxq;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0))
		return (rc);	/* error message displayed already */

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_eth_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.  The table maps RSS slots to the
	 * absolute ids of this port's rx queues.
	 */
	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
	for_each_rxq(pi, i, rxq) {
		rss[i] = rxq->iq.abs_id;
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
	    pi->nrxq);
	free(rss, M_CXGBE);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	PORT_LOCK(pi);
	rc = update_mac_settings(pi, XGMAC_ALL);
	PORT_UNLOCK(pi);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}
	pi->flags |= VI_ENABLED;

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the periodic stats/housekeeping callout. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}
1791
/*
 * if_down path, called with the adapter lock held.  Same serialization
 * protocol as cxgbe_init_locked(): wait for the adapter to go idle, mark
 * it busy, drop the lock, run cxgbe_uninit_synchronized(), then clear busy
 * and wake the next waiter.  Note that the early error paths jump straight
 * to the unlock — SET_BUSY never ran for them, so there is nothing to
 * clear.  Returns with the adapter lock released.
 */
static int
cxgbe_uninit_locked(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int rc;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
		if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(pi)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	rc = cxgbe_uninit_synchronized(pi);

	ADAPTER_LOCK(sc);
	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
done:
	ADAPTER_UNLOCK(sc);
	return (rc);
}
1824
1825/*
1826 * Idempotent.
1827 */
1828static int
1829cxgbe_uninit_synchronized(struct port_info *pi)
1830{
1831 struct adapter *sc = pi->adapter;
1832 struct ifnet *ifp = pi->ifp;
1833 int rc;
1834
1835 /*
1836 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1837 */
1838 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1839
1840 /*
1841 * Clear this port's bit from the open device map, and then drain
1842 * tasks and callouts.
1843 */
1844 clrbit(&sc->open_device_map, pi->port_id);
1845
1846 PORT_LOCK(pi);
1847 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1848 callout_stop(&pi->tick);
1849 PORT_UNLOCK(pi);
1850 callout_drain(&pi->tick);
1851
1852 /*
1853 * Stop and then free the queues' resources, including the queues
1854 * themselves.
1855 *
1856 * XXX: we could just stop the queues here (on ifconfig down) and free
1857 * them later (on port detach), but having up/down go through the entire
1858 * allocate/activate/deactivate/free sequence is a good way to find
1859 * leaks and bugs.
1860 */
1861 rc = t4_teardown_eth_queues(pi);
1862 if (rc != 0)
1863 if_printf(ifp, "teardown failed: %d\n", rc);
1864
1865 if (pi->flags & VI_ENABLED) {
1866 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
1867 if (rc)
1868 if_printf(ifp, "disable_vi failed: %d\n", rc);
1869 else
1870 pi->flags &= ~VI_ENABLED;
1871 }
1872
1873 pi->link_cfg.link_ok = 0;
1874 pi->link_cfg.speed = 0;
1875 t4_os_link_changed(sc, pi->port_id, 0);
1876
1877 if (sc->open_device_map == 0)
1878 last_port_down(sc);
1879
1880 return (0);
1881}
1882
/* Helper: allocate+wire one IRQ; bail out to 'done' on failure. */
#define T4_ALLOC_IRQ(sc, irqid, rid, handler, arg, name) do { \
	rc = t4_alloc_irq(sc, &sc->irq[irqid], rid, handler, arg, name); \
	if (rc != 0) \
		goto done; \
} while (0)
/*
 * Adapter-wide bring-up, run when the first port is opened: set up the
 * firmware event queue (and any forwarded-interrupt queues), then wire up
 * all interrupt vectors according to the configuration chosen earlier by
 * cfg_itype_and_nqueues().  Vector layout:
 *   - 1 vector total: one handler for everything (forwarded mode only);
 *   - multiple vectors: vector 0 is the error interrupt, and the rest are
 *     either forwarded-interrupt queues ("muxN") or one per rx queue
 *     ("pXqY") plus one for the firmware event queue ("evt").
 * On failure everything is unwound via last_port_down().
 */
static int
first_port_up(struct adapter *sc)
{
	int rc, i;
	char name[8];

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/*
	 * The firmware event queue and the optional forwarded interrupt queues.
	 */
	rc = t4_setup_adapter_iqs(sc);
	if (rc != 0)
		goto done;

	/*
	 * Setup interrupts.
	 */
	if (sc->intr_count == 1) {
		KASSERT(sc->flags & INTR_FWD,
		    ("%s: single interrupt but not forwarded?", __func__));
		T4_ALLOC_IRQ(sc, 0, 0, t4_intr_all, sc, "all");
	} else {
		/* Multiple interrupts.  The first one is always error intr */
		T4_ALLOC_IRQ(sc, 0, 1, t4_intr_err, sc, "err");

		if (sc->flags & INTR_FWD) {
			/* The rest are shared by the fwq and all data intr */
			for (i = 1; i < sc->intr_count; i++) {
				snprintf(name, sizeof(name), "mux%d", i - 1);
				T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_fwd,
				    &sc->sge.fiq[i - 1], name);
			}
		} else {
			struct port_info *pi;
			int p, q;

			T4_ALLOC_IRQ(sc, 1, 2, t4_intr_evt, &sc->sge.fwq,
			    "evt");

			/* One vector per rx queue, walking port by port. */
			p = q = 0;
			pi = sc->port[p];
			for (i = 2; i < sc->intr_count; i++) {
				snprintf(name, sizeof(name), "p%dq%d", p, q);
				if (++q >= pi->nrxq) {
					p++;
					q = 0;
					pi = sc->port[p];
				}
				T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_data,
				    &sc->sge.rxq[i - 2], name);
			}
		}
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;

done:
	if (rc != 0)
		last_port_down(sc);

	return (rc);
}
#undef T4_ALLOC_IRQ
1953
1954/*
1955 * Idempotent.
1956 */
1957static int
1958last_port_down(struct adapter *sc)
1959{
1960 int i;
1961
1962 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1963
1964 t4_intr_disable(sc);
1965
1966 t4_teardown_adapter_iqs(sc);
1967
1968 for (i = 0; i < sc->intr_count; i++)
1969 t4_free_irq(sc, &sc->irq[i]);
1970
1971 sc->flags &= ~FULL_INIT_DONE;
1972
1973 return (0);
1974}
1975
1976static int
1977t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
1978 iq_intr_handler_t *handler, void *arg, char *name)
1979{
1980 int rc;
1981
1982 irq->rid = rid;
1983 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
1984 RF_SHAREABLE | RF_ACTIVE);
1985 if (irq->res == NULL) {
1986 device_printf(sc->dev,
1987 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
1988 return (ENOMEM);
1989 }
1990
1991 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
1992 NULL, handler, arg, &irq->tag);
1993 if (rc != 0) {
1994 device_printf(sc->dev,
1995 "failed to setup interrupt for rid %d, name %s: %d\n",
1996 rid, name, rc);
1997 } else if (name)
1998 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
1999
2000 return (rc);
2001}
2002
2003static int
2004t4_free_irq(struct adapter *sc, struct irq *irq)
2005{
2006 if (irq->tag)
2007 bus_teardown_intr(sc->dev, irq->res, irq->tag);
2008 if (irq->res)
2009 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
2010
2011 bzero(irq, sizeof(*irq));
2012
2013 return (0);
2014}
2015
2016static void
2017reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
2018 unsigned int end)
2019{
2020 uint32_t *p = (uint32_t *)(buf + start);
2021
2022 for ( ; start <= end; start += sizeof(uint32_t))
2023 *p++ = t4_read_reg(sc, start);
2024}
2025
/*
 * Dump the T4 register ranges of interest into buf (laid out as an image
 * of the register file; see reg_block_dump).  reg_ranges[] is a flat list
 * of {first, last} pairs, both ends inclusive.  The version field encodes
 * the dump format (4) and the chip revision.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i;
	static const unsigned int reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e240, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e640, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea40, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee40, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f240, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f640, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa40, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe40, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	regs->version = 4 | (sc->params.rev << 10);
	/* reg_ranges entries come in pairs: {first, last}. */
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
2252
/*
 * Per-port periodic callout (runs every hz ticks while the port is up).
 * Pulls the hardware MAC statistics for this port's tx channel and copies
 * them into the ifnet counters.  Stops rescheduling itself once the port
 * is no longer IFF_DRV_RUNNING (uninit also calls callout_drain).
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	t4_get_port_stats(pi->adapter, pi->tx_chan, s);

	ifp->if_opackets = s->tx_frames;
	ifp->if_ipackets = s->rx_frames;
	ifp->if_obytes = s->tx_octets;
	ifp->if_ibytes = s->rx_octets;
	ifp->if_omcasts = s->tx_mcast_frames;
	ifp->if_imcasts = s->rx_mcast_frames;
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3;

	/* MAC drops plus everything dropped by the software buf_rings. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->eq.br->br_drops;
	ifp->if_snd.ifq_drops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
2291
/*
 * Register the adapter-wide sysctl nodes (dev.t4nex.X.*): port count,
 * hardware revision, firmware version, TOE capability, core clock, and the
 * read-only interrupt-holdoff timer/packet-count tables.  Always returns 0;
 * the sysctl context is torn down with the device.
 */
static int
t4_sysctls(struct adapter *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);
	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
	    &sc->params.nports, 0, "# of ports");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
	    &sc->params.rev, 0, "chip hardware revision");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD,
	    &sc->params.offload, 0, "hardware is capable of TCP offload");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
	    &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
	    CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer),
	    sysctl_int_array, "A", "interrupt holdoff timer values (us)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
	    CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount),
	    sysctl_int_array, "A", "interrupt holdoff packet counter values");

	return (0);
}
2328
/*
 * Register the per-port sysctl nodes (dev.cxgbe.X.*): queue configuration
 * knobs, tunable holdoff parameters, and the dev.cxgbe.X.stats.* subtree of
 * MAC statistics.  Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	/* Queue counts and placement are fixed at attach; read-only. */
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");

	/* Tunables routed through handlers that validate and apply them. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

/*
 * Each of these stats is read live from the named 64-bit MPS port-statistic
 * register pair via sysctl_handle_t4_reg64.  NOTE: the macro textually
 * captures the local ctx/children variables above.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	/* Transmit-side counters. */
	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	/* Receive-side counters. */
	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/*
 * These expose fields of the software pi->stats copy rather than reading
 * hardware directly.  NOTE: the macro textually captures ctx/children/pi.
 */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by upto 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
2525
2526static int
2527sysctl_int_array(SYSCTL_HANDLER_ARGS)
2528{
2529 int rc, *i;
2530 struct sbuf sb;
2531
2532 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
2533 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
2534 sbuf_printf(&sb, "%d ", *i);
2535 sbuf_trim(&sb);
2536 sbuf_finish(&sb);
2537 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2538 sbuf_delete(&sb);
2539 return (rc);
2540}
2541
2542static int
2543sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
2544{
2545 struct port_info *pi = arg1;
2546 struct adapter *sc = pi->adapter;
2547 struct sge_rxq *rxq;
2548 int idx, rc, i;
2549
2550 idx = pi->tmr_idx;
2551
2552 rc = sysctl_handle_int(oidp, &idx, 0, req);
2553 if (rc != 0 || req->newptr == NULL)
2554 return (rc);
2555
2556 if (idx < 0 || idx >= SGE_NTIMERS)
2557 return (EINVAL);
2558
2559 ADAPTER_LOCK(sc);
2560 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2561 if (rc == 0) {
2562 for_each_rxq(pi, i, rxq) {
2563 rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) |
2564 V_QINTR_CNT_EN(pi->pktc_idx != -1);
2565 }
2566 pi->tmr_idx = idx;
2567 }
2568
2569 ADAPTER_UNLOCK(sc);
2570 return (rc);
2571}
2572
2573static int
2574sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
2575{
2576 struct port_info *pi = arg1;
2577 struct adapter *sc = pi->adapter;
2578 int idx, rc;
2579
2580 idx = pi->pktc_idx;
2581
2582 rc = sysctl_handle_int(oidp, &idx, 0, req);
2583 if (rc != 0 || req->newptr == NULL)
2584 return (rc);
2585
2586 if (idx < -1 || idx >= SGE_NCOUNTERS)
2587 return (EINVAL);
2588
2589 ADAPTER_LOCK(sc);
2590 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2591 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2592 rc = EBUSY; /* can be changed only when port is down */
2593
2594 if (rc == 0)
2595 pi->pktc_idx = idx;
2596
2597 ADAPTER_UNLOCK(sc);
2598 return (rc);
2599}
2600
2601static int
2602sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
2603{
2604 struct port_info *pi = arg1;
2605 struct adapter *sc = pi->adapter;
2606 int qsize, rc;
2607
2608 qsize = pi->qsize_rxq;
2609
2610 rc = sysctl_handle_int(oidp, &qsize, 0, req);
2611 if (rc != 0 || req->newptr == NULL)
2612 return (rc);
2613
2614 if (qsize < 128 || (qsize & 7))
2615 return (EINVAL);
2616
2617 ADAPTER_LOCK(sc);
2618 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2619 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2620 rc = EBUSY; /* can be changed only when port is down */
2621
2622 if (rc == 0)
2623 pi->qsize_rxq = qsize;
2624
2625 ADAPTER_UNLOCK(sc);
2626 return (rc);
2627}
2628
2629static int
2630sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
2631{
2632 struct port_info *pi = arg1;
2633 struct adapter *sc = pi->adapter;
2634 int qsize, rc;
2635
2636 qsize = pi->qsize_txq;
2637
2638 rc = sysctl_handle_int(oidp, &qsize, 0, req);
2639 if (rc != 0 || req->newptr == NULL)
2640 return (rc);
2641
2642 if (qsize < 128)
2643 return (EINVAL);
2644
2645 ADAPTER_LOCK(sc);
2646 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2647 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2648 rc = EBUSY; /* can be changed only when port is down */
2649
2650 if (rc == 0)
2651 pi->qsize_txq = qsize;
2652
2653 ADAPTER_UNLOCK(sc);
2654 return (rc);
2655}
2656
2657static int
2658sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
2659{
2660 struct adapter *sc = arg1;
2661 int reg = arg2;
2662 uint64_t val;
2663
2664 val = t4_read_reg64(sc, reg);
2665
2666 return (sysctl_handle_64(oidp, &val, 0, req));
2667}
2668
2669static inline void
2670txq_start(struct ifnet *ifp, struct sge_txq *txq)
2671{
2672 struct buf_ring *br;
2673 struct mbuf *m;
2674
2675 TXQ_LOCK_ASSERT_OWNED(txq);
2676
2677 br = txq->eq.br;
2678 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
2679 if (m)
2680 t4_eth_tx(ifp, txq, m);
2681}
2682
2683void
2684cxgbe_txq_start(void *arg, int count)
2685{
2686 struct sge_txq *txq = arg;
2687
2688 TXQ_LOCK(txq);
2689 if (txq->eq.flags & EQ_CRFLUSHED) {
2690 txq->eq.flags &= ~EQ_CRFLUSHED;
2691 txq_start(txq->ifp, txq);
2692 } else
2693 wakeup_one(txq); /* txq is going away, wakeup free_txq */
2694 TXQ_UNLOCK(txq);
2695}
2696
2697int
2698t4_os_find_pci_capability(struct adapter *sc, int cap)
2699{
2700 device_t dev;
2701 struct pci_devinfo *dinfo;
2702 pcicfgregs *cfg;
2703 uint32_t status;
2704 uint8_t ptr;
2705
2706 dev = sc->dev;
2707 dinfo = device_get_ivars(dev);
2708 cfg = &dinfo->cfg;
2709
2710 status = pci_read_config(dev, PCIR_STATUS, 2);
2711 if (!(status & PCIM_STATUS_CAPPRESENT))
2712 return (0);
2713
2714 switch (cfg->hdrtype & PCIM_HDRTYPE) {
2715 case 0:
2716 case 1:
2717 ptr = PCIR_CAP_PTR;
2718 break;
2719 case 2:
2720 ptr = PCIR_CAP_PTR_2;
2721 break;
2722 default:
2723 return (0);
2724 break;
2725 }
2726 ptr = pci_read_config(dev, ptr, 1);
2727
2728 while (ptr != 0) {
2729 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
2730 return (ptr);
2731 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
2732 }
2733
2734 return (0);
2735}
2736
2737int
2738t4_os_pci_save_state(struct adapter *sc)
2739{
2740 device_t dev;
2741 struct pci_devinfo *dinfo;
2742
2743 dev = sc->dev;
2744 dinfo = device_get_ivars(dev);
2745
2746 pci_cfg_save(dev, dinfo, 0);
2747 return (0);
2748}
2749
2750int
2751t4_os_pci_restore_state(struct adapter *sc)
2752{
2753 device_t dev;
2754 struct pci_devinfo *dinfo;
2755
2756 dev = sc->dev;
2757 dinfo = device_get_ivars(dev);
2758
2759 pci_cfg_restore(dev, dinfo);
2760 return (0);
2761}
2762
2763void
2764t4_os_portmod_changed(const struct adapter *sc, int idx)
2765{
2766 struct port_info *pi = sc->port[idx];
2767 static const char *mod_str[] = {
2768 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2769 };
2770
2771 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2772 if_printf(pi->ifp, "transceiver unplugged.\n");
2773 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2774 if_printf(pi->ifp, "unknown transceiver inserted.\n");
2775 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2776 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
2777 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) {
2778 if_printf(pi->ifp, "%s transceiver inserted.\n",
2779 mod_str[pi->mod_type]);
2780 } else {
2781 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
2782 pi->mod_type);
2783 }
2784}
2785
2786void
2787t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
2788{
2789 struct port_info *pi = sc->port[idx];
2790 struct ifnet *ifp = pi->ifp;
2791
2792 if (link_stat) {
2793 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
2794 if_link_state_change(ifp, LINK_STATE_UP);
2795 } else
2796 if_link_state_change(ifp, LINK_STATE_DOWN);
2797}
2798
/* Open handler for the /dev/t4nexN control node; nothing to set up. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}
2804
/* Close handler for the /dev/t4nexN control node; nothing to tear down. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}
2810
2811static int
2812t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
2813 struct thread *td)
2814{
2815 int rc;
2816 struct adapter *sc = dev->si_drv1;
2817
2818 rc = priv_check(td, PRIV_DRIVER);
2819 if (rc != 0)
2820 return (rc);
2821
2822 switch (cmd) {
2823 case CHELSIO_T4_GETREG: {
2824 struct t4_reg *edata = (struct t4_reg *)data;
2825
2826 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2827 return (EFAULT);
2828
2829 if (edata->size == 4)
2830 edata->val = t4_read_reg(sc, edata->addr);
2831 else if (edata->size == 8)
2832 edata->val = t4_read_reg64(sc, edata->addr);
2833 else
2834 return (EINVAL);
2835
2836 break;
2837 }
2838 case CHELSIO_T4_SETREG: {
2839 struct t4_reg *edata = (struct t4_reg *)data;
2840
2841 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2842 return (EFAULT);
2843
2844 if (edata->size == 4) {
2845 if (edata->val & 0xffffffff00000000)
2846 return (EINVAL);
2847 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
2848 } else if (edata->size == 8)
2849 t4_write_reg64(sc, edata->addr, edata->val);
2850 else
2851 return (EINVAL);
2852 break;
2853 }
2854 case CHELSIO_T4_REGDUMP: {
2855 struct t4_regdump *regs = (struct t4_regdump *)data;
2856 int reglen = T4_REGDUMP_SIZE;
2857 uint8_t *buf;
2858
2859 if (regs->len < reglen) {
2860 regs->len = reglen; /* hint to the caller */
2861 return (ENOBUFS);
2862 }
2863
2864 regs->len = reglen;
2865 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
2866 t4_get_regs(sc, regs, buf);
2867 rc = copyout(buf, regs->data, reglen);
2868 free(buf, M_CXGBE);
2869 break;
2870 }
2871 default:
2872 rc = EINVAL;
2873 }
2874
2875 return (rc);
2876}
2877
2878static int
2879t4_mod_event(module_t mod, int cmd, void *arg)
2880{
2881
2882 if (cmd == MOD_LOAD)
2883 t4_sge_modload();
2884
2885 return (0);
2886}
2887
static devclass_t t4_devclass;
static devclass_t cxgbe_devclass;

/* t4nex: the PCI-attached nexus device representing the adapter itself. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
MODULE_VERSION(t4nex, 1);

/* cxgbe: one network interface per port, attached as children of t4nex. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);