1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 12 unchanged lines hidden ---

21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 228561 2011-12-16 02:09:51Z np $");
30
31#include "opt_inet.h"
32
33#include <sys/param.h>
34#include <sys/conf.h>
35#include <sys/priv.h>
36#include <sys/kernel.h>
37#include <sys/bus.h>

--- 12 unchanged lines hidden ---

50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52#include <net/ethernet.h>
53#include <net/if.h>
54#include <net/if_types.h>
55#include <net/if_dl.h>
56#include <net/if_vlan_var.h>
57
58#include "common/common.h"
59#include "common/t4_msg.h"
60#include "common/t4_regs.h"
61#include "common/t4_regs_values.h"
62#include "t4_ioctl.h"
63#include "t4_l2t.h"
64
65/* T4 bus driver interface */
66static int t4_probe(device_t);
67static int t4_attach(device_t);
68static int t4_detach(device_t);
69static device_method_t t4_methods[] = {

--- 45 unchanged lines hidden ---

115static void cxgbe_start(struct ifnet *);
116static int cxgbe_transmit(struct ifnet *, struct mbuf *);
117static void cxgbe_qflush(struct ifnet *);
118static int cxgbe_media_change(struct ifnet *);
119static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
120
121MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
122
123static struct mtx t4_list_lock;
124static SLIST_HEAD(, adapter) t4_list;
125#ifndef TCP_OFFLOAD_DISABLE
126static struct mtx t4_uld_list_lock;
127static SLIST_HEAD(, uld_info) t4_uld_list;
128#endif
129
130/*
131 * Tunables. See tweak_tunables() too.
132 */
133
134/*
135 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
136 */
137#define NTXQ_10G 16
138static int t4_ntxq10g = -1;
139TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
140
141#define NRXQ_10G 8
142static int t4_nrxq10g = -1;
143TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
144
145#define NTXQ_1G 4
146static int t4_ntxq1g = -1;
147TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
148
149#define NRXQ_1G 2
150static int t4_nrxq1g = -1;
151TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
152
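/*
 * (Illustrative, not in the original source) These are boot-time loader
 * tunables, e.g. in /boot/loader.conf:
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="4"
 * A value of -1 means "use the driver default"; per the comment above, the
 * -1's are replaced with real values in tweak_tunables().
 */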
153#ifndef TCP_OFFLOAD_DISABLE
154#define NOFLDTXQ_10G 8
155static int t4_nofldtxq10g = -1;
156TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
157
158#define NOFLDRXQ_10G 2
159static int t4_nofldrxq10g = -1;
160TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
161
162#define NOFLDTXQ_1G 2
163static int t4_nofldtxq1g = -1;
164TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
165
166#define NOFLDRXQ_1G 1
167static int t4_nofldrxq1g = -1;
168TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
169#endif
170
171/*
172 * Holdoff parameters for 10G and 1G ports.
173 */
174#define TMR_IDX_10G 1
175static int t4_tmr_idx_10g = TMR_IDX_10G;
176TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
177
178#define PKTC_IDX_10G 2
179static int t4_pktc_idx_10g = PKTC_IDX_10G;
180TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
181
182#define TMR_IDX_1G 1
183static int t4_tmr_idx_1g = TMR_IDX_1G;
184TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
185
186#define PKTC_IDX_1G 2
187static int t4_pktc_idx_1g = PKTC_IDX_1G;
188TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
189
190/*
191 * Size (# of entries) of each tx and rx queue.
192 */
193static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
194TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
195
196static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
197TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
198
199/*
200 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
201 */
202static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
203TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
204
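/*
 * (Illustrative) With the bit assignments above, the default of 7 allows all
 * three interrupt types; e.g. hw.cxgbe.interrupt_types="2" in loader.conf
 * would restrict the driver to MSI only.
 */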
205/*
206 * Configuration file.
207 */
208static char t4_cfg_file[32] = "default";
209TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
210
211/*
212 * ASIC features that will be used. Disable the ones you don't want so that the
213 * chip resources aren't wasted on features that will not be used.
214 */
215static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
216TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
217
218static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
219TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
220
221static int t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
222TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
223
224static int t4_rdmacaps_allowed = 0;
225TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
226
227static int t4_iscsicaps_allowed = 0;
228TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
229
230static int t4_fcoecaps_allowed = 0;
231TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
232
233struct intrs_and_queues {
234 int intr_type; /* INTx, MSI, or MSI-X */
235 int nirq; /* Number of vectors */
236 int intr_flags;
237 int ntxq10g; /* # of NIC txq's for each 10G port */
238 int nrxq10g; /* # of NIC rxq's for each 10G port */
239 int ntxq1g; /* # of NIC txq's for each 1G port */
240 int nrxq1g; /* # of NIC rxq's for each 1G port */
241#ifndef TCP_OFFLOAD_DISABLE
242 int nofldtxq10g; /* # of TOE txq's for each 10G port */
243 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
244 int nofldtxq1g; /* # of TOE txq's for each 1G port */
245 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
246#endif
247};
248
249struct filter_entry {
250 uint32_t valid:1; /* filter allocated and valid */
251 uint32_t locked:1; /* filter is administratively locked */
252 uint32_t pending:1; /* filter action is pending firmware reply */
253 uint32_t smtidx:8; /* Source MAC Table index for smac */
254 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
255
256 struct t4_filter_specification fs;
257};
258
259enum {
260 XGMAC_MTU = (1 << 0),
261 XGMAC_PROMISC = (1 << 1),
262 XGMAC_ALLMULTI = (1 << 2),
263 XGMAC_VLANEX = (1 << 3),
264 XGMAC_UCADDR = (1 << 4),
265 XGMAC_MCADDRS = (1 << 5),
266
267 XGMAC_ALL = 0xffff
268};
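/*
 * (Illustrative) These flags select which MAC settings update_mac_settings()
 * should (re)apply, e.g. update_mac_settings(pi, XGMAC_PROMISC | XGMAC_ALLMULTI)
 * refreshes only the promiscuous and all-multicast state, while XGMAC_ALL
 * reapplies everything.
 */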
269
270static int map_bars(struct adapter *);
271static void setup_memwin(struct adapter *);
272static int cfg_itype_and_nqueues(struct adapter *, int, int,
273 struct intrs_and_queues *);
274static int prep_firmware(struct adapter *);
275static int upload_config_file(struct adapter *, const struct firmware *,
276 uint32_t *, uint32_t *);
277static int partition_resources(struct adapter *, const struct firmware *);
278static int get_params__pre_init(struct adapter *);
279static int get_params__post_init(struct adapter *);
280static void t4_set_desc(struct adapter *);
281static void build_medialist(struct port_info *);
282static int update_mac_settings(struct port_info *, int);
283static int cxgbe_init_locked(struct port_info *);
284static int cxgbe_init_synchronized(struct port_info *);
285static int cxgbe_uninit_locked(struct port_info *);
286static int cxgbe_uninit_synchronized(struct port_info *);
287static int adapter_full_init(struct adapter *);
288static int adapter_full_uninit(struct adapter *);
289static int port_full_init(struct port_info *);
290static int port_full_uninit(struct port_info *);
291static void quiesce_eq(struct adapter *, struct sge_eq *);
292static void quiesce_iq(struct adapter *, struct sge_iq *);
293static void quiesce_fl(struct adapter *, struct sge_fl *);
294static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
295 driver_intr_t *, void *, char *);
296static int t4_free_irq(struct adapter *, struct irq *);
297static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
298 unsigned int);
299static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
300static void cxgbe_tick(void *);
301static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
302 struct mbuf *);
303static int t4_sysctls(struct adapter *);
304static int cxgbe_sysctls(struct port_info *);
305static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
306static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
307static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
308static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
309static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
310static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
311static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
312static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
313static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
314static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
315static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
316static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
317static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
318static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
319static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
320static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
321static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
322static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
323static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
324static int sysctl_tids(SYSCTL_HANDLER_ARGS);
325static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
326static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
327static inline void txq_start(struct ifnet *, struct sge_txq *);
328static uint32_t fconf_to_mode(uint32_t);
329static uint32_t mode_to_fconf(uint32_t);
330static uint32_t fspec_to_fconf(struct t4_filter_specification *);
331static int get_filter_mode(struct adapter *, uint32_t *);
332static int set_filter_mode(struct adapter *, uint32_t);
333static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
334static int get_filter(struct adapter *, struct t4_filter *);
335static int set_filter(struct adapter *, struct t4_filter *);
336static int del_filter(struct adapter *, struct t4_filter *);
337static void clear_filter(struct filter_entry *);
338static int set_filter_wr(struct adapter *, int);
339static int del_filter_wr(struct adapter *, int);
340static int filter_rpl(struct sge_iq *, const struct rss_header *,
341 struct mbuf *);
342static int get_sge_context(struct adapter *, struct t4_sge_context *);
343static int read_card_mem(struct adapter *, struct t4_mem_range *);
344#ifndef TCP_OFFLOAD_DISABLE
345static int toe_capability(struct port_info *, int);
346static int activate_uld(struct adapter *, int, struct uld_softc *);
347static int deactivate_uld(struct uld_softc *);
348#endif
349static int t4_mod_event(module_t, int, void *);
350
351struct t4_pciids {
352 uint16_t device;
353 uint8_t mpf;
354 char *desc;
355} t4_pciids[] = {
356 {0xa000, 0, "Chelsio Terminator 4 FPGA"},

--- 5 unchanged lines hidden ---

362 {0x4405, 4, "Chelsio T440-BCH"},
363 {0x4406, 4, "Chelsio T440-CH"},
364 {0x4407, 4, "Chelsio T420-SO"},
365 {0x4408, 4, "Chelsio T420-CX"},
366 {0x4409, 4, "Chelsio T420-BT"},
367 {0x440a, 4, "Chelsio T404-BT"},
368};
369
370#ifndef TCP_OFFLOAD_DISABLE
371/* This is used in service_iq() to get to the fl associated with an iq. */
372CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
373#endif
374
375static int
376t4_probe(device_t dev)
377{
378 int i;
379 uint16_t v = pci_get_vendor(dev);
380 uint16_t d = pci_get_device(dev);
381
382 if (v != PCI_VENDOR_ID_CHELSIO)

--- 10 unchanged lines hidden ---

393 return (ENXIO);
394}
395
396static int
397t4_attach(device_t dev)
398{
399 struct adapter *sc;
400 int rc = 0, i, n10g, n1g, rqidx, tqidx;
401 struct intrs_and_queues iaq;
402 struct sge *s;
403#ifndef TCP_OFFLOAD_DISABLE
404 int ofld_rqidx, ofld_tqidx;
405#endif
406
407 sc = device_get_softc(dev);
408 sc->dev = dev;
409 sc->pf = pci_get_function(dev);
410 sc->mbox = sc->pf;
411
412 pci_enable_busmaster(dev);
413 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
414 uint32_t v;
415
416 pci_set_max_read_req(dev, 4096);
417 v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
418 v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
419 pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
420 }
421
422 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
423 device_get_nameunit(dev));
424 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
425 mtx_lock(&t4_list_lock);
426 SLIST_INSERT_HEAD(&t4_list, sc, link);
427 mtx_unlock(&t4_list_lock);
428
429 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
430 TAILQ_INIT(&sc->sfl);
431 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
432
433 rc = map_bars(sc);
434 if (rc != 0)
435 goto done; /* error message displayed already */
436
437 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
438 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++)
439 sc->cpl_handler[i] = cpl_not_handled;
440 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, filter_rpl);
441
442 /* Prepare the adapter for operation */
443 rc = -t4_prep_adapter(sc);
444 if (rc != 0) {
445 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
446 goto done;
447 }
448
449 /*
450 * Do this really early, with the memory windows set up even before the
451 * character device. The userland tool's register i/o and mem read
452 * will work even in "recovery mode".
453 */
454 setup_memwin(sc);
455 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
456 GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
457 sc->cdev->si_drv1 = sc;
458
459 /* Go no further if recovery mode has been requested. */
460 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
461 device_printf(dev, "recovery mode.\n");
462 goto done;
463 }
464
465 /* Prepare the firmware for operation */
466 rc = prep_firmware(sc);
467 if (rc != 0)
468 goto done; /* error message displayed already */
469
470 rc = get_params__pre_init(sc);
471 if (rc != 0)
472 goto done; /* error message displayed already */
473
474 rc = t4_sge_init(sc);
475 if (rc != 0)
476 goto done; /* error message displayed already */
477
478 if (sc->flags & MASTER_PF) {
479 /* get basic stuff going */
480 rc = -t4_fw_initialize(sc, sc->mbox);
481 if (rc != 0) {
482 device_printf(dev, "early init failed: %d.\n", rc);
483 goto done;
484 }
485 }
486
487 rc = get_params__post_init(sc);
488 if (rc != 0)
489 goto done; /* error message displayed already */
490
491 if (sc->flags & MASTER_PF) {
492
493 /* final tweaks to some settings */
494
495 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
496 sc->params.b_wnd);
497 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
498 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
499 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);
500 t4_set_reg_field(sc, A_TP_PARA_REG5,
501 V_INDICATESIZE(M_INDICATESIZE) |
502 F_REARMDDPOFFSET | F_RESETDDPOFFSET,
503 V_INDICATESIZE(M_INDICATESIZE) |
504 F_REARMDDPOFFSET | F_RESETDDPOFFSET);
505 } else {
506 /*
507 * XXX: Verify that we can live with whatever the master driver
508 * has done so far, and hope that it doesn't change any global
509 * setting from underneath us in the future.
510 */
511 }
512
513 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
514 A_TP_VLAN_PRI_MAP);
515
516 for (i = 0; i < NCHAN; i++)
517 sc->params.tp.tx_modq[i] = i;
518
519 rc = t4_create_dma_tag(sc);
520 if (rc != 0)
521 goto done; /* error message displayed already */
522
523 /*
524 * First pass over all the ports - allocate VIs and initialize some
525 * basic parameters like mac address, port type, etc. We also figure
526 * out whether a port is 10G or 1G and use that information when

--- 21 unchanged lines hidden ---

548 }
549
550 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
551 device_get_nameunit(dev), i);
552 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
553
554 if (is_10G_port(pi)) {
555 n10g++;
556 pi->tmr_idx = t4_tmr_idx_10g;
557 pi->pktc_idx = t4_pktc_idx_10g;
558 } else {
559 n1g++;
560 pi->tmr_idx = t4_tmr_idx_1g;
561 pi->pktc_idx = t4_pktc_idx_1g;
562 }
563
564 pi->xact_addr_filt = -1;
565
566 pi->qsize_rxq = t4_qsize_rxq;
567 pi->qsize_txq = t4_qsize_txq;
568
569 pi->dev = device_add_child(dev, "cxgbe", -1);
570 if (pi->dev == NULL) {
571 device_printf(dev,
572 "failed to add device for port %d.\n", i);
573 rc = ENXIO;
574 goto done;
575 }
576 device_set_softc(pi->dev, pi);
577 }
578
579 /*
580 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
581 */
582 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
583 if (rc != 0)
584 goto done; /* error message displayed already */
585
586 sc->intr_type = iaq.intr_type;
587 sc->intr_count = iaq.nirq;
588 sc->flags |= iaq.intr_flags;
589
590 s = &sc->sge;
591 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
592 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
593 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
594 s->neq += sc->params.nports + 1; /* ctrl queues: 1 per port + 1 mgmt */
595 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
596
597#ifndef TCP_OFFLOAD_DISABLE
598 if (is_offload(sc)) {
599
600 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
601 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
602 s->neq += s->nofldtxq + s->nofldrxq;
603 s->niq += s->nofldrxq;
604
605 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
606 M_CXGBE, M_ZERO | M_WAITOK);
607 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
608 M_CXGBE, M_ZERO | M_WAITOK);
609 }
610#endif
611
612 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
613 M_ZERO | M_WAITOK);
614 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
615 M_ZERO | M_WAITOK);
616 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
617 M_ZERO | M_WAITOK);
618 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
619 M_ZERO | M_WAITOK);
620 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
621 M_ZERO | M_WAITOK);
622
623 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
624 M_ZERO | M_WAITOK);
625
626 t4_init_l2t(sc, M_WAITOK);
627
628 /*
629 * Second pass over the ports. This time we know the number of rx and
630 * tx queues that each port should get.
631 */
632 rqidx = tqidx = 0;
633#ifndef TCP_OFFLOAD_DISABLE
634 ofld_rqidx = ofld_tqidx = 0;
635#endif
636 for_each_port(sc, i) {
637 struct port_info *pi = sc->port[i];
638
639 if (pi == NULL)
640 continue;
641
642 pi->first_rxq = rqidx;
643 pi->first_txq = tqidx;
644 if (is_10G_port(pi)) {
645 pi->nrxq = iaq.nrxq10g;
646 pi->ntxq = iaq.ntxq10g;
647 } else {
648 pi->nrxq = iaq.nrxq1g;
649 pi->ntxq = iaq.ntxq1g;
650 }
651
652 rqidx += pi->nrxq;
653 tqidx += pi->ntxq;
654
655#ifndef TCP_OFFLOAD_DISABLE
656 if (is_offload(sc)) {
657 pi->first_ofld_rxq = ofld_rqidx;
658 pi->first_ofld_txq = ofld_tqidx;
659 if (is_10G_port(pi)) {
660 pi->nofldrxq = iaq.nofldrxq10g;
661 pi->nofldtxq = iaq.nofldtxq10g;
662 } else {
663 pi->nofldrxq = iaq.nofldrxq1g;
664 pi->nofldtxq = iaq.nofldtxq1g;
665 }
666 ofld_rqidx += pi->nofldrxq;
667 ofld_tqidx += pi->nofldtxq;
668 }
669#endif
670 }
671
672 rc = bus_generic_attach(dev);
673 if (rc != 0) {
674 device_printf(dev,
675 "failed to attach all child ports: %d\n", rc);
676 goto done;
677 }
678
679 device_printf(dev,
680 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
681 sc->params.pci.width, sc->params.nports, sc->intr_count,
682 sc->intr_type == INTR_MSIX ? "MSI-X" :
683 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
684 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
685
686 t4_set_desc(sc);
687
688done:
689 if (rc != 0 && sc->cdev) {
690 /* cdev was created and so cxgbetool works; recover that way. */
691 device_printf(dev,
692 "error during attach, adapter is now in recovery mode.\n");
693 rc = 0;
694 }
695
696 if (rc != 0)
697 t4_detach(dev);
698 else
699 t4_sysctls(sc);
700
701 return (rc);
702}
703
704/*
705 * Idempotent
706 */
707static int
708t4_detach(device_t dev)
709{
710 struct adapter *sc;
711 struct port_info *pi;
712 int i, rc;
713
714 sc = device_get_softc(dev);
715
716 if (sc->flags & FULL_INIT_DONE)
717 t4_intr_disable(sc);
718
719 if (sc->cdev) {
720 destroy_dev(sc->cdev);
721 sc->cdev = NULL;
722 }
723
724 rc = bus_generic_detach(dev);
725 if (rc) {
726 device_printf(dev,
727 "failed to detach child devices: %d\n", rc);
728 return (rc);
729 }
730
731 for (i = 0; i < MAX_NPORTS; i++) {
732 pi = sc->port[i];
733 if (pi) {
734 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
735 if (pi->dev)
736 device_delete_child(dev, pi->dev);
737
738 mtx_destroy(&pi->pi_lock);
739 free(pi, M_CXGBE);
740 }
741 }
742
743 if (sc->flags & FULL_INIT_DONE)
744 adapter_full_uninit(sc);
745
746 if (sc->flags & FW_OK)
747 t4_fw_bye(sc, sc->mbox);
748
749 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
750 pci_release_msi(dev);
751
752 if (sc->regs_res)
753 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
754 sc->regs_res);
755
756 if (sc->msix_res)
757 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
758 sc->msix_res);
759
760 if (sc->l2t)
761 t4_free_l2t(sc->l2t);
762
763#ifndef TCP_OFFLOAD_DISABLE
764 free(sc->sge.ofld_rxq, M_CXGBE);
765 free(sc->sge.ofld_txq, M_CXGBE);
766#endif
767 free(sc->irq, M_CXGBE);
768 free(sc->sge.rxq, M_CXGBE);
769 free(sc->sge.txq, M_CXGBE);
770 free(sc->sge.ctrlq, M_CXGBE);
771 free(sc->sge.iqmap, M_CXGBE);
772 free(sc->sge.eqmap, M_CXGBE);
773 free(sc->tids.ftid_tab, M_CXGBE);
774 t4_destroy_dma_tag(sc);
775 if (mtx_initialized(&sc->sc_lock)) {
776 mtx_lock(&t4_list_lock);
777 SLIST_REMOVE(&t4_list, sc, adapter, link);
778 mtx_unlock(&t4_list_lock);
779 mtx_destroy(&sc->sc_lock);
780 }
781
782 if (mtx_initialized(&sc->sfl_lock))
783 mtx_destroy(&sc->sfl_lock);
784
785 bzero(sc, sizeof(*sc));
786
787 return (0);
788}
789
790
791static int
792cxgbe_probe(device_t dev)
793{
794 char buf[128];
795 struct port_info *pi = device_get_softc(dev);
796
797 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
798 device_set_desc_copy(dev, buf);
799
800 return (BUS_PROBE_DEFAULT);
801}
802
803#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
804 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
805 IFCAP_VLAN_HWTSO)

--- 10 unchanged lines hidden ---

816 if (ifp == NULL) {
817 device_printf(dev, "Cannot allocate ifnet\n");
818 return (ENOMEM);
819 }
820 pi->ifp = ifp;
821 ifp->if_softc = pi;
822
823 callout_init(&pi->tick, CALLOUT_MPSAFE);
824
825 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
826 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
827
828 ifp->if_init = cxgbe_init;
829 ifp->if_ioctl = cxgbe_ioctl;
830 ifp->if_start = cxgbe_start;
831 ifp->if_transmit = cxgbe_transmit;
832 ifp->if_qflush = cxgbe_qflush;
833
834 ifp->if_snd.ifq_drv_maxlen = 1024;
835 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
836 IFQ_SET_READY(&ifp->if_snd);
837
838 ifp->if_capabilities = T4_CAP;
839#ifndef TCP_OFFLOAD_DISABLE
840 if (is_offload(pi->adapter))
841 ifp->if_capabilities |= IFCAP_TOE4;
842#endif
843 ifp->if_capenable = T4_CAP_ENABLE;
844 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
845
846 /* Initialize ifmedia for this port */
847 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
848 cxgbe_media_status);
849 build_medialist(pi);
850
851 ether_ifattach(ifp, pi->hw_addr);
852
853#ifndef TCP_OFFLOAD_DISABLE
854 if (is_offload(pi->adapter)) {
855 device_printf(dev,
856 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
857 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
858 } else
859#endif
860 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
861
862 cxgbe_sysctls(pi);
863
864 return (0);
865}
866
867static int
868cxgbe_detach(device_t dev)
869{
870 struct port_info *pi = device_get_softc(dev);
871 struct adapter *sc = pi->adapter;
872 struct ifnet *ifp = pi->ifp;
873
874 /* Tell if_ioctl and if_init that the port is going away */
875 ADAPTER_LOCK(sc);
876 SET_DOOMED(pi);
877 wakeup(&sc->flags);
878 while (IS_BUSY(sc))
879 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
880 SET_BUSY(sc);
881 ADAPTER_UNLOCK(sc);
882
883 PORT_LOCK(pi);
884 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
885 callout_stop(&pi->tick);
886 PORT_UNLOCK(pi);
887 callout_drain(&pi->tick);
888
889 /* Let detach proceed even if these fail. */
890 cxgbe_uninit_synchronized(pi);
891 port_full_uninit(pi);
892
893 ifmedia_removeall(&pi->media);
894 ether_ifdetach(pi->ifp);
895 if_free(pi->ifp);
896
897 ADAPTER_LOCK(sc);
898 CLR_BUSY(sc);
899 wakeup_one(&sc->flags);

--- 122 unchanged lines hidden ---

1022 if (IFCAP_TXCSUM & ifp->if_capenable)
1023 ifp->if_hwassist |= CSUM_TSO;
1024 else {
1025 ifp->if_capenable &= ~IFCAP_TSO;
1026 ifp->if_hwassist &= ~CSUM_TSO;
1027 if_printf(ifp,
1028 "enable txcsum first.\n");
1029 rc = EAGAIN;
1030 goto fail;
1031 }
1032 } else
1033 ifp->if_hwassist &= ~CSUM_TSO;
1034 }
1035 if (mask & IFCAP_LRO) {
1036#ifdef INET
1037 int i;
1038 struct sge_rxq *rxq;
1039
1040 ifp->if_capenable ^= IFCAP_LRO;
1041 for_each_rxq(pi, i, rxq) {
1042 if (ifp->if_capenable & IFCAP_LRO)
1043 rxq->iq.flags |= IQ_LRO_ENABLED;
1044 else
1045 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1046 }
1047#endif
1048 }
1049#ifndef TCP_OFFLOAD_DISABLE
1050 if (mask & IFCAP_TOE) {
1051 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1052
1053 rc = toe_capability(pi, enable);
1054 if (rc != 0)
1055 goto fail;
1056
1057 ifp->if_capenable ^= mask;
1058 }
1059#endif
1060 if (mask & IFCAP_VLAN_HWTAGGING) {
1061 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1062 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1063 PORT_LOCK(pi);
1064 rc = update_mac_settings(pi, XGMAC_VLANEX);
1065 PORT_UNLOCK(pi);

--- 48 unchanged lines hidden ---

1114 struct port_info *pi = ifp->if_softc;
1115 struct adapter *sc = pi->adapter;
1116 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1117 struct buf_ring *br;
1118 int rc;
1119
1120 M_ASSERTPKTHDR(m);
1121
1122 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1123 m_freem(m);
1124 return (ENETDOWN);
1125 }
1126
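	/*
	 * (Illustrative) M_FLOWID means the stack has already hashed this
	 * packet to a flow; e.g. flowid 13 on a port with 4 txq's selects
	 * txq[first_txq + 1], keeping each flow on a single tx queue.
	 */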
1127 if (m->m_flags & M_FLOWID)
1128 txq += (m->m_pkthdr.flowid % pi->ntxq);
1129 br = txq->br;
1130
1131 if (TXQ_TRYLOCK(txq) == 0) {
1132 struct sge_eq *eq = &txq->eq;
1133
1134 /*
1135 * It is possible that t4_eth_tx finishes up and releases the
1136 * lock between the TRYLOCK above and the drbr_enqueue here. We
1137 * need to make sure that this mbuf doesn't just sit there in
1138 * the drbr.
1139 */
1140
1141 rc = drbr_enqueue(ifp, br, m);
1142 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1143 !(eq->flags & EQ_DOOMED))
1144 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1145 return (rc);
1146 }
1147
1148 /*
1149 * txq->m is the mbuf that is held up due to a temporary shortage of
1150 * resources and it should be put on the wire first. Then what's in
1151 * drbr and finally the mbuf that was just passed in to us.
1152 *
1153 * Return code should indicate the fate of the mbuf that was passed in

--- 24 unchanged lines hidden ---

1178static void
1179cxgbe_qflush(struct ifnet *ifp)
1180{
1181 struct port_info *pi = ifp->if_softc;
1182 struct sge_txq *txq;
1183 int i;
1184 struct mbuf *m;
1185
1186 /* queues do not exist if !PORT_INIT_DONE. */
1187 if (pi->flags & PORT_INIT_DONE) {
1188 for_each_txq(pi, i, txq) {
1189 TXQ_LOCK(txq);
1190 m_freem(txq->m);
1191 txq->m = NULL;
1192 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1193 m_freem(m);
1194 TXQ_UNLOCK(txq);
1195 }
1196 }
1197 if_qflush(ifp);
1198}
1199

--- 97 unchanged lines hidden ---

1297 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1298 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1299}
1300
1301static int
1302cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1303 struct intrs_and_queues *iaq)
1304{
1305 int rc, itype, navail, nrxq10g, nrxq1g, n;
1306 int nofldrxq10g = 0, nofldrxq1g = 0;
1307
1308 bzero(iaq, sizeof(*iaq));
1309
1310 iaq->ntxq10g = t4_ntxq10g;
1311 iaq->ntxq1g = t4_ntxq1g;
1312 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1313 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1314#ifndef TCP_OFFLOAD_DISABLE
1315 iaq->nofldtxq10g = t4_nofldtxq10g;
1316 iaq->nofldtxq1g = t4_nofldtxq1g;
1317 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1318 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1319#endif
1320
1321 for (itype = INTR_MSIX; itype; itype >>= 1) {
1322
1323 if ((itype & t4_intr_types) == 0)
1324 continue; /* not allowed */
1325
1326 if (itype == INTR_MSIX)
1327 navail = pci_msix_count(sc->dev);
1328 else if (itype == INTR_MSI)
1329 navail = pci_msi_count(sc->dev);
1330 else
1331 navail = 1;
1332restart:
1333 if (navail == 0)
1334 continue;
1335
1336 iaq->intr_type = itype;
1337 iaq->intr_flags = 0;
1338
1339 /*
1340 * Best option: an interrupt vector for errors, one for the
1341 * firmware event queue, and one for each rxq (NIC as well
1342 * as offload).
1343 */
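	/*
	 * (Illustrative) With 2 x 10G ports, nrxq10g = 8 and nofldrxq10g = 2,
	 * this works out to T4_EXTRA_INTR + 2 * (8 + 2) vectors, i.e. 22
	 * MSI-X vectors if T4_EXTRA_INTR is 2 (error + firmware event queue).
	 */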
1344 iaq->nirq = T4_EXTRA_INTR;
1345 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1346 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1347 if (iaq->nirq <= navail &&
1348 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1349 iaq->intr_flags |= INTR_DIRECT;
1350 goto allocate;
1351 }
1352
1353 /*
1354 * Second best option: an interrupt vector for errors, one for
1355 * the firmware event queue, and one each for either NIC or
1356 * offload rxq's.
1357 */
1358 iaq->nirq = T4_EXTRA_INTR;
1359 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1360 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1361 if (iaq->nirq <= navail &&
1362 (itype != INTR_MSI || powerof2(iaq->nirq)))
1363 goto allocate;
1364
1365 /*
1366 * Next best option: an interrupt vector for errors, one for the
1367 * firmware event queue, and at least one per port. At this
1368 * point we know we'll have to downsize nrxq or nofldrxq to fit
1369 * what's available to us.
1370 */
1371 iaq->nirq = T4_EXTRA_INTR;
1372 iaq->nirq += n10g + n1g;
1373 if (iaq->nirq <= navail) {
1374 int leftover = navail - iaq->nirq;
1375
1376 if (n10g > 0) {
1377 int target = max(nrxq10g, nofldrxq10g);
1378
1379 n = 1;
1380 while (n < target && leftover >= n10g) {
1381 leftover -= n10g;
1382 iaq->nirq += n10g;
1383 n++;
1384 }
1385 iaq->nrxq10g = min(n, nrxq10g);
1386#ifndef TCP_OFFLOAD_DISABLE
1387 iaq->nofldrxq10g = min(n, nofldrxq10g);
1388#endif
1389 }
1390
1391 if (n1g > 0) {
1392 int target = max(nrxq1g, nofldrxq1g);
1393
1394 n = 1;
1395 while (n < target && leftover >= n1g) {
1396 leftover -= n1g;
1397 iaq->nirq += n1g;
1398 n++;
1399 }
1400 iaq->nrxq1g = min(n, nrxq1g);
1401#ifndef TCP_OFFLOAD_DISABLE
1402 iaq->nofldrxq1g = min(n, nofldrxq1g);
1403#endif
1404 }
1405
1406 if (itype != INTR_MSI || powerof2(iaq->nirq))
1407 goto allocate;
1408 }
1409
1410 /*
1411 * Least desirable option: one interrupt vector for everything.
1412 */
1413 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1414#ifndef TCP_OFFLOAD_DISABLE
1415 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1416#endif
1417
1418allocate:
1419 navail = iaq->nirq;
1420 rc = 0;
1421 if (itype == INTR_MSIX)
1422 rc = pci_alloc_msix(sc->dev, &navail);
1423 else if (itype == INTR_MSI)
1424 rc = pci_alloc_msi(sc->dev, &navail);
1425
1426 if (rc == 0) {
1427 if (navail == iaq->nirq)
1428 return (0);
1429
1430 /*
1431 * Didn't get the number requested. Use whatever number
1432 * the kernel is willing to allocate (it's in navail).
1433 */
1434 device_printf(sc->dev, "fewer vectors than requested, "
1435 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1436 itype, iaq->nirq, navail);
1437 pci_release_msi(sc->dev);
1438 goto restart;
1439 }
1440
1441 device_printf(sc->dev,
1442 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1443 itype, rc, iaq->nirq, navail);
1444 }
1445
1446 device_printf(sc->dev,
1447 "failed to find a usable interrupt type. "
1448 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1449 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1450
1451 return (ENXIO);
1452}
1453
1454/*
1455 * Install a compatible firmware (if required), establish contact with it (by
1456 * saying hello), and reset the device. If we end up as the master driver,
1457 * partition adapter resources by providing a configuration file to the
1458 * firmware.
1459 */
1460static int
1461prep_firmware(struct adapter *sc)
1462{
1463 const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
1464 int rc;
1465 enum dev_state state;
1466
1467 default_cfg = firmware_get(T4_CFGNAME);
1468
1469 /* Check firmware version and install a different one if necessary */
1470 rc = t4_check_fw_version(sc);
1471 if (rc != 0) {
1472 uint32_t v = 0;
1473
1474 fw = firmware_get(T4_FWNAME);
1475 if (fw != NULL) {
1476 const struct fw_hdr *hdr = (const void *)fw->data;
1477
1478 v = ntohl(hdr->fw_ver);
1479
1480 /*
1481 * The firmware module will not be used if it isn't the
1482 * same major version as what the driver was compiled
1483 * with.
1484 */
1485 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1486 device_printf(sc->dev,
1487 "Found firmware image but version %d "
1488 "can not be used with this driver (%d)\n",
1489 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1490
1491 firmware_put(fw, FIRMWARE_UNLOAD);
1492 fw = NULL;
1493 }
1494 }
1495
1496 if (fw == NULL && rc < 0) {
1497 device_printf(sc->dev, "No usable firmware. "
1498 "card has %d.%d.%d, driver compiled with %d.%d.%d",
1499 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1500 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1501 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1502 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1503 FW_VERSION_MICRO);
1504 rc = EAGAIN;
1505 goto done;
1506 }
1507
1508 /*
1509 * Always upgrade, even for minor/micro/build mismatches.
1510 * Downgrade only for a major version mismatch or if
1511 * force_firmware_install was specified.
1512 */
1513 if (fw != NULL && (rc < 0 || v > sc->params.fw_vers)) {
1514 device_printf(sc->dev,
1515 "installing firmware %d.%d.%d.%d on card.\n",
1516 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1517 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1518
1519 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1520 if (rc != 0) {
1521 device_printf(sc->dev,
1522 "failed to install firmware: %d\n", rc);
1523 goto done;
1524 } else {
1525 /* refresh */
1526 (void) t4_check_fw_version(sc);
1527 }
1528 }
1529 }
1530
1531 /* Contact firmware. */
1532 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1533 if (rc < 0) {
1534 rc = -rc;
1535 device_printf(sc->dev,
1536 "failed to connect to the firmware: %d.\n", rc);
1537 goto done;
1538 }
1539 if (rc == sc->mbox)
1540 sc->flags |= MASTER_PF;
1541
1542 /* Reset device */
1543 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1544 if (rc != 0) {
1545 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1546 if (rc != ETIMEDOUT && rc != EIO)
1547 t4_fw_bye(sc, sc->mbox);
1548 goto done;
1549 }
1550
1551 /* Partition adapter resources as specified in the config file. */
1552 if (sc->flags & MASTER_PF) {
1553 if (strncmp(t4_cfg_file, "default", sizeof(t4_cfg_file))) {
1554 char s[32];
1555
1556 snprintf(s, sizeof(s), "t4fw_cfg_%s", t4_cfg_file);
1557 cfg = firmware_get(s);
1558 if (cfg == NULL) {
1559 device_printf(sc->dev,
1560 "unable to locate %s module, "
1561 "will use default config file.\n", s);
1562 }
1563 }
1564
1565 rc = partition_resources(sc, cfg ? cfg : default_cfg);
1566 if (rc != 0)
1567 goto done; /* error message displayed already */
1568 }
1569
1570 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1571 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1572 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1573 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1574 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1575 sc->flags |= FW_OK;
1576
1577done:
1578 if (fw != NULL)
1579 firmware_put(fw, FIRMWARE_UNLOAD);
1580 if (cfg != NULL)
1581 firmware_put(cfg, FIRMWARE_UNLOAD);
1582 if (default_cfg != NULL)
1583 firmware_put(default_cfg, FIRMWARE_UNLOAD);
1584
1585 return (rc);
1586}
1587
1588#define FW_PARAM_DEV(param) \
1589 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1590 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1591#define FW_PARAM_PFVF(param) \
1592 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1593 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1594
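/*
 * (Illustrative) FW_PARAM_DEV(PORTVEC) composes the 32-bit id of the
 * device-wide PORTVEC parameter from its mnemonic and param fields; the id
 * is handed to t4_query_params() and the firmware returns the parameter's
 * current value.
 */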
1595/*
1596 * Upload configuration file to card's memory.
1597 */
1598static int
1599upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
1600 uint32_t *ma)
1601{
1602 int rc, i;
1603 uint32_t param, val, mtype, maddr, bar, off, win, remaining;
1604 const uint32_t *b;
1605
1606 /* Figure out where the firmware wants us to upload it. */
1607 param = FW_PARAM_DEV(CF);
1608 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1609 if (rc != 0) {
1610 /* Firmwares without config file support will fail this way */
1611 device_printf(sc->dev,
1612 "failed to query config file location: %d.\n", rc);
1613 return (rc);
1614 }
1615 *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1616 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1617
1618 if (maddr & 3) {
1619 device_printf(sc->dev,
1620 "cannot upload config file (type %u, addr %x).\n",
1621 mtype, maddr);
1622 return (EFAULT);
1623 }
1624
1625 /* Translate mtype/maddr to an address suitable for the PCIe window */
1626 val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1627 val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
1628 switch (mtype) {
1629 case FW_MEMTYPE_CF_EDC0:
1630 if (!(val & F_EDRAM0_ENABLE))
1631 goto err;
1632 bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1633 maddr += G_EDRAM0_BASE(bar) << 20;
1634 break;
1635
1636 case FW_MEMTYPE_CF_EDC1:
1637 if (!(val & F_EDRAM1_ENABLE))
1638 goto err;
1639 bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1640 maddr += G_EDRAM1_BASE(bar) << 20;
1641 break;
1642
1643 case FW_MEMTYPE_CF_EXTMEM:
1644 if (!(val & F_EXT_MEM_ENABLE))
1645 goto err;
1646 bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1647 maddr += G_EXT_MEM_BASE(bar) << 20;
1648 break;
1649
1650 default:
1651err:
1652 device_printf(sc->dev,
1653 "cannot upload config file (type %u, enabled %u).\n",
1654 mtype, val);
1655 return (EFAULT);
1656 }
1657
1658 /*
1659 * Position the PCIe window (we use memwin2) to the 16B aligned area
1660 * just at/before the upload location.
1661 */
1662 win = maddr & ~0xf;
1663 off = maddr - win; /* offset from the start of the window. */
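	/* e.g., maddr 0x12344 yields win 0x12340 and off 4. */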
1664 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
1665 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2)); /* flush */
1666
1667 remaining = fw->datasize;
1668 if (remaining > FLASH_CFG_MAX_SIZE ||
1669 remaining > MEMWIN2_APERTURE - off) {
1670 device_printf(sc->dev, "cannot upload config file all at once "
1671 "(size %u, max %u, room %u).\n",
1672 remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
1673 return (EFBIG);
1674 }
1675
1676 /*
1677 * XXX: sheer laziness. We deliberately added 4 bytes of useless
1678 * stuffing/comments at the end of the config file so it's ok to simply
1679 * throw away the last remaining bytes when the config file is not an
1680 * exact multiple of 4.
1681 */
1682 b = fw->data;
1683 for (i = 0; remaining >= 4; i += 4, remaining -= 4)
1684 t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);
1685
1686 return (rc);
1687}
1688
1689/*
1690 * Partition chip resources for use between various PFs, VFs, etc. This is done
1691 * by uploading the firmware configuration file to the adapter and instructing
1692 * the firmware to process it.
1693 */
1694static int
1695partition_resources(struct adapter *sc, const struct firmware *cfg)
1696{
1697 int rc;
1698 struct fw_caps_config_cmd caps;
1699 uint32_t mtype, maddr, finicsum, cfcsum;
1700
1701 rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
1702 if (rc != 0) {
1703 mtype = FW_MEMTYPE_CF_FLASH;
1704 maddr = t4_flash_cfg_addr(sc);
1705 }
1706
1707 bzero(&caps, sizeof(caps));
1708 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1709 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1710 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1711 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1712 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1713 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1714 if (rc != 0) {
1715 device_printf(sc->dev,
1716 "failed to pre-process config file: %d.\n", rc);
1717 return (rc);
1718 }
1719
1720 finicsum = be32toh(caps.finicsum);
1721 cfcsum = be32toh(caps.cfcsum);
1722 if (finicsum != cfcsum) {
1723 device_printf(sc->dev,
1724 "WARNING: config file checksum mismatch: %08x %08x\n",
1725 finicsum, cfcsum);
1726 }
1727 sc->cfcsum = cfcsum;
1728
1729#define LIMIT_CAPS(x) do { \
1730 caps.x &= htobe16(t4_##x##_allowed); \
1731 sc->x = htobe16(caps.x); \
1732} while (0)
1733
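/*
 * (Illustrative) LIMIT_CAPS(toecaps) expands to:
 *	caps.toecaps &= htobe16(t4_toecaps_allowed);
 *	sc->toecaps = htobe16(caps.toecaps);
 * so any capability bit not allowed by the corresponding tunable is cleared
 * before the capabilities are written back to the firmware.
 */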
1734 /*
1735 * Let the firmware know what features will (not) be used so it can tune
1736 * things accordingly.
1737 */
1738 LIMIT_CAPS(linkcaps);
1739 LIMIT_CAPS(niccaps);
1740 LIMIT_CAPS(toecaps);
1741 LIMIT_CAPS(rdmacaps);
1742 LIMIT_CAPS(iscsicaps);
1743 LIMIT_CAPS(fcoecaps);
1744#undef LIMIT_CAPS
1745
1746 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1747 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1748 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1749 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
1750 if (rc != 0) {
1751 device_printf(sc->dev,
1752 "failed to process config file: %d.\n", rc);
1753 return (rc);
1754 }
1755
1756 return (0);
1757}
1758
1759/*
1760 * Retrieve parameters that are needed (or nice to have) prior to calling
1761 * t4_sge_init and t4_fw_initialize.
1762 */
1763static int
1764get_params__pre_init(struct adapter *sc)
1765{
1766 int rc;
1767 uint32_t param[2], val[2];
1768 struct fw_devlog_cmd cmd;
1769 struct devlog_params *dlog = &sc->params.devlog;
1770
1771 param[0] = FW_PARAM_DEV(PORTVEC);
1772 param[1] = FW_PARAM_DEV(CCLK);
1773 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1774 if (rc != 0) {
1775 device_printf(sc->dev,
1776 "failed to query parameters (pre_init): %d.\n", rc);
1777 return (rc);
1778 }
1779
1780 sc->params.portvec = val[0];
1781 sc->params.nports = 0;
1782 while (val[0]) {
1783 sc->params.nports++;
1784 val[0] &= val[0] - 1;
1785 }
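	/*
	 * The loop above is Kernighan's population count: "val &= val - 1"
	 * clears the lowest set bit each iteration. E.g., a portvec of 0xb
	 * (ports 0, 1, and 3) gives nports = 3.
	 */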
1786
1787 sc->params.vpd.cclk = val[1];
1788
1789 /* Read device log parameters. */
1790 bzero(&cmd, sizeof(cmd));
1791 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1792 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1793 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
1794 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
1795 if (rc != 0) {
1796 device_printf(sc->dev,
1797 "failed to get devlog parameters: %d.\n", rc);
1798 bzero(dlog, sizeof (*dlog));
1799 rc = 0; /* devlog isn't critical for device operation */
1800 } else {
1801 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
1802 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1803 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1804 dlog->size = be32toh(cmd.memsize_devlog);
1805 }
1806
1807 return (rc);
1808}
1809
1810/*
1811 * Retrieve various parameters that are of interest to the driver. The device
1812 * has been initialized by the firmware at this point.
1813 */
1814static int
1815get_params__post_init(struct adapter *sc)
1816{
1817 int rc;
1818 uint32_t param[7], val[7];
1819 struct fw_caps_config_cmd caps;
1820
1821 param[0] = FW_PARAM_PFVF(IQFLINT_START);
1822 param[1] = FW_PARAM_PFVF(EQ_START);
1823 param[2] = FW_PARAM_PFVF(FILTER_START);
1824 param[3] = FW_PARAM_PFVF(FILTER_END);
1825 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
1826 if (rc != 0) {
1827 device_printf(sc->dev,
1828 "failed to query parameters (post_init): %d.\n", rc);
1829 return (rc);
1830 }
1831
1832 sc->sge.iq_start = val[0];
1833 sc->sge.eq_start = val[1];
1834 sc->tids.ftid_base = val[2];
1835 sc->tids.nftids = val[3] - val[2] + 1;
1836
1837 /* get capabilities */
1838 bzero(&caps, sizeof(caps));
1839 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1840 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1841 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1842 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1843 if (rc != 0) {
1844 device_printf(sc->dev,
1845 "failed to get card capabilities: %d.\n", rc);
1846 return (rc);
1847 }
1848
1849 if (caps.toecaps) {
1850 /* query offload-related parameters */
1851 param[0] = FW_PARAM_DEV(NTID);
1852 param[1] = FW_PARAM_PFVF(SERVER_START);
1853 param[2] = FW_PARAM_PFVF(SERVER_END);
1854 param[3] = FW_PARAM_PFVF(TDDP_START);
1855 param[4] = FW_PARAM_PFVF(TDDP_END);
1856 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1857 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1858 if (rc != 0) {
1859 device_printf(sc->dev,
1860 "failed to query TOE parameters: %d.\n", rc);
1861 return (rc);
1862 }
1863 sc->tids.ntids = val[0];
1864 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1865 sc->tids.stid_base = val[1];
1866 sc->tids.nstids = val[2] - val[1] + 1;
1867 sc->vres.ddp.start = val[3];
1868 sc->vres.ddp.size = val[4] - val[3] + 1;
1869 sc->params.ofldq_wr_cred = val[5];
1870 sc->params.offload = 1;
1871 }
1872 if (caps.rdmacaps) {
1873 param[0] = FW_PARAM_PFVF(STAG_START);
1874 param[1] = FW_PARAM_PFVF(STAG_END);
1875 param[2] = FW_PARAM_PFVF(RQ_START);
1876 param[3] = FW_PARAM_PFVF(RQ_END);
1877 param[4] = FW_PARAM_PFVF(PBL_START);
1878 param[5] = FW_PARAM_PFVF(PBL_END);
1879 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1880 if (rc != 0) {
1881 device_printf(sc->dev,
1882 "failed to query RDMA parameters(1): %d.\n", rc);
1883 return (rc);
1884 }
1885 sc->vres.stag.start = val[0];
1886 sc->vres.stag.size = val[1] - val[0] + 1;
1887 sc->vres.rq.start = val[2];
1888 sc->vres.rq.size = val[3] - val[2] + 1;
1889 sc->vres.pbl.start = val[4];
1890 sc->vres.pbl.size = val[5] - val[4] + 1;
1891
1892 param[0] = FW_PARAM_PFVF(SQRQ_START);
1893 param[1] = FW_PARAM_PFVF(SQRQ_END);
1894 param[2] = FW_PARAM_PFVF(CQ_START);
1895 param[3] = FW_PARAM_PFVF(CQ_END);
1896 param[4] = FW_PARAM_PFVF(OCQ_START);
1897 param[5] = FW_PARAM_PFVF(OCQ_END);
1898 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1899 if (rc != 0) {
1900 device_printf(sc->dev,
1901 "failed to query RDMA parameters(2): %d.\n", rc);
1902 return (rc);
1903 }
1904 sc->vres.qp.start = val[0];
1905 sc->vres.qp.size = val[1] - val[0] + 1;
1906 sc->vres.cq.start = val[2];
1907 sc->vres.cq.size = val[3] - val[2] + 1;
1908 sc->vres.ocq.start = val[4];
1909 sc->vres.ocq.size = val[5] - val[4] + 1;
1910 }
1911 if (caps.iscsicaps) {
1912 param[0] = FW_PARAM_PFVF(ISCSI_START);
1913 param[1] = FW_PARAM_PFVF(ISCSI_END);
1914 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1915 if (rc != 0) {
1916 device_printf(sc->dev,
1917 "failed to query iSCSI parameters: %d.\n", rc);
1918 return (rc);
1919 }
1920 sc->vres.iscsi.start = val[0];
1921 sc->vres.iscsi.size = val[1] - val[0] + 1;
1922 }
1923
1924 /* These are finalized by FW initialization; load their values now */
1925 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1926 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1927 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1928 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1929
1930 return (rc);
1931}
1932
1933#undef FW_PARAM_PFVF
1934#undef FW_PARAM_DEV
1935
1936static void
1937t4_set_desc(struct adapter *sc)
1938{
1939 char buf[128];
1940 struct adapter_params *p = &sc->params;
1941
1942 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
1943 p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec);
1944
1945 device_set_desc_copy(sc->dev, buf);
1946}
1947
1948static void
1949build_medialist(struct port_info *pi)
1950{
1951 struct ifmedia *media = &pi->media;

--- 200 unchanged lines hidden ---

2152 return (rc);
2153}
2154
2155static int
2156cxgbe_init_synchronized(struct port_info *pi)
2157{
2158 struct adapter *sc = pi->adapter;
2159 struct ifnet *ifp = pi->ifp;
2160 int rc = 0;
2161
2162 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2163
2164 if (isset(&sc->open_device_map, pi->port_id)) {
2165 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2166 ("mismatch between open_device_map and if_drv_flags"));
2167 return (0); /* already running */
2168 }
2169
2170 if (!(sc->flags & FULL_INIT_DONE) &&
2171 ((rc = adapter_full_init(sc)) != 0))
2172 return (rc); /* error message displayed already */
2173
2174 if (!(pi->flags & PORT_INIT_DONE) &&
2175 ((rc = port_full_init(pi)) != 0))
2176 return (rc); /* error message displayed already */
2177
2178 PORT_LOCK(pi);
2179 rc = update_mac_settings(pi, XGMAC_ALL);
2180 PORT_UNLOCK(pi);
2181 if (rc)
2182 goto done; /* error message displayed already */
2183
2184 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2185 if (rc != 0) {
2186 if_printf(ifp, "start_link failed: %d\n", rc);
2187 goto done;
2188 }
2189
2190 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2191 if (rc != 0) {
2192 if_printf(ifp, "enable_vi failed: %d\n", rc);
2193 goto done;
2194 }
2195
2196 /* all ok */
2197 setbit(&sc->open_device_map, pi->port_id);
2198 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2199
2200 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2201done:
2202 if (rc != 0)
2203 cxgbe_uninit_synchronized(pi);
2204
2205 return (rc);
2206}

--- 36 unchanged lines hidden ---

2243 */
2244static int
2245cxgbe_uninit_synchronized(struct port_info *pi)
2246{
2247 struct adapter *sc = pi->adapter;
2248 struct ifnet *ifp = pi->ifp;
2249 int rc;
2250
2251 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2252
2253 /*
2254 * Disable the VI so that all its data in either direction is discarded
2255 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2256 * tick) intact as the TP can deliver negative advice or data that it's
2257 * holding in its RAM (for an offloaded connection) even after the VI is
2258 * disabled.
2259 */
2260 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2261 if (rc) {
2262 if_printf(ifp, "disable_vi failed: %d\n", rc);
2263 return (rc);
2264 }
2265
2266 clrbit(&sc->open_device_map, pi->port_id);
2267 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2268
2269 pi->link_cfg.link_ok = 0;
2270 pi->link_cfg.speed = 0;
2271 t4_os_link_changed(sc, pi->port_id, 0);
2272
2273 return (0);
2274}
2275
2276#define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
2277 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
2278 if (rc != 0) \
2279 goto done; \
2280} while (0)
2281
2282static int
2283adapter_full_init(struct adapter *sc)
2284{
2285 int rc, i, rid, p, q;
2286 char s[8];
2287 struct irq *irq;
2288 struct port_info *pi;
2289 struct sge_rxq *rxq;
2290#ifndef TCP_OFFLOAD_DISABLE
2291 struct sge_ofld_rxq *ofld_rxq;
2292#endif
2293
2294 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2295 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
2296 ("%s: FULL_INIT_DONE already", __func__));
2297
2298 /*
2299 * queues that belong to the adapter (not any particular port).
2300 */
2301 rc = t4_setup_adapter_queues(sc);
2302 if (rc != 0)
2303 goto done;
2304
2305 for (i = 0; i < ARRAY_SIZE(sc->tq); i++) {
2306 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
2307 taskqueue_thread_enqueue, &sc->tq[i]);
2308 if (sc->tq[i] == NULL) {
2309 device_printf(sc->dev,
2310 "failed to allocate task queue %d\n", i);
2311 rc = ENOMEM;
2312 goto done;
2313 }
2314 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
2315 device_get_nameunit(sc->dev), i);
2316 }
2317
2318 /*
2319 * Setup interrupts.
2320 */
2321 irq = &sc->irq[0];
2322 rid = sc->intr_type == INTR_INTX ? 0 : 1;
2323 if (sc->intr_count == 1) {
2324 KASSERT(!(sc->flags & INTR_DIRECT),
2325 ("%s: single interrupt && INTR_DIRECT?", __func__));
2326
2327 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
2328 } else {
2329 /* Multiple interrupts. */
2330 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2331 ("%s: too few intr.", __func__));
2332
2333 /* The first one is always error intr */
2334 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
2335 irq++;
2336 rid++;
2337
2338 /* The second one is always the firmware event queue */
2339 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
2340 irq++;
2341 rid++;
2342
2343 /*
2344 * Note that if INTR_DIRECT is not set then either the NIC rx
2345 * queues or (exclusive or) the TOE rx queues will be taking
2346 * direct interrupts.
2347 *
2348 * There is no need to check for is_offload(sc) as nofldrxq
2349 * will be 0 if offload is disabled.
2350 */
2351 for_each_port(sc, p) {
2352 pi = sc->port[p];
2353
2354#ifndef TCP_OFFLOAD_DISABLE
2355 /*
2356 * Skip over the NIC queues if they aren't taking direct
2357 * interrupts.
2358 */
2359 if (!(sc->flags & INTR_DIRECT) &&
2360 pi->nofldrxq > pi->nrxq)
2361 goto ofld_queues;
2362#endif
2363 rxq = &sc->sge.rxq[pi->first_rxq];
2364 for (q = 0; q < pi->nrxq; q++, rxq++) {
2365 snprintf(s, sizeof(s), "%d.%d", p, q);
2366 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, rxq, s);
2367 irq++;
2368 rid++;
2369 }
2370
2371#ifndef TCP_OFFLOAD_DISABLE
2372 /*
2373 * Skip over the offload queues if they aren't taking
2374 * direct interrupts.
2375 */
2376 if (!(sc->flags & INTR_DIRECT))
2377 continue;
2378ofld_queues:
2379 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
2380 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
2381 snprintf(s, sizeof(s), "%d,%d", p, q);
2382 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, ofld_rxq, s);
2383 irq++;
2384 rid++;
2385 }
2386#endif
2387 }
2388 }
2389
2390 t4_intr_enable(sc);
2391 sc->flags |= FULL_INIT_DONE;
2392done:
2393 if (rc != 0)
2394 adapter_full_uninit(sc);
2395
2396 return (rc);
2397}
2398#undef T4_ALLOC_IRQ
2399
2400static int
2401adapter_full_uninit(struct adapter *sc)
2402{
2403 int i;
2404
2405 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2406
2407 t4_teardown_adapter_queues(sc);
2408
2409 for (i = 0; i < sc->intr_count; i++)
2410 t4_free_irq(sc, &sc->irq[i]);
2411
2412 for (i = 0; i < ARRAY_SIZE(sc->tq) && sc->tq[i]; i++) {
2413 taskqueue_free(sc->tq[i]);
2414 sc->tq[i] = NULL;
2415 }
2416
2417 sc->flags &= ~FULL_INIT_DONE;
2418
2419 return (0);
2420}
2421
2422static int
2423port_full_init(struct port_info *pi)
2424{
2425 struct adapter *sc = pi->adapter;
2426 struct ifnet *ifp = pi->ifp;
2427 uint16_t *rss;
2428 struct sge_rxq *rxq;
2429 int rc, i;
2430
2431 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2432 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
2433 ("%s: PORT_INIT_DONE already", __func__));
2434
2435 sysctl_ctx_init(&pi->ctx);
2436 pi->flags |= PORT_SYSCTL_CTX;
2437
2438 /*
2439 * Allocate tx/rx/fl queues for this port.
2440 */
2441 rc = t4_setup_port_queues(pi);
2442 if (rc != 0)
2443 goto done; /* error message displayed already */
2444
2445 /*
2446 * Setup RSS for this port.
2447 */
2448 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
2449 M_ZERO | M_WAITOK);
2450 for_each_rxq(pi, i, rxq) {
2451 rss[i] = rxq->iq.abs_id;
2452 }
2453 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2454 pi->rss_size, rss, pi->nrxq);
2455 free(rss, M_CXGBE);
2456 if (rc != 0) {
2457 if_printf(ifp, "rss_config failed: %d\n", rc);
2458 goto done;
2459 }
2460
2461 pi->flags |= PORT_INIT_DONE;
2462done:
2463 if (rc != 0)
2464 port_full_uninit(pi);
2465
2466 return (rc);
2467}
2468
2469/*
2470 * Idempotent.
2471 */
2472static int
2473port_full_uninit(struct port_info *pi)
2474{
2475 struct adapter *sc = pi->adapter;
2476 int i;
2477 struct sge_rxq *rxq;
2478 struct sge_txq *txq;
2479#ifndef TCP_OFFLOAD_DISABLE
2480 struct sge_ofld_rxq *ofld_rxq;
2481 struct sge_wrq *ofld_txq;
2482#endif
2483
2484 if (pi->flags & PORT_INIT_DONE) {
2485
2486 /* Need to quiesce queues. XXX: ctrl queues? */
2487
2488 for_each_txq(pi, i, txq) {
2489 quiesce_eq(sc, &txq->eq);
2490 }
2491
2492#ifndef TCP_OFFLOAD_DISABLE
2493 for_each_ofld_txq(pi, i, ofld_txq) {
2494 quiesce_eq(sc, &ofld_txq->eq);
2495 }
2496#endif
2497
2498 for_each_rxq(pi, i, rxq) {
2499 quiesce_iq(sc, &rxq->iq);
2500 quiesce_fl(sc, &rxq->fl);
2501 }
2502
2503#ifndef TCP_OFFLOAD_DISABLE
2504 for_each_ofld_rxq(pi, i, ofld_rxq) {
2505 quiesce_iq(sc, &ofld_rxq->iq);
2506 quiesce_fl(sc, &ofld_rxq->fl);
2507 }
2508#endif
2509 }
2510
2511 t4_teardown_port_queues(pi);
2512 pi->flags &= ~PORT_INIT_DONE;
2513
2514 return (0);
2515}
2516
2517static void
2518quiesce_eq(struct adapter *sc, struct sge_eq *eq)
2519{
2520 EQ_LOCK(eq);
2521 eq->flags |= EQ_DOOMED;
2522
2523 /*
2524 * Wait for the response to a credit flush if one's
2525 * pending.
2526 */
2527 while (eq->flags & EQ_CRFLUSHED)
2528 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
2529 EQ_UNLOCK(eq);
2530
2531 callout_drain(&eq->tx_callout); /* XXX: iffy */
2532 pause("callout", 10); /* Still iffy */
2533
2534 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
2535}
2536
2537static void
2538quiesce_iq(struct adapter *sc, struct sge_iq *iq)
2539{
2540 (void) sc; /* unused */
2541
2542 /* Synchronize with the interrupt handler */
2543 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
2544 pause("iqfree", 1);
2545}
2546
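/*
 * Marks the fl doomed so that the starvation handler lets go of it, then waits
 * for the starvation callout to finish.
 */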
2547static void
2548quiesce_fl(struct adapter *sc, struct sge_fl *fl)
2549{
2550 mtx_lock(&sc->sfl_lock);
2551 FL_LOCK(fl);
2552 fl->flags |= FL_DOOMED;
2553 FL_UNLOCK(fl);
2554 mtx_unlock(&sc->sfl_lock);
2555
2556 callout_drain(&sc->sfl_callout);
2557 KASSERT((fl->flags & FL_STARVING) == 0,
2558 ("%s: still starving", __func__));
2559}
2560
2561static int
2562t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
2563 driver_intr_t *handler, void *arg, char *name)
2564{
2565 int rc;
2566
2567 irq->rid = rid;
2568 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
2569 RF_SHAREABLE | RF_ACTIVE);
2570 if (irq->res == NULL) {
2571 device_printf(sc->dev,

--- 275 unchanged lines hidden ---

2847 PORT_LOCK(pi);
2848 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2849 PORT_UNLOCK(pi);
2850 return; /* without scheduling another callout */
2851 }
2852
2853 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
2854
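	/*
	 * Pause frames are counted in the MAC statistics but are not packets
	 * as far as the stack is concerned, so back them out here.
	 */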
2855 ifp->if_opackets = s->tx_frames - s->tx_pause;
2856 ifp->if_ipackets = s->rx_frames - s->rx_pause;
2857 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
2858 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
2859 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
2860 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
2861 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2862 s->rx_ovflow3;
2863
2864 drops = s->tx_drop;
2865 for_each_txq(pi, i, txq)
2866 drops += txq->br->br_drops;
2867 ifp->if_snd.ifq_drops = drops;
2868
2869 ifp->if_oerrors = s->tx_error_frames;
2870 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
2871 s->rx_fcs_err + s->rx_len_err;
2872
2873 callout_schedule(&pi->tick, hz);
2874 PORT_UNLOCK(pi);
2875}
2876
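/*
 * Catch-all for CPL messages that arrive without a registered handler.
 */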
2877static int
2878cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2879{
2880#ifdef INVARIANTS
2881 panic("%s: opcode %02x on iq %p with payload %p",
2882 __func__, rss->opcode, iq, m);
2883#else
2884	log(LOG_ERR, "%s: opcode %02x on iq %p with payload %p\n",
2885 __func__, rss->opcode, iq, m);
2886 m_freem(m);
2887#endif
2888 return (EDOOFUS);
2889}
2890
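/*
 * Installs the handler for a CPL opcode, or restores the default handler when
 * h is NULL.  A single atomic store keeps the table consistent for rx
 * processing that is already in progress.
 */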
2891int
2892t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2893{
2894 uintptr_t *loc, new;
2895
2896 if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2897 return (EINVAL);
2898
2899 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
2900 loc = (uintptr_t *) &sc->cpl_handler[opcode];
2901 atomic_store_rel_ptr(loc, new);
2902
2903 return (0);
2904}
2905
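/*
 * Registers the adapter-wide sysctl tree (dev.t4nex.X.*).  Per-port knobs are
 * added separately in cxgbe_sysctls.
 */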
2906static int
2907t4_sysctls(struct adapter *sc)
2908{
2909 struct sysctl_ctx_list *ctx;
2910 struct sysctl_oid *oid;
2911 struct sysctl_oid_list *children, *c0;
2912 static char *caps[] = {
2913 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
2914 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */
2915 "\20\1TOE", /* caps[2] toecaps */
2916 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
2917 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
2918 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
2919 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
2920 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
2921 };
2922
2923 ctx = device_get_sysctl_ctx(sc->dev);
2924
2925 /*
2926 * dev.t4nex.X.
2927 */
2928 oid = device_get_sysctl_tree(sc->dev);
2929 c0 = children = SYSCTL_CHILDREN(oid);
2930
2931 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
2932 &sc->params.nports, 0, "# of ports");
2933
2934 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
2935 &sc->params.rev, 0, "chip hardware revision");
2936
2937 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
2938 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
2939
2940 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
2941 CTLFLAG_RD, &t4_cfg_file, 0, "configuration file");
2942
2943 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD,
2944 &sc->cfcsum, 0, "config file checksum");
2945
2946 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
2947 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
2948 sysctl_bitfield, "A", "available link capabilities");
2949
2950 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
2951 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
2952 sysctl_bitfield, "A", "available NIC capabilities");
2953
2954 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
2955 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
2956 sysctl_bitfield, "A", "available TCP offload capabilities");
2957
2958 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
2959 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
2960 sysctl_bitfield, "A", "available RDMA capabilities");
2961
2962 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
2963 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
2964 sysctl_bitfield, "A", "available iSCSI capabilities");
2965
2966 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
2967 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
2968 sysctl_bitfield, "A", "available FCoE capabilities");
2969
2970 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
2971	    &sc->params.vpd.cclk, 0, "core clock frequency (in kHz)");
2972
2973 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
2974 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
2975 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
2976 "interrupt holdoff timer values (us)");
2977
2978 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
2979 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
2980 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
2981 "interrupt holdoff packet counter values");
2982
2983 /*
2984 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
2985 */
2986 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
2987 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
2988 "logs and miscellaneous information");
2989 children = SYSCTL_CHILDREN(oid);
2990
2991 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
2992 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2993 sysctl_cctrl, "A", "congestion control");
2994
2995 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
2996 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2997 sysctl_cpl_stats, "A", "CPL statistics");
2998
2999 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
3000 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3001 sysctl_ddp_stats, "A", "DDP statistics");
3002
3003 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
3004 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3005 sysctl_devlog, "A", "firmware's device log");
3006
3007 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
3008 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3009 sysctl_fcoe_stats, "A", "FCoE statistics");
3010
3011 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
3012 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3013	    sysctl_hw_sched, "A", "hardware scheduler");
3014
3015 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
3016 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3017 sysctl_l2t, "A", "hardware L2 table");
3018
3019 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
3020 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3021 sysctl_lb_stats, "A", "loopback statistics");
3022
3023 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
3024 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3025 sysctl_meminfo, "A", "memory regions");
3026
3027 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
3028 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3029 sysctl_path_mtus, "A", "path MTUs");
3030
3031 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
3032 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3033 sysctl_pm_stats, "A", "PM statistics");
3034
3035 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
3036 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3037 sysctl_rdma_stats, "A", "RDMA statistics");
3038
3039 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
3040 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3041 sysctl_tcp_stats, "A", "TCP statistics");
3042
3043 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
3044 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3045 sysctl_tids, "A", "TID information");
3046
3047 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
3048 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3049 sysctl_tp_err_stats, "A", "TP error statistics");
3050
3051 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
3052 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3053 sysctl_tx_rate, "A", "Tx rate");
3054
3055#ifndef TCP_OFFLOAD_DISABLE
3056 if (is_offload(sc)) {
3057 /*
3058 * dev.t4nex.X.toe.
3059 */
3060 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
3061 NULL, "TOE parameters");
3062 children = SYSCTL_CHILDREN(oid);
3063
3064 sc->tt.sndbuf = 256 * 1024;
3065 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
3066 &sc->tt.sndbuf, 0, "max hardware send buffer size");
3067
3068 sc->tt.ddp = 0;
3069 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
3070 &sc->tt.ddp, 0, "DDP allowed");
3071 sc->tt.indsz = M_INDICATESIZE;
3072 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
3073 &sc->tt.indsz, 0, "DDP max indicate size allowed");
3074 sc->tt.ddp_thres = 3*4096;
3075 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
3076 &sc->tt.ddp_thres, 0, "DDP threshold");
3077 }
3078#endif
3079
3080
3081 return (0);
3082}
3083
3084static int
3085cxgbe_sysctls(struct port_info *pi)
3086{
3087 struct sysctl_ctx_list *ctx;
3088 struct sysctl_oid *oid;

--- 11 unchanged lines hidden ---

3100 &pi->nrxq, 0, "# of rx queues");
3101 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
3102 &pi->ntxq, 0, "# of tx queues");
3103 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
3104 &pi->first_rxq, 0, "index of first rx queue");
3105 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
3106 &pi->first_txq, 0, "index of first tx queue");
3107
3108#ifndef TCP_OFFLOAD_DISABLE
3109 if (is_offload(pi->adapter)) {
3110 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
3111 &pi->nofldrxq, 0,
3112 "# of rx queues for offloaded TCP connections");
3113 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
3114 &pi->nofldtxq, 0,
3115 "# of tx queues for offloaded TCP connections");
3116 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
3117 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
3118 "index of first TOE rx queue");
3119 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
3120 CTLFLAG_RD, &pi->first_ofld_txq, 0,
3121 "index of first TOE tx queue");
3122 }
3123#endif
3124
3125 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
3126 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
3127 "holdoff timer index");
3128 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
3129 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
3130 "holdoff packet counter index");
3131
3132 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",

--- 174 unchanged lines hidden ---

3307 sbuf_trim(&sb);
3308 sbuf_finish(&sb);
3309 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
3310 sbuf_delete(&sb);
3311 return (rc);
3312}
3313
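/*
 * Decodes the integer in arg2 with printf(9)'s %b format, using the bit
 * description string in arg1 (a leading \20 selects hexadecimal output, and
 * each subsequent bit number is followed by that bit's name).
 */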
3314static int
3315sysctl_bitfield(SYSCTL_HANDLER_ARGS)
3316{
3317 int rc;
3318 struct sbuf *sb;
3319
3320 rc = sysctl_wire_old_buffer(req, 0);
3321 if (rc != 0)
3322		return (rc);
3323
3324 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3325 if (sb == NULL)
3326 return (ENOMEM);
3327
3328 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
3329 rc = sbuf_finish(sb);
3330 sbuf_delete(sb);
3331
3332 return (rc);
3333}
3334
3335static int
3336sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
3337{
3338 struct port_info *pi = arg1;
3339 struct adapter *sc = pi->adapter;
3340 int idx, rc, i;
3341
3342 idx = pi->tmr_idx;
3343
3344 rc = sysctl_handle_int(oidp, &idx, 0, req);
3345 if (rc != 0 || req->newptr == NULL)
3346 return (rc);
3347
3348 if (idx < 0 || idx >= SGE_NTIMERS)
3349 return (EINVAL);
3350
3351 ADAPTER_LOCK(sc);
3352 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3353 if (rc == 0) {
3354 struct sge_rxq *rxq;
3355 uint8_t v;
3356
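		/*
		 * Update each rxq's interrupt parameters in place; the new
		 * value takes effect the next time the queue's interrupt is
		 * rearmed.
		 */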
3357 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
3358 for_each_rxq(pi, i, rxq) {
3359#ifdef atomic_store_rel_8
3360 atomic_store_rel_8(&rxq->iq.intr_params, v);
3361#else
3362 rxq->iq.intr_params = v;
3363#endif
3364 }
3365 pi->tmr_idx = idx;
3366 }
3367
3368 ADAPTER_UNLOCK(sc);
3369 return (rc);
3370}
3371

--- 10 unchanged lines hidden (view full) ---

3382 if (rc != 0 || req->newptr == NULL)
3383 return (rc);
3384
3385 if (idx < -1 || idx >= SGE_NCOUNTERS)
3386 return (EINVAL);
3387
3388 ADAPTER_LOCK(sc);
3389 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3390 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3391 rc = EBUSY; /* cannot be changed once the queues are created */
3392
3393 if (rc == 0)
3394 pi->pktc_idx = idx;
3395
3396 ADAPTER_UNLOCK(sc);
3397 return (rc);
3398}
3399

--- 10 unchanged lines hidden (view full) ---

3410 if (rc != 0 || req->newptr == NULL)
3411 return (rc);
3412
3413 if (qsize < 128 || (qsize & 7))
3414 return (EINVAL);
3415
3416 ADAPTER_LOCK(sc);
3417 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3418 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3419 rc = EBUSY; /* cannot be changed once the queues are created */
3420
3421 if (rc == 0)
3422 pi->qsize_rxq = qsize;
3423
3424 ADAPTER_UNLOCK(sc);
3425 return (rc);
3426}
3427

--- 10 unchanged lines hidden (view full) ---

3438 if (rc != 0 || req->newptr == NULL)
3439 return (rc);
3440
3441 if (qsize < 128)
3442 return (EINVAL);
3443
3444 ADAPTER_LOCK(sc);
3445 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3446 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3447 rc = EBUSY; /* cannot be changed once the queues are created */
3448
3449 if (rc == 0)
3450 pi->qsize_txq = qsize;
3451
3452 ADAPTER_UNLOCK(sc);
3453 return (rc);
3454}
3455

--- 4 unchanged lines hidden (view full) ---

3460 int reg = arg2;
3461 uint64_t val;
3462
3463 val = t4_read_reg64(sc, reg);
3464
3465 return (sysctl_handle_64(oidp, &val, 0, req));
3466}
3467
3468static int
3469sysctl_cctrl(SYSCTL_HANDLER_ARGS)
3470{
3471 struct adapter *sc = arg1;
3472 struct sbuf *sb;
3473 int rc, i;
3474 uint16_t incr[NMTUS][NCCTRL_WIN];
3475 static const char *dec_fac[] = {
3476 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
3477 "0.9375"
3478 };
3479
3480 rc = sysctl_wire_old_buffer(req, 0);
3481 if (rc != 0)
3482 return (rc);
3483
3484 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3485 if (sb == NULL)
3486 return (ENOMEM);
3487
3488 t4_read_cong_tbl(sc, incr);
3489
3490 for (i = 0; i < NCCTRL_WIN; ++i) {
3491 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
3492 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
3493 incr[5][i], incr[6][i], incr[7][i]);
3494 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
3495 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
3496 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
3497 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
3498 }
3499
3500 rc = sbuf_finish(sb);
3501 sbuf_delete(sb);
3502
3503 return (rc);
3504}
3505
3506static int
3507sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
3508{
3509 struct adapter *sc = arg1;
3510 struct sbuf *sb;
3511 int rc;
3512 struct tp_cpl_stats stats;
3513
3514 rc = sysctl_wire_old_buffer(req, 0);
3515 if (rc != 0)
3516 return (rc);
3517
3518 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3519 if (sb == NULL)
3520 return (ENOMEM);
3521
3522 t4_tp_get_cpl_stats(sc, &stats);
3523
3524 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
3525 "channel 3\n");
3526 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
3527 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
3528 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
3529 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
3530
3531 rc = sbuf_finish(sb);
3532 sbuf_delete(sb);
3533
3534 return (rc);
3535}
3536
3537static int
3538sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
3539{
3540 struct adapter *sc = arg1;
3541 struct sbuf *sb;
3542 int rc;
3543 struct tp_usm_stats stats;
3544
3545 rc = sysctl_wire_old_buffer(req, 0);
3546 if (rc != 0)
3547		return (rc);
3548
3549 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3550 if (sb == NULL)
3551 return (ENOMEM);
3552
3553 t4_get_usm_stats(sc, &stats);
3554
3555 sbuf_printf(sb, "Frames: %u\n", stats.frames);
3556 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
3557 sbuf_printf(sb, "Drops: %u", stats.drops);
3558
3559 rc = sbuf_finish(sb);
3560 sbuf_delete(sb);
3561
3562 return (rc);
3563}
3564
3565const char *devlog_level_strings[] = {
3566 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
3567 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
3568 [FW_DEVLOG_LEVEL_ERR] = "ERR",
3569 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
3570 [FW_DEVLOG_LEVEL_INFO] = "INFO",
3571 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
3572};

--- 68 unchanged lines hidden ---

3641 if (buf[first].timestamp == 0)
3642 goto done; /* nothing in the log */
3643
3644 rc = sysctl_wire_old_buffer(req, 0);
3645 if (rc != 0)
3646 goto done;
3647
3648 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3649 if (sb == NULL) {
3650 rc = ENOMEM;
3651 goto done;
3652 }
3653 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
3654 "Seq#", "Tstamp", "Level", "Facility", "Message");
3655
3656 i = first;
3657 do {
3658 e = &buf[i];
3659 if (e->timestamp == 0)
3660 break; /* end */
3661

--- 13 unchanged lines hidden ---

3675
3676 rc = sbuf_finish(sb);
3677 sbuf_delete(sb);
3678done:
3679 free(buf, M_CXGBE);
3680 return (rc);
3681}
3682
3683static int
3684sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
3685{
3686 struct adapter *sc = arg1;
3687 struct sbuf *sb;
3688 int rc;
3689 struct tp_fcoe_stats stats[4];
3690
3691 rc = sysctl_wire_old_buffer(req, 0);
3692 if (rc != 0)
3693 return (rc);
3694
3695 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3696 if (sb == NULL)
3697 return (ENOMEM);
3698
3699 t4_get_fcoe_stats(sc, 0, &stats[0]);
3700 t4_get_fcoe_stats(sc, 1, &stats[1]);
3701 t4_get_fcoe_stats(sc, 2, &stats[2]);
3702 t4_get_fcoe_stats(sc, 3, &stats[3]);
3703
3704 sbuf_printf(sb, " channel 0 channel 1 "
3705 "channel 2 channel 3\n");
3706 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
3707 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
3708 stats[3].octetsDDP);
3709 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
3710 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
3711 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
3712 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
3713 stats[3].framesDrop);
3714
3715 rc = sbuf_finish(sb);
3716 sbuf_delete(sb);
3717
3718 return (rc);
3719}
3720
3721static int
3722sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
3723{
3724 struct adapter *sc = arg1;
3725 struct sbuf *sb;
3726 int rc, i;
3727 unsigned int map, kbps, ipg, mode;
3728 unsigned int pace_tab[NTX_SCHED];
3729
3730 rc = sysctl_wire_old_buffer(req, 0);
3731 if (rc != 0)
3732 return (rc);
3733
3734 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3735 if (sb == NULL)
3736 return (ENOMEM);
3737
3738 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
3739 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
3740 t4_read_pace_tbl(sc, pace_tab);
3741
3742 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
3743 "Class IPG (0.1 ns) Flow IPG (us)");
3744
3745 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
3746 t4_get_tx_sched(sc, i, &kbps, &ipg);
3747 sbuf_printf(sb, "\n %u %-5s %u ", i,
3748 (mode & (1 << i)) ? "flow" : "class", map & 3);
3749 if (kbps)
3750 sbuf_printf(sb, "%9u ", kbps);
3751 else
3752 sbuf_printf(sb, " disabled ");
3753
3754 if (ipg)
3755 sbuf_printf(sb, "%13u ", ipg);
3756 else
3757 sbuf_printf(sb, " disabled ");
3758
3759 if (pace_tab[i])
3760 sbuf_printf(sb, "%10u", pace_tab[i]);
3761 else
3762 sbuf_printf(sb, " disabled");
3763 }
3764
3765 rc = sbuf_finish(sb);
3766 sbuf_delete(sb);
3767
3768 return (rc);
3769}
3770
3771static int
3772sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
3773{
3774 struct adapter *sc = arg1;
3775 struct sbuf *sb;
3776 int rc, i, j;
3777 uint64_t *p0, *p1;
3778 struct lb_port_stats s[2];
3779 static const char *stat_name[] = {
3780 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
3781 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
3782 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
3783 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
3784 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
3785 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
3786 "BG2FramesTrunc:", "BG3FramesTrunc:"
3787 };
3788
3789 rc = sysctl_wire_old_buffer(req, 0);
3790 if (rc != 0)
3791 return (rc);
3792
3793 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3794 if (sb == NULL)
3795 return (ENOMEM);
3796
3797 memset(s, 0, sizeof(s));
3798
3799 for (i = 0; i < 4; i += 2) {
3800 t4_get_lb_stats(sc, i, &s[0]);
3801 t4_get_lb_stats(sc, i + 1, &s[1]);
3802
3803 p0 = &s[0].octets;
3804 p1 = &s[1].octets;
3805 sbuf_printf(sb, "%s Loopback %u"
3806 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
3807
3808 for (j = 0; j < ARRAY_SIZE(stat_name); j++)
3809 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
3810 *p0++, *p1++);
3811 }
3812
3813 rc = sbuf_finish(sb);
3814 sbuf_delete(sb);
3815
3816 return (rc);
3817}
3818
3819struct mem_desc {
3820 unsigned int base;
3821 unsigned int limit;
3822 unsigned int idx;
3823};
3824
3825static int
3826mem_desc_cmp(const void *a, const void *b)
3827{
3828 return ((const struct mem_desc *)a)->base -
3829 ((const struct mem_desc *)b)->base;
3830}
3831
3832static void
3833mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
3834 unsigned int to)
3835{
3836 unsigned int size;
3837
3838 size = to - from + 1;
3839 if (size == 0)
3840 return;
3841
3842 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
3843 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
3844}
3845
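/*
 * Dumps the card's memory layout: the enabled memories (EDC0, EDC1, MC), the
 * hardware regions carved out of them, and page/pstruct usage.
 */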
3846static int
3847sysctl_meminfo(SYSCTL_HANDLER_ARGS)
3848{
3849 struct adapter *sc = arg1;
3850 struct sbuf *sb;
3851 int rc, i, n;
3852 uint32_t lo, hi;
3853 static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
3854 static const char *region[] = {
3855 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
3856 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
3857 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
3858 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
3859 "RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
3860 "ULPTX state:", "On-chip queues:"
3861 };
3862 struct mem_desc avail[3];
3863 struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
3864 struct mem_desc *md = mem;
3865
3866 rc = sysctl_wire_old_buffer(req, 0);
3867 if (rc != 0)
3868 return (rc);
3869
3870 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3871 if (sb == NULL)
3872 return (ENOMEM);
3873
3874 for (i = 0; i < ARRAY_SIZE(mem); i++) {
3875 mem[i].limit = 0;
3876 mem[i].idx = i;
3877 }
3878
3879 /* Find and sort the populated memory ranges */
3880 i = 0;
3881 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3882 if (lo & F_EDRAM0_ENABLE) {
3883 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3884 avail[i].base = G_EDRAM0_BASE(hi) << 20;
3885 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
3886 avail[i].idx = 0;
3887 i++;
3888 }
3889 if (lo & F_EDRAM1_ENABLE) {
3890 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3891 avail[i].base = G_EDRAM1_BASE(hi) << 20;
3892 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
3893 avail[i].idx = 1;
3894 i++;
3895 }
3896 if (lo & F_EXT_MEM_ENABLE) {
3897 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3898 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
3899 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
3900 avail[i].idx = 2;
3901 i++;
3902 }
3903	if (i == 0) {		/* no memory available */
		sbuf_delete(sb);
3904		return (0);
	}
3905 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
3906
3907 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
3908 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
3909 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
3910 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
3911 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
3912 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
3913 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
3914 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
3915 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
3916
3917 /* the next few have explicit upper bounds */
3918 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
3919 md->limit = md->base - 1 +
3920 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
3921 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
3922 md++;
3923
3924 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
3925 md->limit = md->base - 1 +
3926 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
3927 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
3928 md++;
3929
3930 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
3931 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
3932 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
3933 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
3934 } else {
3935 md->base = 0;
3936 md->idx = ARRAY_SIZE(region); /* hide it */
3937 }
3938 md++;
3939
3940#define ulp_region(reg) \
3941 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
3942 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
3943
3944 ulp_region(RX_ISCSI);
3945 ulp_region(RX_TDDP);
3946 ulp_region(TX_TPT);
3947 ulp_region(RX_STAG);
3948 ulp_region(RX_RQ);
3949 ulp_region(RX_RQUDP);
3950 ulp_region(RX_PBL);
3951 ulp_region(TX_PBL);
3952#undef ulp_region
3953
3954 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
3955 md->limit = md->base + sc->tids.ntids - 1;
3956 md++;
3957 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
3958 md->limit = md->base + sc->tids.ntids - 1;
3959 md++;
3960
3961 md->base = sc->vres.ocq.start;
3962 if (sc->vres.ocq.size)
3963 md->limit = md->base + sc->vres.ocq.size - 1;
3964 else
3965 md->idx = ARRAY_SIZE(region); /* hide it */
3966 md++;
3967
3968 /* add any address-space holes, there can be up to 3 */
3969 for (n = 0; n < i - 1; n++)
3970 if (avail[n].limit < avail[n + 1].base)
3971 (md++)->base = avail[n].limit;
3972 if (avail[n].limit)
3973 (md++)->base = avail[n].limit;
3974
3975 n = md - mem;
3976 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
3977
3978 for (lo = 0; lo < i; lo++)
3979 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
3980 avail[lo].limit - 1);
3981
3982 sbuf_printf(sb, "\n");
3983 for (i = 0; i < n; i++) {
3984 if (mem[i].idx >= ARRAY_SIZE(region))
3985 continue; /* skip holes */
3986 if (!mem[i].limit)
3987 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
3988 mem_region_show(sb, region[mem[i].idx], mem[i].base,
3989 mem[i].limit);
3990 }
3991
3992 sbuf_printf(sb, "\n");
3993 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
3994 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
3995 mem_region_show(sb, "uP RAM:", lo, hi);
3996
3997 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
3998 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
3999 mem_region_show(sb, "uP Extmem2:", lo, hi);
4000
4001 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
4002 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
4003 G_PMRXMAXPAGE(lo),
4004 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
4005 (lo & F_PMRXNUMCHN) ? 2 : 1);
4006
4007 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
4008 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
4009 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
4010 G_PMTXMAXPAGE(lo),
4011 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
4012 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
4013 sbuf_printf(sb, "%u p-structs\n",
4014 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
4015
4016 for (i = 0; i < 4; i++) {
4017 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
4018 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
4019 i, G_USED(lo), G_ALLOC(lo));
4020 }
4021 for (i = 0; i < 4; i++) {
4022 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
4023 sbuf_printf(sb,
4024 "\nLoopback %d using %u pages out of %u allocated",
4025 i, G_USED(lo), G_ALLOC(lo));
4026 }
4027
4028 rc = sbuf_finish(sb);
4029 sbuf_delete(sb);
4030
4031 return (rc);
4032}
4033
4034static int
4035sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
4036{
4037 struct adapter *sc = arg1;
4038 struct sbuf *sb;
4039 int rc;
4040 uint16_t mtus[NMTUS];
4041
4042 rc = sysctl_wire_old_buffer(req, 0);
4043 if (rc != 0)
4044 return (rc);
4045
4046 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4047 if (sb == NULL)
4048 return (ENOMEM);
4049
4050 t4_read_mtu_tbl(sc, mtus, NULL);
4051
4052 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
4053 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
4054 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
4055 mtus[14], mtus[15]);
4056
4057 rc = sbuf_finish(sb);
4058 sbuf_delete(sb);
4059
4060 return (rc);
4061}
4062
4063static int
4064sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
4065{
4066 struct adapter *sc = arg1;
4067 struct sbuf *sb;
4068 int rc, i;
4069 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
4070 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
4071 static const char *pm_stats[] = {
4072 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
4073 };
4074
4075 rc = sysctl_wire_old_buffer(req, 0);
4076 if (rc != 0)
4077 return (rc);
4078
4079 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4080 if (sb == NULL)
4081 return (ENOMEM);
4082
4083 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
4084 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
4085
4086 sbuf_printf(sb, " Tx count Tx cycles "
4087 "Rx count Rx cycles");
4088 for (i = 0; i < PM_NSTATS; i++)
4089 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
4090 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
4091
4092 rc = sbuf_finish(sb);
4093 sbuf_delete(sb);
4094
4095 return (rc);
4096}
4097
4098static int
4099sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
4100{
4101 struct adapter *sc = arg1;
4102 struct sbuf *sb;
4103 int rc;
4104 struct tp_rdma_stats stats;
4105
4106 rc = sysctl_wire_old_buffer(req, 0);
4107 if (rc != 0)
4108 return (rc);
4109
4110 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4111 if (sb == NULL)
4112 return (ENOMEM);
4113
4114 t4_tp_get_rdma_stats(sc, &stats);
4115	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
4116	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
4117
4118 rc = sbuf_finish(sb);
4119 sbuf_delete(sb);
4120
4121 return (rc);
4122}
4123
4124static int
4125sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
4126{
4127 struct adapter *sc = arg1;
4128 struct sbuf *sb;
4129 int rc;
4130 struct tp_tcp_stats v4, v6;
4131
4132 rc = sysctl_wire_old_buffer(req, 0);
4133 if (rc != 0)
4134 return (rc);
4135
4136 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4137 if (sb == NULL)
4138 return (ENOMEM);
4139
4140 t4_tp_get_tcp_stats(sc, &v4, &v6);
4141 sbuf_printf(sb,
4142 " IP IPv6\n");
4143 sbuf_printf(sb, "OutRsts: %20u %20u\n",
4144 v4.tcpOutRsts, v6.tcpOutRsts);
4145 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
4146 v4.tcpInSegs, v6.tcpInSegs);
4147 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
4148 v4.tcpOutSegs, v6.tcpOutSegs);
4149 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
4150 v4.tcpRetransSegs, v6.tcpRetransSegs);
4151
4152 rc = sbuf_finish(sb);
4153 sbuf_delete(sb);
4154
4155 return (rc);
4156}
4157
4158static int
4159sysctl_tids(SYSCTL_HANDLER_ARGS)
4160{
4161 struct adapter *sc = arg1;
4162 struct sbuf *sb;
4163 int rc;
4164 struct tid_info *t = &sc->tids;
4165
4166 rc = sysctl_wire_old_buffer(req, 0);
4167 if (rc != 0)
4168 return (rc);
4169
4170 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4171 if (sb == NULL)
4172 return (ENOMEM);
4173
4174 if (t->natids) {
4175 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
4176 t->atids_in_use);
4177 }
4178
4179 if (t->ntids) {
4180 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
4181 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
4182
4183 if (b) {
4184 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
4185 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4186 t->ntids - 1);
4187 } else {
4188 sbuf_printf(sb, "TID range: %u-%u",
4189 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4190 t->ntids - 1);
4191 }
4192 } else
4193 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
4194 sbuf_printf(sb, ", in use: %u\n",
4195 atomic_load_acq_int(&t->tids_in_use));
4196 }
4197
4198 if (t->nstids) {
4199 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
4200 t->stid_base + t->nstids - 1, t->stids_in_use);
4201 }
4202
4203 if (t->nftids) {
4204 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
4205 t->ftid_base + t->nftids - 1);
4206 }
4207
4208 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
4209 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
4210 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
4211
4212 rc = sbuf_finish(sb);
4213 sbuf_delete(sb);
4214
4215 return (rc);
4216}
4217
4218static int
4219sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
4220{
4221 struct adapter *sc = arg1;
4222 struct sbuf *sb;
4223 int rc;
4224 struct tp_err_stats stats;
4225
4226 rc = sysctl_wire_old_buffer(req, 0);
4227 if (rc != 0)
4228 return (rc);
4229
4230 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4231 if (sb == NULL)
4232 return (ENOMEM);
4233
4234 t4_tp_get_err_stats(sc, &stats);
4235
4236 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4237 "channel 3\n");
4238 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
4239 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
4240 stats.macInErrs[3]);
4241 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
4242 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
4243 stats.hdrInErrs[3]);
4244 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
4245 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
4246 stats.tcpInErrs[3]);
4247 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
4248 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
4249 stats.tcp6InErrs[3]);
4250 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
4251 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
4252 stats.tnlCongDrops[3]);
4253 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
4254 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
4255 stats.tnlTxDrops[3]);
4256 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
4257 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
4258 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
4259 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
4260 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
4261 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
4262 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
4263 stats.ofldNoNeigh, stats.ofldCongDefer);
4264
4265 rc = sbuf_finish(sb);
4266 sbuf_delete(sb);
4267
4268 return (rc);
4269}
4270
4271static int
4272sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
4273{
4274 struct adapter *sc = arg1;
4275 struct sbuf *sb;
4276 int rc;
4277 u64 nrate[NCHAN], orate[NCHAN];
4278
4279 rc = sysctl_wire_old_buffer(req, 0);
4280 if (rc != 0)
4281 return (rc);
4282
4283 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4284 if (sb == NULL)
4285 return (ENOMEM);
4286
4287 t4_get_chan_txrate(sc, nrate, orate);
4288 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4289 "channel 3\n");
4290 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
4291 nrate[0], nrate[1], nrate[2], nrate[3]);
4292 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
4293 orate[0], orate[1], orate[2], orate[3]);
4294
4295 rc = sbuf_finish(sb);
4296 sbuf_delete(sb);
4297
4298 return (rc);
4299}
4300
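/*
 * Resumes transmission on the txq: starts with the held-over mbuf, if any, or
 * the next one in the buf_ring, and hands it to t4_eth_tx.
 */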
4301static inline void
4302txq_start(struct ifnet *ifp, struct sge_txq *txq)
4303{
4304 struct buf_ring *br;
4305 struct mbuf *m;
4306
4307 TXQ_LOCK_ASSERT_OWNED(txq);
4308
4309 br = txq->br;
4310 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
4311 if (m)
4312 t4_eth_tx(ifp, txq, m);
4313}
4314
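/*
 * Runs periodically while an eq is stalled, and hands the eq over to the tx
 * task on its channel's taskqueue once transmission can resume.
 */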
4315void
4316t4_tx_callout(void *arg)
4317{
4318 struct sge_eq *eq = arg;
4319 struct adapter *sc;
4320
4321 if (EQ_TRYLOCK(eq) == 0)
4322 goto reschedule;
4323
4324 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
4325 EQ_UNLOCK(eq);
4326reschedule:
4327		if (__predict_true(!(eq->flags & EQ_DOOMED)))
4328 callout_schedule(&eq->tx_callout, 1);
4329 return;
4330 }
4331
4332 EQ_LOCK_ASSERT_OWNED(eq);
4333
4334 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
4335
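		/* The eq is the first member of both sge_txq and sge_wrq. */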
4336 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4337 struct sge_txq *txq = arg;
4338 struct port_info *pi = txq->ifp->if_softc;
4339
4340 sc = pi->adapter;
4341 } else {
4342 struct sge_wrq *wrq = arg;
4343
4344 sc = wrq->adapter;
4345 }
4346
4347 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
4348 }
4349
4350 EQ_UNLOCK(eq);
4351}
4352
4353void
4354t4_tx_task(void *arg, int count)
4355{
4356 struct sge_eq *eq = arg;
4357
4358 EQ_LOCK(eq);
4359 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4360 struct sge_txq *txq = arg;
4361 txq_start(txq->ifp, txq);
4362 } else {
4363 struct sge_wrq *wrq = arg;
4364 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
4365 }
4366 EQ_UNLOCK(eq);
4367}
4368
4369static uint32_t
4370fconf_to_mode(uint32_t fconf)
4371{
4372 uint32_t mode;
4373
4374 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |

--- 13 unchanged lines hidden ---

4388
4389 if (fconf & F_PROTOCOL)
4390 mode |= T4_FILTER_IP_PROTO;
4391
4392 if (fconf & F_TOS)
4393 mode |= T4_FILTER_IP_TOS;
4394
4395 if (fconf & F_VLAN)
4396 mode |= T4_FILTER_VLAN;
4397
4398 if (fconf & F_VNIC_ID)
4399 mode |= T4_FILTER_VNIC;
4400
4401 if (fconf & F_PORT)
4402 mode |= T4_FILTER_PORT;
4403
4404 if (fconf & F_FCOE)
4405 mode |= T4_FILTER_FCoE;
4406
4407 return (mode);

--- 17 unchanged lines hidden ---

4425 fconf |= F_ETHERTYPE;
4426
4427 if (mode & T4_FILTER_IP_PROTO)
4428 fconf |= F_PROTOCOL;
4429
4430 if (mode & T4_FILTER_IP_TOS)
4431 fconf |= F_TOS;
4432
4433 if (mode & T4_FILTER_VLAN)
4434 fconf |= F_VLAN;
4435
4436 if (mode & T4_FILTER_VNIC)
4437 fconf |= F_VNIC_ID;
4438
4439 if (mode & T4_FILTER_PORT)
4440 fconf |= F_PORT;
4441
4442 if (mode & T4_FILTER_FCoE)
4443 fconf |= F_FCOE;
4444

--- 18 unchanged lines hidden ---

4463 fconf |= F_ETHERTYPE;
4464
4465 if (fs->val.proto || fs->mask.proto)
4466 fconf |= F_PROTOCOL;
4467
4468 if (fs->val.tos || fs->mask.tos)
4469 fconf |= F_TOS;
4470
4471 if (fs->val.vlan_vld || fs->mask.vlan_vld)
4472 fconf |= F_VLAN;
4473
4474 if (fs->val.vnic_vld || fs->mask.vnic_vld)
4475 fconf |= F_VNIC_ID;
4476
4477 if (fs->val.iport || fs->mask.iport)
4478 fconf |= F_PORT;
4479
4480 if (fs->val.fcoe || fs->mask.fcoe)
4481 fconf |= F_FCOE;
4482
4483 return (fconf);
4484}
4485
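/*
 * Reports the global filter mode, resyncing the cached copy if it has drifted
 * from what's in the hardware.
 */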
4486static int
4487get_filter_mode(struct adapter *sc, uint32_t *mode)
4488{
4489 uint32_t fconf;
4490
4491 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
4492 A_TP_VLAN_PRI_MAP);
4493
4494 if (sc->filter_mode != fconf) {
4495 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
4496 device_get_nameunit(sc->dev), sc->filter_mode, fconf);
4497 sc->filter_mode = fconf;
4498 }
4499
4500 *mode = fconf_to_mode(sc->filter_mode);
4501
4502 return (0);
4503}
4504
4505static int
4506set_filter_mode(struct adapter *sc, uint32_t mode)
4507{
4508 uint32_t fconf;
4509 int rc;

--- 6 unchanged lines hidden ---

4516 goto done;
4517 }
4518
4519 if (sc->tids.ftids_in_use > 0) {
4520 rc = EBUSY;
4521 goto done;
4522 }
4523
4524#ifndef TCP_OFFLOAD_DISABLE
4525 if (sc->offload_map) {
4526 rc = EBUSY;
4527 goto done;
4528 }
4529#endif
4530
4531#ifdef notyet
4532 rc = -t4_set_filter_mode(sc, fconf);
4533 if (rc == 0)
4534 sc->filter_mode = fconf;
4535#else
4536 rc = ENOTSUP;
4537#endif
4538
4539done:
4540 ADAPTER_UNLOCK(sc);
4541 return (rc);
4542}
4543
4544static inline uint64_t
4545get_filter_hits(struct adapter *sc, uint32_t fid)
4546{

--- 43 unchanged lines hidden ---

4590
4591 t->idx = 0xffffffff;
4592 return (0);
4593}
4594
4595static int
4596set_filter(struct adapter *sc, struct t4_filter *t)
4597{
4598 unsigned int nfilters, nports;
4599 struct filter_entry *f;
4600 int i;
4601
4602 ADAPTER_LOCK_ASSERT_OWNED(sc);
4603
4604 nfilters = sc->tids.nftids;
4605 nports = sc->params.nports;
4606
4607 if (nfilters == 0)
4608 return (ENOTSUP);
4609
4610 if (!(sc->flags & FULL_INIT_DONE))
4611 return (EAGAIN);
4612
4613 if (t->idx >= nfilters)
4614 return (EINVAL);
4615
4616 /* Validate against the global filter mode */
4617 if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode)
4618 return (E2BIG);
4619
4620 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
4621 return (EINVAL);
4622
4623 if (t->fs.val.iport >= nports)
4624 return (EINVAL);
4625

--- 80 unchanged lines hidden ---

4706 t4_l2t_release(f->l2t);
4707
4708 bzero(f, sizeof (*f));
4709}
4710
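/*
 * Builds the FW_FILTER_WR for the filter at fidx and sends it to the firmware.
 * The entry stays pending until the firmware's reply arrives at filter_rpl.
 */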
4711static int
4712set_filter_wr(struct adapter *sc, int fidx)
4713{
4714 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4715 struct mbuf *m;
4716 struct fw_filter_wr *fwr;
4717 unsigned int ftid;
4718
4719 ADAPTER_LOCK_ASSERT_OWNED(sc);
4720
4721 if (f->fs.newdmac || f->fs.newvlan) {

--- 43 unchanged lines hidden ---

4765 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
4766 V_FW_FILTER_WR_PRIO(f->fs.prio) |
4767 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
4768 fwr->ethtype = htobe16(f->fs.val.ethtype);
4769 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
4770 fwr->frag_to_ovlan_vldm =
4771 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
4772 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
4773 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
4774 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
4775 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
4776 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
4777 fwr->smac_sel = 0;
4778 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
4779 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
4780 fwr->maci_to_matchtypem =
4781 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
4782 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
4783 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
4784 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
4785 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
4786 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
4787 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
4788 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
4789 fwr->ptcl = f->fs.val.proto;
4790 fwr->ptclm = f->fs.mask.proto;
4791 fwr->ttyp = f->fs.val.tos;
4792 fwr->ttypm = f->fs.mask.tos;
4793 fwr->ivlan = htobe16(f->fs.val.vlan);
4794 fwr->ivlanm = htobe16(f->fs.mask.vlan);
4795 fwr->ovlan = htobe16(f->fs.val.vnic);
4796 fwr->ovlanm = htobe16(f->fs.mask.vnic);
4797 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
4798 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
4799 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
4800 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
4801 fwr->lp = htobe16(f->fs.val.dport);
4802 fwr->lpm = htobe16(f->fs.mask.dport);
4803 fwr->fp = htobe16(f->fs.val.sport);
4804 fwr->fpm = htobe16(f->fs.mask.sport);
4805 if (f->fs.newsmac)
4806 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
4807
4808 f->pending = 1;
4809 sc->tids.ftids_in_use++;
4810
4811 t4_mgmt_tx(sc, m);
4812 return (0);
4813}
4814
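/*
 * Sends the work request that deletes the filter at fidx.
 */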
4815static int
4816del_filter_wr(struct adapter *sc, int fidx)
4817{
4818 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4819 struct mbuf *m;
4820 struct fw_filter_wr *fwr;
4821 unsigned int ftid;
4822
4823 ADAPTER_LOCK_ASSERT_OWNED(sc);
4824
4825 ftid = sc->tids.ftid_base + fidx;
4826
4827 m = m_gethdr(M_NOWAIT, MT_DATA);
4828 if (m == NULL)
4829 return (ENOMEM);
4830
4831 fwr = mtod(m, struct fw_filter_wr *);
4832 m->m_len = m->m_pkthdr.len = sizeof(*fwr);
4833 bzero(fwr, sizeof (*fwr));
4834
4835 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
4836
4837 f->pending = 1;
4838 t4_mgmt_tx(sc, m);
4839 return (0);
4840}
4841
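/*
 * Handles the firmware's reply (a CPL_SET_TCB_RPL whose TID falls within the
 * filter region) to a filter work request.
 */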
4842static int
4843filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4844{
4845 struct adapter *sc = iq->adapter;
4846 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
4847 unsigned int idx = GET_TID(rpl);
4848
4849 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4850 rss->opcode));
4851
4852 if (idx >= sc->tids.ftid_base &&
4853 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
4854 unsigned int rc = G_COOKIE(rpl->cookie);
4855 struct filter_entry *f = &sc->tids.ftid_tab[idx];
4856
4857 if (rc == FW_FILTER_WR_FLT_ADDED) {
4858 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
4859 f->pending = 0; /* asynchronous setup completed */
4860 f->valid = 1;
4861 return (0);
4862 }
4863
4864 if (rc != FW_FILTER_WR_FLT_DELETED) {
4865 /* Add or delete failed, need to display an error */
4866 device_printf(sc->dev,
4867 "filter %u setup failed with error %u\n", idx, rc);
4868 }
4869
4870 clear_filter(f);
4871 ADAPTER_LOCK(sc);
4872 sc->tids.ftids_in_use--;
4873 ADAPTER_UNLOCK(sc);
4874 }
4875
4876 return (0);
4877}
4878
4879static int
4880get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
4881{
4882 int rc = EINVAL;
4883
4884 if (cntxt->cid > M_CTXTQID)

--- 15 unchanged lines hidden ---

4900
4901 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
4902 &cntxt->data[0]);
4903 }
4904
4905 return (rc);
4906}
4907
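/*
 * Reads a range of the card's memory through a PCIe memory window and copies
 * it out to the caller's buffer.  The range must lie entirely within one of
 * the enabled memories.
 */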
4908static int
4909read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
4910{
4911 uint32_t base, size, lo, hi, win, off, remaining, i, n;
4912 uint32_t *buf, *b;
4913 int rc;
4914
4915 /* reads are in multiples of 32 bits */
4916 if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
4917 return (EINVAL);
4918
4919 /*
4920 * We don't want to deal with potential holes so we mandate that the
4921 * requested region must lie entirely within one of the 3 memories.
4922 */
4923 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4924 if (lo & F_EDRAM0_ENABLE) {
4925 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4926 base = G_EDRAM0_BASE(hi) << 20;
4927 size = G_EDRAM0_SIZE(hi) << 20;
4928 if (size > 0 &&
4929 mr->addr >= base && mr->addr < base + size &&
4930 mr->addr + mr->len <= base + size)
4931 goto proceed;
4932 }
4933 if (lo & F_EDRAM1_ENABLE) {
4934 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4935 base = G_EDRAM1_BASE(hi) << 20;
4936 size = G_EDRAM1_SIZE(hi) << 20;
4937 if (size > 0 &&
4938 mr->addr >= base && mr->addr < base + size &&
4939 mr->addr + mr->len <= base + size)
4940 goto proceed;
4941 }
4942 if (lo & F_EXT_MEM_ENABLE) {
4943 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4944 base = G_EXT_MEM_BASE(hi) << 20;
4945 size = G_EXT_MEM_SIZE(hi) << 20;
4946 if (size > 0 &&
4947 mr->addr >= base && mr->addr < base + size &&
4948 mr->addr + mr->len <= base + size)
4949 goto proceed;
4950 }
4951 return (ENXIO);
4952
4953proceed:
4954 buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);
4955
4956 /*
4957 * Position the PCIe window (we use memwin2) to the 16B aligned area
4958 * just at/before the requested region.
4959 */
4960 win = mr->addr & ~0xf;
4961 off = mr->addr - win; /* offset of the requested region in the win */
4962 remaining = mr->len;
4963
4964 while (remaining) {
4965 t4_write_reg(sc,
4966 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
4967 t4_read_reg(sc,
4968 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
4969
4970 /* number of bytes that we'll copy in the inner loop */
4971 n = min(remaining, MEMWIN2_APERTURE - off);
4972
4973 for (i = 0; i < n; i += 4, remaining -= 4)
4974 *b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);
4975
4976 win += MEMWIN2_APERTURE;
4977 off = 0;
4978 }
4979
4980 rc = copyout(buf, mr->data, mr->len);
4981 free(buf, M_CXGBE);
4982
4983 return (rc);
4984}
4985
4986int
4987t4_os_find_pci_capability(struct adapter *sc, int cap)
4988{
4989 int i;
4990
4991 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
4992}
4993

--- 54 unchanged lines hidden ---

5048
5049 if (link_stat) {
5050 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
5051 if_link_state_change(ifp, LINK_STATE_UP);
5052 } else
5053 if_link_state_change(ifp, LINK_STATE_DOWN);
5054}
5055
5056void
5057t4_iterate(void (*func)(struct adapter *, void *), void *arg)
5058{
5059 struct adapter *sc;
5060
5061 mtx_lock(&t4_list_lock);
5062 SLIST_FOREACH(sc, &t4_list, link) {
5063 /*
5064 * func should not make any assumptions about what state sc is
5065 * in - the only guarantee is that sc->sc_lock is a valid lock.
5066 */
5067 func(sc, arg);
5068 }
5069 mtx_unlock(&t4_list_lock);
5070}
5071
5072static int
5073t4_open(struct cdev *dev, int flags, int type, struct thread *td)
5074{
5075 return (0);
5076}
5077
5078static int
5079t4_close(struct cdev *dev, int flags, int type, struct thread *td)

--- 80 unchanged lines hidden ---

5160 case CHELSIO_T4_DEL_FILTER:
5161 ADAPTER_LOCK(sc);
5162 rc = del_filter(sc, (struct t4_filter *)data);
5163 ADAPTER_UNLOCK(sc);
5164 break;
5165 case CHELSIO_T4_GET_SGE_CONTEXT:
5166 rc = get_sge_context(sc, (struct t4_sge_context *)data);
5167 break;
5168 case CHELSIO_T4_LOAD_FW: {
5169 struct t4_data *fw = (struct t4_data *)data;
5170 uint8_t *fw_data;
5171
5172 if (sc->flags & FULL_INIT_DONE)
5173 return (EBUSY);
5174
5175 fw_data = malloc(fw->len, M_CXGBE, M_NOWAIT);
5176 if (fw_data == NULL)
5177 return (ENOMEM);
5178
5179 rc = copyin(fw->data, fw_data, fw->len);
5180 if (rc == 0)
5181 rc = -t4_load_fw(sc, fw_data, fw->len);
5182
5183 free(fw_data, M_CXGBE);
5184 break;
5185 }
5186 case CHELSIO_T4_GET_MEM:
5187 rc = read_card_mem(sc, (struct t4_mem_range *)data);
5188 break;
5189 default:
5190 rc = EINVAL;
5191 }
5192
5193 return (rc);
5194}
5195
5196#ifndef TCP_OFFLOAD_DISABLE
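/*
 * Enables or disables TOE on the port.  The TOM ULD is activated when the
 * first port turns TOE on and deactivated when the last one turns it off;
 * offload_map tracks which ports have it enabled.
 */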
5197static int
5198toe_capability(struct port_info *pi, int enable)
5199{
5200 int rc;
5201 struct adapter *sc = pi->adapter;
5202
5203 ADAPTER_LOCK_ASSERT_OWNED(sc);
5204
5205 if (!is_offload(sc))
5206 return (ENODEV);
5207
5208 if (enable) {
5209 if (isset(&sc->offload_map, pi->port_id))
5210 return (0);
5211
5212 if (sc->offload_map == 0) {
5213 rc = activate_uld(sc, ULD_TOM, &sc->tom);
5214 if (rc != 0)
5215 return (rc);
5216 }
5217
5218 setbit(&sc->offload_map, pi->port_id);
5219 } else {
5220 if (!isset(&sc->offload_map, pi->port_id))
5221 return (0);
5222
5223 clrbit(&sc->offload_map, pi->port_id);
5224
5225 if (sc->offload_map == 0) {
5226 rc = deactivate_uld(&sc->tom);
5227 if (rc != 0) {
5228 setbit(&sc->offload_map, pi->port_id);
5229 return (rc);
5230 }
5231 }
5232 }
5233
5234 return (0);
5235}
5236
5237/*
5238 * Add an upper layer driver to the global list.
5239 */
5240int
5241t4_register_uld(struct uld_info *ui)
5242{
5243 int rc = 0;
5244 struct uld_info *u;
5245
5246 mtx_lock(&t4_uld_list_lock);
5247 SLIST_FOREACH(u, &t4_uld_list, link) {
5248 if (u->uld_id == ui->uld_id) {
5249 rc = EEXIST;
5250 goto done;
5251 }
5252 }
5253
5254 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
5255 ui->refcount = 0;
5256done:
5257 mtx_unlock(&t4_uld_list_lock);
5258 return (rc);
5259}
5260
5261int
5262t4_unregister_uld(struct uld_info *ui)
5263{
5264 int rc = EINVAL;
5265 struct uld_info *u;
5266
5267 mtx_lock(&t4_uld_list_lock);
5268
5269 SLIST_FOREACH(u, &t4_uld_list, link) {
5270 if (u == ui) {
5271 if (ui->refcount > 0) {
5272 rc = EBUSY;
5273 goto done;
5274 }
5275
5276 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
5277 rc = 0;
5278 goto done;
5279 }
5280 }
5281done:
5282 mtx_unlock(&t4_uld_list_lock);
5283 return (rc);
5284}
5285
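/*
 * Looks up the ULD by id, attaches it to the adapter, and takes a reference on
 * it if the attach succeeds.
 */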
5286static int
5287activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
5288{
5289 int rc = EAGAIN;
5290 struct uld_info *ui;
5291
5292 mtx_lock(&t4_uld_list_lock);
5293
5294 SLIST_FOREACH(ui, &t4_uld_list, link) {
5295 if (ui->uld_id == id) {
5296 rc = ui->attach(sc, &usc->softc);
5297 if (rc == 0) {
5298 KASSERT(usc->softc != NULL,
5299 ("%s: ULD %d has no state", __func__, id));
5300 ui->refcount++;
5301 usc->uld = ui;
5302 }
5303 goto done;
5304 }
5305 }
5306done:
5307 mtx_unlock(&t4_uld_list_lock);
5308
5309 return (rc);
5310}
5311
5312static int
5313deactivate_uld(struct uld_softc *usc)
5314{
5315 int rc;
5316
5317 mtx_lock(&t4_uld_list_lock);
5318
5319 if (usc->uld == NULL || usc->softc == NULL) {
5320 rc = EINVAL;
5321 goto done;
5322 }
5323
5324 rc = usc->uld->detach(usc->softc);
5325 if (rc == 0) {
5326 KASSERT(usc->uld->refcount > 0,
5327 ("%s: ULD has bad refcount", __func__));
5328 usc->uld->refcount--;
5329 usc->uld = NULL;
5330 usc->softc = NULL;
5331 }
5332done:
5333 mtx_unlock(&t4_uld_list_lock);
5334
5335 return (rc);
5336}
5337#endif
5338
5339/*
5340 * Come up with reasonable defaults for some of the tunables, provided they're
5341 * not set by the user (in which case we'll use the values as is).
5342 */
5343static void
5344tweak_tunables(void)
5345{
5346 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
5347
5348 if (t4_ntxq10g < 1)
5349 t4_ntxq10g = min(nc, NTXQ_10G);
5350
5351 if (t4_ntxq1g < 1)
5352 t4_ntxq1g = min(nc, NTXQ_1G);
5353
5354 if (t4_nrxq10g < 1)
5355 t4_nrxq10g = min(nc, NRXQ_10G);
5356
5357 if (t4_nrxq1g < 1)
5358 t4_nrxq1g = min(nc, NRXQ_1G);
5359
5360#ifndef TCP_OFFLOAD_DISABLE
5361 if (t4_nofldtxq10g < 1)
5362 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
5363
5364 if (t4_nofldtxq1g < 1)
5365 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
5366
5367 if (t4_nofldrxq10g < 1)
5368 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
5369
5370 if (t4_nofldrxq1g < 1)
5371 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
5372#endif
5373
5374 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
5375 t4_tmr_idx_10g = TMR_IDX_10G;
5376
5377 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
5378 t4_pktc_idx_10g = PKTC_IDX_10G;
5379
5380 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
5381 t4_tmr_idx_1g = TMR_IDX_1G;
5382
5383 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
5384 t4_pktc_idx_1g = PKTC_IDX_1G;
5385
5386 if (t4_qsize_txq < 128)
5387 t4_qsize_txq = 128;
5388
5389 if (t4_qsize_rxq < 128)
5390 t4_qsize_rxq = 128;
5391 while (t4_qsize_rxq & 7)
5392 t4_qsize_rxq++;
5393
5394 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
5395}
5396
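/*
 * Module load/unload glue.  Unload is refused while any adapter or ULD is
 * still registered.
 */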
5397static int
5398t4_mod_event(module_t mod, int cmd, void *arg)
5399{
5400 int rc = 0;
5401
5402 switch (cmd) {
5403 case MOD_LOAD:
5404 t4_sge_modload();
5405 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
5406 SLIST_INIT(&t4_list);
5407#ifndef TCP_OFFLOAD_DISABLE
5408 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
5409 SLIST_INIT(&t4_uld_list);
5410#endif
5411 tweak_tunables();
5412 break;
5413
5414 case MOD_UNLOAD:
5415#ifndef TCP_OFFLOAD_DISABLE
5416 mtx_lock(&t4_uld_list_lock);
5417 if (!SLIST_EMPTY(&t4_uld_list)) {
5418 rc = EBUSY;
5419 mtx_unlock(&t4_uld_list_lock);
5420 break;
5421 }
5422 mtx_unlock(&t4_uld_list_lock);
5423 mtx_destroy(&t4_uld_list_lock);
5424#endif
5425 mtx_lock(&t4_list_lock);
5426 if (!SLIST_EMPTY(&t4_list)) {
5427 rc = EBUSY;
5428 mtx_unlock(&t4_list_lock);
5429 break;
5430 }
5431 mtx_unlock(&t4_list_lock);
5432 mtx_destroy(&t4_list_lock);
5433 break;
5434 }
5435
5436 return (rc);
5437}
5438
5439static devclass_t t4_devclass;
5440static devclass_t cxgbe_devclass;
5441
5442DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
5443MODULE_VERSION(t4nex, 1);
5444
5445DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
5446MODULE_VERSION(cxgbe, 1);