t4_main.c — r227843 (deleted) vs. r228561 (added)
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 227843 2011-11-22 21:28:20Z marius $");
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 228561 2011-12-16 02:09:51Z np $");
30
31#include "opt_inet.h"
32
33#include <sys/param.h>
34#include <sys/conf.h>
35#include <sys/priv.h>
36#include <sys/kernel.h>
37#include <sys/bus.h>
38#include <sys/module.h>
39#include <sys/malloc.h>
40#include <sys/queue.h>
41#include <sys/taskqueue.h>
42#include <sys/pciio.h>
43#include <dev/pci/pcireg.h>
44#include <dev/pci/pcivar.h>
45#include <dev/pci/pci_private.h>
46#include <sys/firmware.h>
47#include <sys/sbuf.h>
48#include <sys/smp.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52#include <net/ethernet.h>
53#include <net/if.h>
54#include <net/if_types.h>
55#include <net/if_dl.h>
56#include <net/if_vlan_var.h>
57
58#include "common/t4_hw.h"
59#include "common/common.h"
60#include "common/t4_msg.h"
61#include "common/t4_regs.h"
62#include "common/t4_regs_values.h"
63#include "common/t4fw_interface.h"
64#include "t4_ioctl.h"
65#include "t4_l2t.h"
66
67/* T4 bus driver interface */
68static int t4_probe(device_t);
69static int t4_attach(device_t);
70static int t4_detach(device_t);
71static device_method_t t4_methods[] = {
72 DEVMETHOD(device_probe, t4_probe),
73 DEVMETHOD(device_attach, t4_attach),
74 DEVMETHOD(device_detach, t4_detach),
75
76 DEVMETHOD_END
77};
78static driver_t t4_driver = {
79 "t4nex",
80 t4_methods,
81 sizeof(struct adapter)
82};
83
84
85/* T4 port (cxgbe) interface */
86static int cxgbe_probe(device_t);
87static int cxgbe_attach(device_t);
88static int cxgbe_detach(device_t);
89static device_method_t cxgbe_methods[] = {
90 DEVMETHOD(device_probe, cxgbe_probe),
91 DEVMETHOD(device_attach, cxgbe_attach),
92 DEVMETHOD(device_detach, cxgbe_detach),
93 { 0, 0 }
94};
95static driver_t cxgbe_driver = {
96 "cxgbe",
97 cxgbe_methods,
98 sizeof(struct port_info)
99};
100
101static d_ioctl_t t4_ioctl;
102static d_open_t t4_open;
103static d_close_t t4_close;
104
105static struct cdevsw t4_cdevsw = {
106 .d_version = D_VERSION,
107 .d_flags = 0,
108 .d_open = t4_open,
109 .d_close = t4_close,
110 .d_ioctl = t4_ioctl,
111 .d_name = "t4nex",
112};
113
114/* ifnet + media interface */
115static void cxgbe_init(void *);
116static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
117static void cxgbe_start(struct ifnet *);
118static int cxgbe_transmit(struct ifnet *, struct mbuf *);
119static void cxgbe_qflush(struct ifnet *);
120static int cxgbe_media_change(struct ifnet *);
121static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
122
123MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
124
125/*
126 * Tunables.
127 */
128static SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0,
129 "cxgbe driver parameters");
123static struct mtx t4_list_lock;
124static SLIST_HEAD(, adapter) t4_list;
125#ifndef TCP_OFFLOAD_DISABLE
126static struct mtx t4_uld_list_lock;
127static SLIST_HEAD(, uld_info) t4_uld_list;
128#endif
130
129
131static int force_firmware_install = 0;
132TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
133SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN,
134 &force_firmware_install, 0, "install firmware on every attach.");
135
136/*
130/*
137 * Holdoff timer and packet counter values.
131 * Tunables. See tweak_tunables() too.
138 */
132 */
139static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
140static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
141
142/*
133
134/*
143 * Max # of tx and rx queues to use for each 10G and 1G port.
135 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
144 */
136 */
145static unsigned int max_ntxq_10g = 8;
146TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g);
147SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN,
148 &max_ntxq_10g, 0, "maximum number of tx queues per 10G port.");
137#define NTXQ_10G 16
138static int t4_ntxq10g = -1;
139TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
149
140
150static unsigned int max_nrxq_10g = 8;
151TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g);
152SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN,
153 &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port).");
141#define NRXQ_10G 8
142static int t4_nrxq10g = -1;
143TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
154
144
155static unsigned int max_ntxq_1g = 2;
156TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g);
157SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN,
158 &max_ntxq_1g, 0, "maximum number of tx queues per 1G port.");
145#define NTXQ_1G 4
146static int t4_ntxq1g = -1;
147TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
159
148
160static unsigned int max_nrxq_1g = 2;
161TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g);
162SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN,
163 &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port).");
149#define NRXQ_1G 2
150static int t4_nrxq1g = -1;
151TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
164
152
153#ifndef TCP_OFFLOAD_DISABLE
154#define NOFLDTXQ_10G 8
155static int t4_nofldtxq10g = -1;
156TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
157
158#define NOFLDRXQ_10G 2
159static int t4_nofldrxq10g = -1;
160TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
161
162#define NOFLDTXQ_1G 2
163static int t4_nofldtxq1g = -1;
164TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
165
166#define NOFLDRXQ_1G 1
167static int t4_nofldrxq1g = -1;
168TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
169#endif
170
165/*
166 * Holdoff parameters for 10G and 1G ports.
167 */
168static unsigned int tmr_idx_10g = 1;
169TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g);
170SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN,
171 &tmr_idx_10g, 0,
172 "default timer index for interrupt holdoff (10G ports).");
174#define TMR_IDX_10G 1
175static int t4_tmr_idx_10g = TMR_IDX_10G;
176TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
173
177
174static int pktc_idx_10g = 2;
175TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g);
176SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN,
177 &pktc_idx_10g, 0,
178 "default pkt counter index for interrupt holdoff (10G ports).");
178#define PKTC_IDX_10G 2
179static int t4_pktc_idx_10g = PKTC_IDX_10G;
180TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
179
181
180static unsigned int tmr_idx_1g = 1;
181TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g);
182SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN,
183 &tmr_idx_1g, 0,
184 "default timer index for interrupt holdoff (1G ports).");
182#define TMR_IDX_1G 1
183static int t4_tmr_idx_1g = TMR_IDX_1G;
184TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
185
185
186static int pktc_idx_1g = 2;
187TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g);
188SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN,
189 &pktc_idx_1g, 0,
190 "default pkt counter index for interrupt holdoff (1G ports).");
186#define PKTC_IDX_1G 2
187static int t4_pktc_idx_1g = PKTC_IDX_1G;
188TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
191
192/*
193 * Size (# of entries) of each tx and rx queue.
194 */
195static unsigned int qsize_txq = TX_EQ_QSIZE;
196TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq);
197SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN,
198 &qsize_txq, 0, "default queue size of NIC tx queues.");
193static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
194TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
199
195
200static unsigned int qsize_rxq = RX_IQ_QSIZE;
201TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq);
202SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN,
203 &qsize_rxq, 0, "default queue size of NIC rx queues.");
196static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
197TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
204
205/*
198
199/*
206 * Interrupt types allowed.
200 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
207 */
201 */
208static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
209TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types);
210SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
211 "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");
202static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
203TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
212
213/*
204
205/*
214 * Force the driver to use the same set of interrupts for all ports.
206 * Configuration file.
215 */
207 */
216static int intr_shared = 0;
217TUNABLE_INT("hw.cxgbe.interrupts_shared", &intr_shared);
218SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupts_shared, CTLFLAG_RDTUN,
219 &intr_shared, 0, "interrupts shared between all ports");
208static char t4_cfg_file[32] = "default";
209TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
220
210
221static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC;
222TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode);
223SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN,
224 &filter_mode, 0, "default global filter mode.");
211/*
212 * ASIC features that will be used. Disable the ones you don't want so that the
213 * chip resources aren't wasted on features that will not be used.
214 */
215static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
216TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
225
217
218static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
219TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
220
221static int t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
222TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
223
224static int t4_rdmacaps_allowed = 0;
225TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
226
227static int t4_iscsicaps_allowed = 0;
228TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
229
230static int t4_fcoecaps_allowed = 0;
231TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
232
226struct intrs_and_queues {
227 int intr_type; /* INTx, MSI, or MSI-X */
228 int nirq; /* Number of vectors */
229 int intr_shared; /* Interrupts shared between all ports */
236 int intr_flags;
230 int ntxq10g; /* # of NIC txq's for each 10G port */
231 int nrxq10g; /* # of NIC rxq's for each 10G port */
232 int ntxq1g; /* # of NIC txq's for each 1G port */
233 int nrxq1g; /* # of NIC rxq's for each 1G port */
241#ifndef TCP_OFFLOAD_DISABLE
242 int nofldtxq10g; /* # of TOE txq's for each 10G port */
243 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
244 int nofldtxq1g; /* # of TOE txq's for each 1G port */
245 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
246#endif
234};
235
236struct filter_entry {
237 uint32_t valid:1; /* filter allocated and valid */
238 uint32_t locked:1; /* filter is administratively locked */
239 uint32_t pending:1; /* filter action is pending firmware reply */
240 uint32_t smtidx:8; /* Source MAC Table index for smac */
241 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
242
243 struct t4_filter_specification fs;
244};
245
246enum {
247 MEMWIN0_APERTURE = 2048,
248 MEMWIN0_BASE = 0x1b800,
249 MEMWIN1_APERTURE = 32768,
250 MEMWIN1_BASE = 0x28000,
251 MEMWIN2_APERTURE = 65536,
252 MEMWIN2_BASE = 0x30000,
253};
254
255enum {
256 XGMAC_MTU = (1 << 0),
257 XGMAC_PROMISC = (1 << 1),
258 XGMAC_ALLMULTI = (1 << 2),
259 XGMAC_VLANEX = (1 << 3),
260 XGMAC_UCADDR = (1 << 4),
261 XGMAC_MCADDRS = (1 << 5),
262
263 XGMAC_ALL = 0xffff
264};
265
266static int map_bars(struct adapter *);
267static void setup_memwin(struct adapter *);
268static int cfg_itype_and_nqueues(struct adapter *, int, int,
269 struct intrs_and_queues *);
270static int prep_firmware(struct adapter *);
271static int get_devlog_params(struct adapter *, struct devlog_params *);
272static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
273static int get_params(struct adapter *, struct fw_caps_config_cmd *);
275static int upload_config_file(struct adapter *, const struct firmware *,
276 uint32_t *, uint32_t *);
277static int partition_resources(struct adapter *, const struct firmware *);
278static int get_params__pre_init(struct adapter *);
279static int get_params__post_init(struct adapter *);
274static void t4_set_desc(struct adapter *);
275static void build_medialist(struct port_info *);
276static int update_mac_settings(struct port_info *, int);
277static int cxgbe_init_locked(struct port_info *);
278static int cxgbe_init_synchronized(struct port_info *);
279static int cxgbe_uninit_locked(struct port_info *);
280static int cxgbe_uninit_synchronized(struct port_info *);
281static int first_port_up(struct adapter *);
282static int last_port_down(struct adapter *);
287static int adapter_full_init(struct adapter *);
288static int adapter_full_uninit(struct adapter *);
289static int port_full_init(struct port_info *);
290static int port_full_uninit(struct port_info *);
291static void quiesce_eq(struct adapter *, struct sge_eq *);
292static void quiesce_iq(struct adapter *, struct sge_iq *);
293static void quiesce_fl(struct adapter *, struct sge_fl *);
283static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
284 iq_intr_handler_t *, void *, char *);
295 driver_intr_t *, void *, char *);
285static int t4_free_irq(struct adapter *, struct irq *);
286static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
287 unsigned int);
288static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
289static void cxgbe_tick(void *);
301static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
302 struct mbuf *);
290static int t4_sysctls(struct adapter *);
291static int cxgbe_sysctls(struct port_info *);
292static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
306static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
293static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
294static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
295static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
296static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
297static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
312static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
313static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
314static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
298static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
316static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
317static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
318static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
319static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
320static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
321static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
322static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
323static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
324static int sysctl_tids(SYSCTL_HANDLER_ARGS);
325static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
326static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
299static inline void txq_start(struct ifnet *, struct sge_txq *);
300static uint32_t fconf_to_mode(uint32_t);
301static uint32_t mode_to_fconf(uint32_t);
302static uint32_t fspec_to_fconf(struct t4_filter_specification *);
303static int get_filter_mode(struct adapter *, uint32_t *);
304static int set_filter_mode(struct adapter *, uint32_t);
305static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
306static int get_filter(struct adapter *, struct t4_filter *);
307static int set_filter(struct adapter *, struct t4_filter *);
308static int del_filter(struct adapter *, struct t4_filter *);
309static void clear_filter(struct filter_entry *);
310static int set_filter_wr(struct adapter *, int);
311static int del_filter_wr(struct adapter *, int);
312void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
340static int filter_rpl(struct sge_iq *, const struct rss_header *,
341 struct mbuf *);
313static int get_sge_context(struct adapter *, struct t4_sge_context *);
343static int read_card_mem(struct adapter *, struct t4_mem_range *);
344#ifndef TCP_OFFLOAD_DISABLE
345static int toe_capability(struct port_info *, int);
346static int activate_uld(struct adapter *, int, struct uld_softc *);
347static int deactivate_uld(struct uld_softc *);
348#endif
314static int t4_mod_event(module_t, int, void *);
315
316struct t4_pciids {
317 uint16_t device;
318 uint8_t mpf;
319 char *desc;
320} t4_pciids[] = {
321 {0xa000, 0, "Chelsio Terminator 4 FPGA"},
322 {0x4400, 4, "Chelsio T440-dbg"},
323 {0x4401, 4, "Chelsio T420-CR"},
324 {0x4402, 4, "Chelsio T422-CR"},
325 {0x4403, 4, "Chelsio T440-CR"},
326 {0x4404, 4, "Chelsio T420-BCH"},
327 {0x4405, 4, "Chelsio T440-BCH"},
328 {0x4406, 4, "Chelsio T440-CH"},
329 {0x4407, 4, "Chelsio T420-SO"},
330 {0x4408, 4, "Chelsio T420-CX"},
331 {0x4409, 4, "Chelsio T420-BT"},
332 {0x440a, 4, "Chelsio T404-BT"},
333};
334
370#ifndef TCP_OFFLOAD_DISABLE
371/* This is used in service_iq() to get to the fl associated with an iq. */
372CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
373#endif
374
335static int
336t4_probe(device_t dev)
337{
338 int i;
339 uint16_t v = pci_get_vendor(dev);
340 uint16_t d = pci_get_device(dev);
341
342 if (v != PCI_VENDOR_ID_CHELSIO)
343 return (ENXIO);
344
345 for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
346 if (d == t4_pciids[i].device &&
347 pci_get_function(dev) == t4_pciids[i].mpf) {
348 device_set_desc(dev, t4_pciids[i].desc);
349 return (BUS_PROBE_DEFAULT);
350 }
351 }
352
353 return (ENXIO);
354}
355
356static int
357t4_attach(device_t dev)
358{
359 struct adapter *sc;
360 int rc = 0, i, n10g, n1g, rqidx, tqidx;
361 struct fw_caps_config_cmd caps;
362 uint32_t p, v;
363 struct intrs_and_queues iaq;
364 struct sge *s;
401 struct intrs_and_queues iaq;
402 struct sge *s;
403#ifndef TCP_OFFLOAD_DISABLE
404 int ofld_rqidx, ofld_tqidx;
405#endif
365
366 sc = device_get_softc(dev);
367 sc->dev = dev;
368 sc->pf = pci_get_function(dev);
369 sc->mbox = sc->pf;
370
371 pci_enable_busmaster(dev);
372 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
414 uint32_t v;
415
373 pci_set_max_read_req(dev, 4096);
374 v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
375 v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
376 pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
377 }
378
379 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
380 device_get_nameunit(dev));
381 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
425 mtx_lock(&t4_list_lock);
426 SLIST_INSERT_HEAD(&t4_list, sc, link);
427 mtx_unlock(&t4_list_lock);
382
428
429 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
430 TAILQ_INIT(&sc->sfl);
431 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
432
383 rc = map_bars(sc);
384 if (rc != 0)
385 goto done; /* error message displayed already */
386
387 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
438 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++)
439 sc->cpl_handler[i] = cpl_not_handled;
440 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, filter_rpl);
388
389 /* Prepare the adapter for operation */
390 rc = -t4_prep_adapter(sc);
391 if (rc != 0) {
392 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
393 goto done;
394 }
395
396 /* Do this really early */
449 /*
450 * Do this really early, with the memory windows set up even before the
451 * character device. The userland tool's register i/o and mem read
452 * will work even in "recovery mode".
453 */
454 setup_memwin(sc);
397 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
398 GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
399 sc->cdev->si_drv1 = sc;
400
459 /* Go no further if recovery mode has been requested. */
460 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
461 device_printf(dev, "recovery mode.\n");
462 goto done;
463 }
464
401 /* Prepare the firmware for operation */
402 rc = prep_firmware(sc);
403 if (rc != 0)
404 goto done; /* error message displayed already */
405
406 /* Read firmware devlog parameters */
407 (void) get_devlog_params(sc, &sc->params.devlog);
470 rc = get_params__pre_init(sc);
471 if (rc != 0)
472 goto done; /* error message displayed already */
408
473
409 /* Get device capabilities and select which ones we'll use */
410 rc = get_capabilities(sc, &caps);
411 if (rc != 0) {
412 device_printf(dev,
413 "failed to initialize adapter capabilities: %d.\n", rc);
414 goto done;
415 }
474 rc = t4_sge_init(sc);
475 if (rc != 0)
476 goto done; /* error message displayed already */
416
477
417 /* Choose the global RSS mode. */
418 rc = -t4_config_glbl_rss(sc, sc->mbox,
419 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
420 F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
421 F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
422 F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
423 if (rc != 0) {
424 device_printf(dev,
425 "failed to select global RSS mode: %d.\n", rc);
426 goto done;
478 if (sc->flags & MASTER_PF) {
479 /* get basic stuff going */
480 rc = -t4_fw_initialize(sc, sc->mbox);
481 if (rc != 0) {
482 device_printf(dev, "early init failed: %d.\n", rc);
483 goto done;
484 }
427 }
428
485 }
486
429 /* These are total (sum of all ports) limits for a bus driver */
430 rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0,
431 128, /* max # of egress queues */
432 64, /* max # of egress Ethernet or control queues */
433 64, /* max # of ingress queues with fl/interrupt */
434 0, /* max # of ingress queues without interrupt */
435 0, /* PCIe traffic class */
436 4, /* max # of virtual interfaces */
437 M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16,
438 FW_CMD_CAP_PF, FW_CMD_CAP_PF);
439 if (rc != 0) {
440 device_printf(dev,
441 "failed to configure pf/vf resources: %d.\n", rc);
442 goto done;
443 }
487 rc = get_params__post_init(sc);
488 if (rc != 0)
489 goto done; /* error message displayed already */
444
490
445 /* Need this before sge_init */
446 for (i = 0; i < SGE_NTIMERS; i++)
447 sc->sge.timer_val[i] = min(intr_timer[i], 200U);
448 for (i = 0; i < SGE_NCOUNTERS; i++)
449 sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0);
491 if (sc->flags & MASTER_PF) {
450
492
451 /* Also need the cooked value of cclk before sge_init */
452 p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
453 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
454 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v);
455 if (rc != 0) {
456 device_printf(sc->dev,
457 "failed to obtain core clock value: %d.\n", rc);
458 goto done;
459 }
460 sc->params.vpd.cclk = v;
493 /* final tweaks to some settings */
461
494
462 t4_sge_init(sc);
463
464 t4_set_filter_mode(sc, filter_mode);
465 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG,
466 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP),
467 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP));
468 t4_tp_wr_bits_indirect(sc, A_TP_INGRESS_CONFIG, F_CSUM_HAS_PSEUDO_HDR,
469 F_LOOKUPEVERYPKT);
470
471 /* get basic stuff going */
472 rc = -t4_early_init(sc, sc->mbox);
473 if (rc != 0) {
474 device_printf(dev, "early init failed: %d.\n", rc);
475 goto done;
495 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
496 sc->params.b_wnd);
497 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
498 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
499 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);
500 t4_set_reg_field(sc, A_TP_PARA_REG5,
501 V_INDICATESIZE(M_INDICATESIZE) |
502 F_REARMDDPOFFSET | F_RESETDDPOFFSET,
503 V_INDICATESIZE(M_INDICATESIZE) |
504 F_REARMDDPOFFSET | F_RESETDDPOFFSET);
505 } else {
506 /*
507 * XXX: Verify that we can live with whatever the master driver
508 * has done so far, and hope that it doesn't change any global
509 * setting from underneath us in the future.
510 */
476 }
477
511 }
512
478 rc = get_params(sc, &caps);
479 if (rc != 0)
480 goto done; /* error message displayed already */
513 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
514 A_TP_VLAN_PRI_MAP);
481
515
482 /* These are finalized by FW initialization, load their values now */
483 v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
484 sc->params.tp.tre = G_TIMERRESOLUTION(v);
485 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
486 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
516 for (i = 0; i < NCHAN; i++)
517 sc->params.tp.tx_modq[i] = i;
487
518
488 /* tweak some settings */
489 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
490 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
491 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
492 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
493 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
494 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);
495
496 setup_memwin(sc);
497
498 rc = t4_create_dma_tag(sc);
499 if (rc != 0)
500 goto done; /* error message displayed already */
501
502 /*
503 * First pass over all the ports - allocate VIs and initialize some
504 * basic parameters like mac address, port type, etc. We also figure
505 * out whether a port is 10G or 1G and use that information when
506 * calculating how many interrupts to attempt to allocate.
507 */
508 n10g = n1g = 0;
509 for_each_port(sc, i) {
510 struct port_info *pi;
511
512 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
513 sc->port[i] = pi;
514
515 /* These must be set before t4_port_init */
516 pi->adapter = sc;
517 pi->port_id = i;
518
519 /* Allocate the vi and initialize parameters like mac addr */
520 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
521 if (rc != 0) {
522 device_printf(dev, "unable to initialize port %d: %d\n",
523 i, rc);
524 free(pi, M_CXGBE);
525 sc->port[i] = NULL;
526 goto done;
527 }
528
529 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
530 device_get_nameunit(dev), i);
531 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
532
533 if (is_10G_port(pi)) {
534 n10g++;
535 pi->tmr_idx = tmr_idx_10g;
536 pi->pktc_idx = pktc_idx_10g;
556 pi->tmr_idx = t4_tmr_idx_10g;
557 pi->pktc_idx = t4_pktc_idx_10g;
537 } else {
538 n1g++;
539 pi->tmr_idx = tmr_idx_1g;
540 pi->pktc_idx = pktc_idx_1g;
560 pi->tmr_idx = t4_tmr_idx_1g;
561 pi->pktc_idx = t4_pktc_idx_1g;
541 }
542
543 pi->xact_addr_filt = -1;
544
545 pi->qsize_rxq = max(qsize_rxq, 128);
546 while (pi->qsize_rxq & 7)
547 pi->qsize_rxq++;
548 pi->qsize_txq = max(qsize_txq, 128);
566 pi->qsize_rxq = t4_qsize_rxq;
567 pi->qsize_txq = t4_qsize_txq;
549
568
550 if (pi->qsize_rxq != qsize_rxq) {
551 device_printf(dev,
552 "using %d instead of %d as the rx queue size.\n",
553 pi->qsize_rxq, qsize_rxq);
554 }
555 if (pi->qsize_txq != qsize_txq) {
556 device_printf(dev,
557 "using %d instead of %d as the tx queue size.\n",
558 pi->qsize_txq, qsize_txq);
559 }
560
561 pi->dev = device_add_child(dev, "cxgbe", -1);
562 if (pi->dev == NULL) {
563 device_printf(dev,
564 "failed to add device for port %d.\n", i);
565 rc = ENXIO;
566 goto done;
567 }
568 device_set_softc(pi->dev, pi);
569
570 setbit(&sc->registered_device_map, i);
571 }
572
573 if (sc->registered_device_map == 0) {
574 device_printf(dev, "no usable ports\n");
575 rc = ENXIO;
576 goto done;
577 }
578
579 /*
580 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
581 */
582 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
583 if (rc != 0)
584 goto done; /* error message displayed already */
585
586 sc->intr_type = iaq.intr_type;
587 sc->intr_count = iaq.nirq;
588 sc->flags |= iaq.intr_flags;
588
589 s = &sc->sge;
590 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
591 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
592 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
593 s->neq += sc->params.nports; /* control queues, 1 per port */
594 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
594 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
595 if (iaq.intr_shared)
596 sc->flags |= INTR_SHARED;
597 s->niq += NINTRQ(sc); /* interrupt queues */
598
596
599 s->intrq = malloc(NINTRQ(sc) * sizeof(struct sge_iq), M_CXGBE,
597#ifndef TCP_OFFLOAD_DISABLE
598 if (is_offload(sc)) {
599
600 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
601 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
602 s->neq += s->nofldtxq + s->nofldrxq;
603 s->niq += s->nofldrxq;
604
605 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
606 M_CXGBE, M_ZERO | M_WAITOK);
607 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
608 M_CXGBE, M_ZERO | M_WAITOK);
609 }
610#endif
611
612 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
600 M_ZERO | M_WAITOK);
613 M_ZERO | M_WAITOK);
601 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_ctrlq), M_CXGBE,
602 M_ZERO | M_WAITOK);
603 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
604 M_ZERO | M_WAITOK);
605 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
606 M_ZERO | M_WAITOK);
607 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
608 M_ZERO | M_WAITOK);
609 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
610 M_ZERO | M_WAITOK);
611
612 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
613 M_ZERO | M_WAITOK);
614
615 sc->l2t = t4_init_l2t(M_WAITOK);
626 t4_init_l2t(sc, M_WAITOK);
616
627
617 t4_sysctls(sc);
618
619 /*
620 * Second pass over the ports. This time we know the number of rx and
621 * tx queues that each port should get.
622 */
623 rqidx = tqidx = 0;
633#ifndef TCP_OFFLOAD_DISABLE
634 ofld_rqidx = ofld_tqidx = 0;
635#endif
624 for_each_port(sc, i) {
625 struct port_info *pi = sc->port[i];
626
627 if (pi == NULL)
628 continue;
629
630 pi->first_rxq = rqidx;
631 pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;
632
633 pi->first_txq = tqidx;
634 pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;
644 if (is_10G_port(pi)) {
645 pi->nrxq = iaq.nrxq10g;
646 pi->ntxq = iaq.ntxq10g;
647 } else {
648 pi->nrxq = iaq.nrxq1g;
649 pi->ntxq = iaq.ntxq1g;
650 }
635
636 rqidx += pi->nrxq;
637 tqidx += pi->ntxq;
654
655#ifndef TCP_OFFLOAD_DISABLE
656 if (is_offload(sc)) {
657 pi->first_ofld_rxq = ofld_rqidx;
658 pi->first_ofld_txq = ofld_tqidx;
659 if (is_10G_port(pi)) {
660 pi->nofldrxq = iaq.nofldrxq10g;
661 pi->nofldtxq = iaq.nofldtxq10g;
662 } else {
663 pi->nofldrxq = iaq.nofldrxq1g;
664 pi->nofldtxq = iaq.nofldtxq1g;
665 }
666 ofld_rqidx += pi->nofldrxq;
667 ofld_tqidx += pi->nofldtxq;
668 }
669#endif
638 }
639
640 rc = bus_generic_attach(dev);
641 if (rc != 0) {
642 device_printf(dev,
643 "failed to attach all child ports: %d\n", rc);
644 goto done;
645 }
646
647#ifdef INVARIANTS
648 device_printf(dev,
679 device_printf(dev,
649 "%p, %d ports (0x%x), %d intr_type, %d intr_count\n",
650 sc, sc->params.nports, sc->params.portvec,
651 sc->intr_type, sc->intr_count);
652#endif
680 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
681 sc->params.pci.width, sc->params.nports, sc->intr_count,
682 sc->intr_type == INTR_MSIX ? "MSI-X" :
683 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
684 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
685
653 t4_set_desc(sc);
654
655done:
689 if (rc != 0 && sc->cdev) {
690 /* cdev was created and so cxgbetool works; recover that way. */
691 device_printf(dev,
692 "error during attach, adapter is now in recovery mode.\n");
693 rc = 0;
694 }
695
656 if (rc != 0)
657 t4_detach(dev);
698 else
699 t4_sysctls(sc);
658
659 return (rc);
660}
661
662/*
663 * Idempotent
664 */
665static int
666t4_detach(device_t dev)
667{
668 struct adapter *sc;
669 struct port_info *pi;
670 int i;
712 int i, rc;
671
672 sc = device_get_softc(dev);
673
674 if (sc->cdev)
716 if (sc->flags & FULL_INIT_DONE)
717 t4_intr_disable(sc);
718
719 if (sc->cdev) {
675 destroy_dev(sc->cdev);
721 sc->cdev = NULL;
722 }
676
723
677 bus_generic_detach(dev);
724 rc = bus_generic_detach(dev);
725 if (rc) {
726 device_printf(dev,
727 "failed to detach child devices: %d\n", rc);
728 return (rc);
729 }
730
678 for (i = 0; i < MAX_NPORTS; i++) {
679 pi = sc->port[i];
680 if (pi) {
681 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
682 if (pi->dev)
683 device_delete_child(dev, pi->dev);
684
685 mtx_destroy(&pi->pi_lock);
686 free(pi, M_CXGBE);
687 }
688 }
689
743 if (sc->flags & FULL_INIT_DONE)
744 adapter_full_uninit(sc);
745
690 if (sc->flags & FW_OK)
691 t4_fw_bye(sc, sc->mbox);
692
693 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
694 pci_release_msi(dev);
695
696 if (sc->regs_res)
697 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
698 sc->regs_res);
699
700 if (sc->msix_res)
701 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
702 sc->msix_res);
703
704 if (sc->l2t)
705 t4_free_l2t(sc->l2t);
706
763#ifndef TCP_OFFLOAD_DISABLE
764 free(sc->sge.ofld_rxq, M_CXGBE);
765 free(sc->sge.ofld_txq, M_CXGBE);
766#endif
707 free(sc->irq, M_CXGBE);
708 free(sc->sge.rxq, M_CXGBE);
709 free(sc->sge.txq, M_CXGBE);
710 free(sc->sge.ctrlq, M_CXGBE);
711 free(sc->sge.intrq, M_CXGBE);
712 free(sc->sge.iqmap, M_CXGBE);
713 free(sc->sge.eqmap, M_CXGBE);
714 free(sc->tids.ftid_tab, M_CXGBE);
715 t4_destroy_dma_tag(sc);
716 mtx_destroy(&sc->sc_lock);
775 if (mtx_initialized(&sc->sc_lock)) {
776 mtx_lock(&t4_list_lock);
777 SLIST_REMOVE(&t4_list, sc, adapter, link);
778 mtx_unlock(&t4_list_lock);
779 mtx_destroy(&sc->sc_lock);
780 }
717
781
782 if (mtx_initialized(&sc->sfl_lock))
783 mtx_destroy(&sc->sfl_lock);
784
718 bzero(sc, sizeof(*sc));
719
720 return (0);
721}
722
723
724static int
725cxgbe_probe(device_t dev)
726{
727 char buf[128];
728 struct port_info *pi = device_get_softc(dev);
729
730 snprintf(buf, sizeof(buf), "Port %d", pi->port_id);
797 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
731 device_set_desc_copy(dev, buf);
732
733 return (BUS_PROBE_DEFAULT);
734}
735
736#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
737 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
738 IFCAP_VLAN_HWTSO)
739#define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6)
740
741static int
742cxgbe_attach(device_t dev)
743{
744 struct port_info *pi = device_get_softc(dev);
745 struct ifnet *ifp;
746
747 /* Allocate an ifnet and set it up */
748 ifp = if_alloc(IFT_ETHER);
749 if (ifp == NULL) {
750 device_printf(dev, "Cannot allocate ifnet\n");
751 return (ENOMEM);
752 }
753 pi->ifp = ifp;
754 ifp->if_softc = pi;
755
756 callout_init(&pi->tick, CALLOUT_MPSAFE);
757 pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT,
758 taskqueue_thread_enqueue, &pi->tq);
759 if (pi->tq == NULL) {
760 device_printf(dev, "failed to allocate port task queue\n");
761 if_free(pi->ifp);
762 return (ENOMEM);
763 }
764 taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq",
765 device_get_nameunit(dev));
766
767 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
768 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
769
770 ifp->if_init = cxgbe_init;
771 ifp->if_ioctl = cxgbe_ioctl;
772 ifp->if_start = cxgbe_start;
773 ifp->if_transmit = cxgbe_transmit;
774 ifp->if_qflush = cxgbe_qflush;
775
776 ifp->if_snd.ifq_drv_maxlen = 1024;
777 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
778 IFQ_SET_READY(&ifp->if_snd);
779
780 ifp->if_capabilities = T4_CAP;
839#ifndef TCP_OFFLOAD_DISABLE
840 if (is_offload(pi->adapter))
841 ifp->if_capabilities |= IFCAP_TOE4;
842#endif
781 ifp->if_capenable = T4_CAP_ENABLE;
782 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
783
784 /* Initialize ifmedia for this port */
785 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
786 cxgbe_media_status);
787 build_medialist(pi);
788
789 ether_ifattach(ifp, pi->hw_addr);
790
791#ifdef INVARIANTS
792 device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq);
853#ifndef TCP_OFFLOAD_DISABLE
854 if (is_offload(pi->adapter)) {
855 device_printf(dev,
856 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
857 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
858 } else
793#endif
859#endif
860 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
794
795 cxgbe_sysctls(pi);
796
797 return (0);
798}
799
800static int
801cxgbe_detach(device_t dev)
802{
803 struct port_info *pi = device_get_softc(dev);
804 struct adapter *sc = pi->adapter;
805 int rc;
872 struct ifnet *ifp = pi->ifp;
806
807 /* Tell if_ioctl and if_init that the port is going away */
808 ADAPTER_LOCK(sc);
809 SET_DOOMED(pi);
810 wakeup(&sc->flags);
811 while (IS_BUSY(sc))
812 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
813 SET_BUSY(sc);
814 ADAPTER_UNLOCK(sc);
815
816 rc = cxgbe_uninit_synchronized(pi);
817 if (rc != 0)
818 device_printf(dev, "port uninit failed: %d.\n", rc);
883 PORT_LOCK(pi);
884 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
885 callout_stop(&pi->tick);
886 PORT_UNLOCK(pi);
887 callout_drain(&pi->tick);
819
888
820 taskqueue_free(pi->tq);
889 /* Let detach proceed even if these fail. */
890 cxgbe_uninit_synchronized(pi);
891 port_full_uninit(pi);
821
822 ifmedia_removeall(&pi->media);
823 ether_ifdetach(pi->ifp);
824 if_free(pi->ifp);
825
826 ADAPTER_LOCK(sc);
827 CLR_BUSY(sc);
828 wakeup_one(&sc->flags);
829 ADAPTER_UNLOCK(sc);
830
831 return (0);
832}
833
834static void
835cxgbe_init(void *arg)
836{
837 struct port_info *pi = arg;
838 struct adapter *sc = pi->adapter;
839
840 ADAPTER_LOCK(sc);
841 cxgbe_init_locked(pi); /* releases adapter lock */
842 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
843}
844
845static int
846cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
847{
848 int rc = 0, mtu, flags;
849 struct port_info *pi = ifp->if_softc;
850 struct adapter *sc = pi->adapter;
851 struct ifreq *ifr = (struct ifreq *)data;
852 uint32_t mask;
853
854 switch (cmd) {
855 case SIOCSIFMTU:
856 ADAPTER_LOCK(sc);
857 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
858 if (rc) {
859fail:
860 ADAPTER_UNLOCK(sc);
861 return (rc);
862 }
863
864 mtu = ifr->ifr_mtu;
865 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
866 rc = EINVAL;
867 } else {
868 ifp->if_mtu = mtu;
869 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
870 t4_update_fl_bufsize(ifp);
871 PORT_LOCK(pi);
872 rc = update_mac_settings(pi, XGMAC_MTU);
873 PORT_UNLOCK(pi);
874 }
875 }
876 ADAPTER_UNLOCK(sc);
877 break;
878
879 case SIOCSIFFLAGS:
880 ADAPTER_LOCK(sc);
881 if (IS_DOOMED(pi)) {
882 rc = ENXIO;
883 goto fail;
884 }
885 if (ifp->if_flags & IFF_UP) {
886 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
887 flags = pi->if_flags;
888 if ((ifp->if_flags ^ flags) &
889 (IFF_PROMISC | IFF_ALLMULTI)) {
890 if (IS_BUSY(sc)) {
891 rc = EBUSY;
892 goto fail;
893 }
894 PORT_LOCK(pi);
895 rc = update_mac_settings(pi,
896 XGMAC_PROMISC | XGMAC_ALLMULTI);
897 PORT_UNLOCK(pi);
898 }
899 ADAPTER_UNLOCK(sc);
900 } else
901 rc = cxgbe_init_locked(pi);
902 pi->if_flags = ifp->if_flags;
903 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
904 rc = cxgbe_uninit_locked(pi);
905 else
906 ADAPTER_UNLOCK(sc);
907
908 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
909 break;
910
911 case SIOCADDMULTI:
912 case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
913 ADAPTER_LOCK(sc);
914 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
915 if (rc)
916 goto fail;
917
918 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
919 PORT_LOCK(pi);
920 rc = update_mac_settings(pi, XGMAC_MCADDRS);
921 PORT_UNLOCK(pi);
922 }
923 ADAPTER_UNLOCK(sc);
924 break;
925
926 case SIOCSIFCAP:
927 ADAPTER_LOCK(sc);
928 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
929 if (rc)
930 goto fail;
931
932 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
933 if (mask & IFCAP_TXCSUM) {
934 ifp->if_capenable ^= IFCAP_TXCSUM;
935 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
936
937 if (IFCAP_TSO & ifp->if_capenable &&
938 !(IFCAP_TXCSUM & ifp->if_capenable)) {
939 ifp->if_capenable &= ~IFCAP_TSO;
940 ifp->if_hwassist &= ~CSUM_TSO;
941 if_printf(ifp,
942 "tso disabled due to -txcsum.\n");
943 }
944 }
945 if (mask & IFCAP_RXCSUM)
946 ifp->if_capenable ^= IFCAP_RXCSUM;
947 if (mask & IFCAP_TSO4) {
948 ifp->if_capenable ^= IFCAP_TSO4;
949
950 if (IFCAP_TSO & ifp->if_capenable) {
951 if (IFCAP_TXCSUM & ifp->if_capenable)
952 ifp->if_hwassist |= CSUM_TSO;
953 else {
954 ifp->if_capenable &= ~IFCAP_TSO;
955 ifp->if_hwassist &= ~CSUM_TSO;
956 if_printf(ifp,
957 "enable txcsum first.\n");
958 rc = EAGAIN;
892
893 ifmedia_removeall(&pi->media);
894 ether_ifdetach(pi->ifp);
895 if_free(pi->ifp);
896
897 ADAPTER_LOCK(sc);
898 CLR_BUSY(sc);
899 wakeup_one(&sc->flags);
900 ADAPTER_UNLOCK(sc);
901
902 return (0);
903}
904
905static void
906cxgbe_init(void *arg)
907{
908 struct port_info *pi = arg;
909 struct adapter *sc = pi->adapter;
910
911 ADAPTER_LOCK(sc);
912 cxgbe_init_locked(pi); /* releases adapter lock */
913 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
914}
915
916static int
917cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
918{
919 int rc = 0, mtu, flags;
920 struct port_info *pi = ifp->if_softc;
921 struct adapter *sc = pi->adapter;
922 struct ifreq *ifr = (struct ifreq *)data;
923 uint32_t mask;
924
925 switch (cmd) {
926 case SIOCSIFMTU:
927 ADAPTER_LOCK(sc);
928 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
929 if (rc) {
930fail:
931 ADAPTER_UNLOCK(sc);
932 return (rc);
933 }
934
935 mtu = ifr->ifr_mtu;
936 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
937 rc = EINVAL;
938 } else {
939 ifp->if_mtu = mtu;
940 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
941 t4_update_fl_bufsize(ifp);
942 PORT_LOCK(pi);
943 rc = update_mac_settings(pi, XGMAC_MTU);
944 PORT_UNLOCK(pi);
945 }
946 }
947 ADAPTER_UNLOCK(sc);
948 break;
949
950 case SIOCSIFFLAGS:
951 ADAPTER_LOCK(sc);
952 if (IS_DOOMED(pi)) {
953 rc = ENXIO;
954 goto fail;
955 }
956 if (ifp->if_flags & IFF_UP) {
957 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
958 flags = pi->if_flags;
959 if ((ifp->if_flags ^ flags) &
960 (IFF_PROMISC | IFF_ALLMULTI)) {
961 if (IS_BUSY(sc)) {
962 rc = EBUSY;
963 goto fail;
964 }
965 PORT_LOCK(pi);
966 rc = update_mac_settings(pi,
967 XGMAC_PROMISC | XGMAC_ALLMULTI);
968 PORT_UNLOCK(pi);
969 }
970 ADAPTER_UNLOCK(sc);
971 } else
972 rc = cxgbe_init_locked(pi);
973 pi->if_flags = ifp->if_flags;
974 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
975 rc = cxgbe_uninit_locked(pi);
976 else
977 ADAPTER_UNLOCK(sc);
978
979 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
980 break;
981
982 case SIOCADDMULTI:
983 case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
984 ADAPTER_LOCK(sc);
985 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
986 if (rc)
987 goto fail;
988
989 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
990 PORT_LOCK(pi);
991 rc = update_mac_settings(pi, XGMAC_MCADDRS);
992 PORT_UNLOCK(pi);
993 }
994 ADAPTER_UNLOCK(sc);
995 break;
996
997 case SIOCSIFCAP:
998 ADAPTER_LOCK(sc);
999 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1000 if (rc)
1001 goto fail;
1002
1003 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1004 if (mask & IFCAP_TXCSUM) {
1005 ifp->if_capenable ^= IFCAP_TXCSUM;
1006 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1007
1008 if (IFCAP_TSO & ifp->if_capenable &&
1009 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1010 ifp->if_capenable &= ~IFCAP_TSO;
1011 ifp->if_hwassist &= ~CSUM_TSO;
1012 if_printf(ifp,
1013 "tso disabled due to -txcsum.\n");
1014 }
1015 }
1016 if (mask & IFCAP_RXCSUM)
1017 ifp->if_capenable ^= IFCAP_RXCSUM;
1018 if (mask & IFCAP_TSO4) {
1019 ifp->if_capenable ^= IFCAP_TSO4;
1020
1021 if (IFCAP_TSO & ifp->if_capenable) {
1022 if (IFCAP_TXCSUM & ifp->if_capenable)
1023 ifp->if_hwassist |= CSUM_TSO;
1024 else {
1025 ifp->if_capenable &= ~IFCAP_TSO;
1026 ifp->if_hwassist &= ~CSUM_TSO;
1027 if_printf(ifp,
1028 "enable txcsum first.\n");
1029 rc = EAGAIN;
1030 goto fail;
959 }
960 } else
961 ifp->if_hwassist &= ~CSUM_TSO;
962 }
963 if (mask & IFCAP_LRO) {
964#ifdef INET
965 int i;
966 struct sge_rxq *rxq;
967
968 ifp->if_capenable ^= IFCAP_LRO;
969 for_each_rxq(pi, i, rxq) {
970 if (ifp->if_capenable & IFCAP_LRO)
1031 }
1032 } else
1033 ifp->if_hwassist &= ~CSUM_TSO;
1034 }
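		/*
		 * Net effect of the two blocks above: CSUM_TSO stays in
		 * if_hwassist only while both IFCAP_TXCSUM and IFCAP_TSO4
		 * are enabled.  E.g. "ifconfig cxgbe0 -txcsum" also drops
		 * TSO, and "ifconfig cxgbe0 tso4" returns EAGAIN until
		 * txcsum is enabled again (interface name illustrative).
		 */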
1035 if (mask & IFCAP_LRO) {
1036#ifdef INET
1037 int i;
1038 struct sge_rxq *rxq;
1039
1040 ifp->if_capenable ^= IFCAP_LRO;
1041 for_each_rxq(pi, i, rxq) {
1042 if (ifp->if_capenable & IFCAP_LRO)
971 rxq->flags |= RXQ_LRO_ENABLED;
1043 rxq->iq.flags |= IQ_LRO_ENABLED;
972 else
1044 else
973 rxq->flags &= ~RXQ_LRO_ENABLED;
1045 rxq->iq.flags &= ~IQ_LRO_ENABLED;
974 }
975#endif
976 }
977#ifndef TCP_OFFLOAD_DISABLE
1046 }
1047#endif
1048 }
1049#ifndef TCP_OFFLOAD_DISABLE
978 if (mask & IFCAP_TOE4) {
979 rc = EOPNOTSUPP;
1050 if (mask & IFCAP_TOE) {
1051 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1052
1053 rc = toe_capability(pi, enable);
1054 if (rc != 0)
1055 goto fail;
1056
1057 ifp->if_capenable ^= mask;
980 }
981#endif
982 if (mask & IFCAP_VLAN_HWTAGGING) {
983 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
984 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
985 PORT_LOCK(pi);
986 rc = update_mac_settings(pi, XGMAC_VLANEX);
987 PORT_UNLOCK(pi);
988 }
989 }
990 if (mask & IFCAP_VLAN_MTU) {
991 ifp->if_capenable ^= IFCAP_VLAN_MTU;
992
993 /* Need to find out how to disable auto-mtu-inflation */
994 }
995 if (mask & IFCAP_VLAN_HWTSO)
996 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
997 if (mask & IFCAP_VLAN_HWCSUM)
998 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
999
1000#ifdef VLAN_CAPABILITIES
1001 VLAN_CAPABILITIES(ifp);
1002#endif
1003 ADAPTER_UNLOCK(sc);
1004 break;
1005
1006 case SIOCSIFMEDIA:
1007 case SIOCGIFMEDIA:
1008 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1009 break;
1010
1011 default:
1012 rc = ether_ioctl(ifp, cmd, data);
1013 }
1014
1015 return (rc);
1016}
1017
1018static void
1019cxgbe_start(struct ifnet *ifp)
1020{
1021 struct port_info *pi = ifp->if_softc;
1022 struct sge_txq *txq;
1023 int i;
1024
1025 for_each_txq(pi, i, txq) {
1026 if (TXQ_TRYLOCK(txq)) {
1027 txq_start(ifp, txq);
1028 TXQ_UNLOCK(txq);
1029 }
1030 }
1031}
1032
1033static int
1034cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1035{
1036 struct port_info *pi = ifp->if_softc;
1037 struct adapter *sc = pi->adapter;
1038 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1039 struct buf_ring *br;
1040 int rc;
1041
1042 M_ASSERTPKTHDR(m);
1043
1058 }
1059#endif
1060 if (mask & IFCAP_VLAN_HWTAGGING) {
1061 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1062 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1063 PORT_LOCK(pi);
1064 rc = update_mac_settings(pi, XGMAC_VLANEX);
1065 PORT_UNLOCK(pi);
1066 }
1067 }
1068 if (mask & IFCAP_VLAN_MTU) {
1069 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1070
1071 /* Need to find out how to disable auto-mtu-inflation */
1072 }
1073 if (mask & IFCAP_VLAN_HWTSO)
1074 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1075 if (mask & IFCAP_VLAN_HWCSUM)
1076 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1077
1078#ifdef VLAN_CAPABILITIES
1079 VLAN_CAPABILITIES(ifp);
1080#endif
1081 ADAPTER_UNLOCK(sc);
1082 break;
1083
1084 case SIOCSIFMEDIA:
1085 case SIOCGIFMEDIA:
1086 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1087 break;
1088
1089 default:
1090 rc = ether_ioctl(ifp, cmd, data);
1091 }
1092
1093 return (rc);
1094}
1095
1096static void
1097cxgbe_start(struct ifnet *ifp)
1098{
1099 struct port_info *pi = ifp->if_softc;
1100 struct sge_txq *txq;
1101 int i;
1102
1103 for_each_txq(pi, i, txq) {
1104 if (TXQ_TRYLOCK(txq)) {
1105 txq_start(ifp, txq);
1106 TXQ_UNLOCK(txq);
1107 }
1108 }
1109}
1110
1111static int
1112cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1113{
1114 struct port_info *pi = ifp->if_softc;
1115 struct adapter *sc = pi->adapter;
1116 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1117 struct buf_ring *br;
1118 int rc;
1119
1120 M_ASSERTPKTHDR(m);
1121
1044 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1122 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1045 m_freem(m);
1123 m_freem(m);
1046 return (0);
1124 return (ENETDOWN);
1047 }
1048
1049 if (m->m_flags & M_FLOWID)
1050 txq += (m->m_pkthdr.flowid % pi->ntxq);
1051 br = txq->br;
1052
1053 if (TXQ_TRYLOCK(txq) == 0) {
1125 }
1126
1127 if (m->m_flags & M_FLOWID)
1128 txq += (m->m_pkthdr.flowid % pi->ntxq);
1129 br = txq->br;
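	/*
	 * Example: with ntxq = 4, a flowid of 13 selects first_txq + 1.
	 * All packets of a flow hash to the same txq, which preserves
	 * their ordering on the wire.
	 */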
1130
1131 if (TXQ_TRYLOCK(txq) == 0) {
1132 struct sge_eq *eq = &txq->eq;
1133
1054 /*
1134 /*
1055 * XXX: make sure that this packet really is sent out. There is
1056 * a small race where t4_eth_tx may stop draining the drbr and
1057	 * go away just before we enqueue this mbuf.
1135 * It is possible that t4_eth_tx finishes up and releases the
1136 * lock between the TRYLOCK above and the drbr_enqueue here. We
1137 * need to make sure that this mbuf doesn't just sit there in
1138 * the drbr.
1058 */
1059
1139 */
1140
1060 return (drbr_enqueue(ifp, br, m));
1141 rc = drbr_enqueue(ifp, br, m);
1142 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1143 !(eq->flags & EQ_DOOMED))
1144 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
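	/*
	 * Illustrative interleaving of the race described above:
	 *
	 *	this thread			t4_eth_tx (other thread)
	 *	TXQ_TRYLOCK fails		finds drbr empty
	 *					TXQ_UNLOCK, returns
	 *	drbr_enqueue(m)
	 *
	 * Without the one-tick callout the mbuf just enqueued could sit
	 * in the drbr indefinitely; t4_tx_callout is scheduled to pick
	 * it up.
	 */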
1145 return (rc);
1061 }
1062
1063 /*
1064 * txq->m is the mbuf that is held up due to a temporary shortage of
1065 * resources and it should be put on the wire first. Then what's in
1066 * drbr and finally the mbuf that was just passed in to us.
1067 *
1068 * Return code should indicate the fate of the mbuf that was passed in
1069 * this time.
1070 */
1071
1072 TXQ_LOCK_ASSERT_OWNED(txq);
1073 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1074
1075 /* Queued for transmission. */
1076
1077 rc = drbr_enqueue(ifp, br, m);
1078 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1079 (void) t4_eth_tx(ifp, txq, m);
1080 TXQ_UNLOCK(txq);
1081 return (rc);
1082 }
1083
1084 /* Direct transmission. */
1085 rc = t4_eth_tx(ifp, txq, m);
1086 if (rc != 0 && txq->m)
1087 rc = 0; /* held, will be transmitted soon (hopefully) */
1088
1089 TXQ_UNLOCK(txq);
1090 return (rc);
1091}
1092
1093static void
1094cxgbe_qflush(struct ifnet *ifp)
1095{
1096 struct port_info *pi = ifp->if_softc;
1097 struct sge_txq *txq;
1098 int i;
1099 struct mbuf *m;
1100
1146 }
1147
1148 /*
1149 * txq->m is the mbuf that is held up due to a temporary shortage of
1150 * resources and it should be put on the wire first. Then what's in
1151 * drbr and finally the mbuf that was just passed in to us.
1152 *
1153 * Return code should indicate the fate of the mbuf that was passed in
1154 * this time.
1155 */
1156
1157 TXQ_LOCK_ASSERT_OWNED(txq);
1158 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1159
1160 /* Queued for transmission. */
1161
1162 rc = drbr_enqueue(ifp, br, m);
1163 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1164 (void) t4_eth_tx(ifp, txq, m);
1165 TXQ_UNLOCK(txq);
1166 return (rc);
1167 }
1168
1169 /* Direct transmission. */
1170 rc = t4_eth_tx(ifp, txq, m);
1171 if (rc != 0 && txq->m)
1172 rc = 0; /* held, will be transmitted soon (hopefully) */
1173
1174 TXQ_UNLOCK(txq);
1175 return (rc);
1176}
1177
1178static void
1179cxgbe_qflush(struct ifnet *ifp)
1180{
1181 struct port_info *pi = ifp->if_softc;
1182 struct sge_txq *txq;
1183 int i;
1184 struct mbuf *m;
1185
1101 /* queues do not exist if !IFF_DRV_RUNNING. */
1102 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1186 /* queues do not exist if !PORT_INIT_DONE. */
1187 if (pi->flags & PORT_INIT_DONE) {
1103 for_each_txq(pi, i, txq) {
1104 TXQ_LOCK(txq);
1105 m_freem(txq->m);
1188 for_each_txq(pi, i, txq) {
1189 TXQ_LOCK(txq);
1190 m_freem(txq->m);
1191 txq->m = NULL;
1106 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1107 m_freem(m);
1108 TXQ_UNLOCK(txq);
1109 }
1110 }
1111 if_qflush(ifp);
1112}
1113
1114static int
1115cxgbe_media_change(struct ifnet *ifp)
1116{
1117 struct port_info *pi = ifp->if_softc;
1118
1119 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1120
1121 return (EOPNOTSUPP);
1122}
1123
1124static void
1125cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1126{
1127 struct port_info *pi = ifp->if_softc;
1128 struct ifmedia_entry *cur = pi->media.ifm_cur;
1129 int speed = pi->link_cfg.speed;
1130 int data = (pi->port_type << 8) | pi->mod_type;
1131
1132 if (cur->ifm_data != data) {
1133 build_medialist(pi);
1134 cur = pi->media.ifm_cur;
1135 }
1136
1137 ifmr->ifm_status = IFM_AVALID;
1138 if (!pi->link_cfg.link_ok)
1139 return;
1140
1141 ifmr->ifm_status |= IFM_ACTIVE;
1142
1143 /* active and current will differ iff current media is autoselect. */
1144 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1145 return;
1146
1147 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1148 if (speed == SPEED_10000)
1149 ifmr->ifm_active |= IFM_10G_T;
1150 else if (speed == SPEED_1000)
1151 ifmr->ifm_active |= IFM_1000_T;
1152 else if (speed == SPEED_100)
1153 ifmr->ifm_active |= IFM_100_TX;
1154 else if (speed == SPEED_10)
1155 ifmr->ifm_active |= IFM_10_T;
1156 else
1157 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1158 speed));
1159}
1160
1161void
1162t4_fatal_err(struct adapter *sc)
1163{
1164 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1165 t4_intr_disable(sc);
1166 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1167 device_get_nameunit(sc->dev));
1168}
1169
1170static int
1171map_bars(struct adapter *sc)
1172{
1173 sc->regs_rid = PCIR_BAR(0);
1174 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1175 &sc->regs_rid, RF_ACTIVE);
1176 if (sc->regs_res == NULL) {
1177 device_printf(sc->dev, "cannot map registers.\n");
1178 return (ENXIO);
1179 }
1180 sc->bt = rman_get_bustag(sc->regs_res);
1181 sc->bh = rman_get_bushandle(sc->regs_res);
1182 sc->mmio_len = rman_get_size(sc->regs_res);
1183
1184 sc->msix_rid = PCIR_BAR(4);
1185 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1186 &sc->msix_rid, RF_ACTIVE);
1187 if (sc->msix_res == NULL) {
1188 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1189 return (ENXIO);
1190 }
1191
1192 return (0);
1193}
1194
1195static void
1196setup_memwin(struct adapter *sc)
1197{
1198 u_long bar0;
1199
1200 bar0 = rman_get_start(sc->regs_res);
1201
1202 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1203 (bar0 + MEMWIN0_BASE) | V_BIR(0) |
1204 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1205
1206 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1207 (bar0 + MEMWIN1_BASE) | V_BIR(0) |
1208 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1209
1210 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1211 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1212 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1213}
1214
1215static int
1216cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1217 struct intrs_and_queues *iaq)
1218{
1192 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1193 m_freem(m);
1194 TXQ_UNLOCK(txq);
1195 }
1196 }
1197 if_qflush(ifp);
1198}
1199
1200static int
1201cxgbe_media_change(struct ifnet *ifp)
1202{
1203 struct port_info *pi = ifp->if_softc;
1204
1205 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1206
1207 return (EOPNOTSUPP);
1208}
1209
1210static void
1211cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1212{
1213 struct port_info *pi = ifp->if_softc;
1214 struct ifmedia_entry *cur = pi->media.ifm_cur;
1215 int speed = pi->link_cfg.speed;
1216 int data = (pi->port_type << 8) | pi->mod_type;
1217
1218 if (cur->ifm_data != data) {
1219 build_medialist(pi);
1220 cur = pi->media.ifm_cur;
1221 }
1222
1223 ifmr->ifm_status = IFM_AVALID;
1224 if (!pi->link_cfg.link_ok)
1225 return;
1226
1227 ifmr->ifm_status |= IFM_ACTIVE;
1228
1229 /* active and current will differ iff current media is autoselect. */
1230 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1231 return;
1232
1233 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1234 if (speed == SPEED_10000)
1235 ifmr->ifm_active |= IFM_10G_T;
1236 else if (speed == SPEED_1000)
1237 ifmr->ifm_active |= IFM_1000_T;
1238 else if (speed == SPEED_100)
1239 ifmr->ifm_active |= IFM_100_TX;
1240 else if (speed == SPEED_10)
1241 ifmr->ifm_active |= IFM_10_T;
1242 else
1243 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1244 speed));
1245}
1246
1247void
1248t4_fatal_err(struct adapter *sc)
1249{
1250 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1251 t4_intr_disable(sc);
1252 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1253 device_get_nameunit(sc->dev));
1254}
1255
1256static int
1257map_bars(struct adapter *sc)
1258{
1259 sc->regs_rid = PCIR_BAR(0);
1260 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1261 &sc->regs_rid, RF_ACTIVE);
1262 if (sc->regs_res == NULL) {
1263 device_printf(sc->dev, "cannot map registers.\n");
1264 return (ENXIO);
1265 }
1266 sc->bt = rman_get_bustag(sc->regs_res);
1267 sc->bh = rman_get_bushandle(sc->regs_res);
1268 sc->mmio_len = rman_get_size(sc->regs_res);
1269
1270 sc->msix_rid = PCIR_BAR(4);
1271 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1272 &sc->msix_rid, RF_ACTIVE);
1273 if (sc->msix_res == NULL) {
1274 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1275 return (ENXIO);
1276 }
1277
1278 return (0);
1279}
1280
1281static void
1282setup_memwin(struct adapter *sc)
1283{
1284 u_long bar0;
1285
1286 bar0 = rman_get_start(sc->regs_res);
1287
1288 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1289 (bar0 + MEMWIN0_BASE) | V_BIR(0) |
1290 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1291
1292 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1293 (bar0 + MEMWIN1_BASE) | V_BIR(0) |
1294 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1295
1296 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1297 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1298 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1299}
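/*
 * The window size field encodes log2(aperture) - 10, i.e. an encoded
 * value w means an aperture of 2^(10+w) bytes.  For example, an
 * aperture of 4096 bytes (a value assumed here purely for illustration)
 * would be programmed as V_WINDOW(2).
 */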
1300
1301static int
1302cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1303 struct intrs_and_queues *iaq)
1304{
1219 int rc, itype, navail, nc, nrxq10g, nrxq1g;
1305 int rc, itype, navail, nrxq10g, nrxq1g, n;
1306 int nofldrxq10g = 0, nofldrxq1g = 0;
1220
1221 bzero(iaq, sizeof(*iaq));
1307
1308 bzero(iaq, sizeof(*iaq));
1222 nc = mp_ncpus; /* our snapshot of the number of CPUs */
1223
1309
1310 iaq->ntxq10g = t4_ntxq10g;
1311 iaq->ntxq1g = t4_ntxq1g;
1312 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1313 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1314#ifndef TCP_OFFLOAD_DISABLE
1315 iaq->nofldtxq10g = t4_nofldtxq10g;
1316 iaq->nofldtxq1g = t4_nofldtxq1g;
1317 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1318 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1319#endif
1320
1224 for (itype = INTR_MSIX; itype; itype >>= 1) {
1225
1321 for (itype = INTR_MSIX; itype; itype >>= 1) {
1322
1226 if ((itype & intr_types) == 0)
1323 if ((itype & t4_intr_types) == 0)
1227 continue; /* not allowed */
1228
1229 if (itype == INTR_MSIX)
1230 navail = pci_msix_count(sc->dev);
1231 else if (itype == INTR_MSI)
1232 navail = pci_msi_count(sc->dev);
1233 else
1234 navail = 1;
1324 continue; /* not allowed */
1325
1326 if (itype == INTR_MSIX)
1327 navail = pci_msix_count(sc->dev);
1328 else if (itype == INTR_MSI)
1329 navail = pci_msi_count(sc->dev);
1330 else
1331 navail = 1;
1235
1332restart:
1236 if (navail == 0)
1237 continue;
1238
1239 iaq->intr_type = itype;
1333 if (navail == 0)
1334 continue;
1335
1336 iaq->intr_type = itype;
1337 iaq->intr_flags = 0;
1240
1338
1241 iaq->ntxq10g = min(nc, max_ntxq_10g);
1242 iaq->ntxq1g = min(nc, max_ntxq_1g);
1339 /*
1340 * Best option: an interrupt vector for errors, one for the
1341	 * firmware event queue, and one for each rxq (NIC as well
1342 * as offload).
1343 */
1344 iaq->nirq = T4_EXTRA_INTR;
1345 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1346 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1347 if (iaq->nirq <= navail &&
1348 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1349 iaq->intr_flags |= INTR_DIRECT;
1350 goto allocate;
1351 }
1243
1352
1244 nrxq10g = min(nc, max_nrxq_10g);
1245 nrxq1g = min(nc, max_nrxq_1g);
1353 /*
1354 * Second best option: an interrupt vector for errors, one for
1355 * the firmware event queue, and one each for either NIC or
1356 * offload rxq's.
1357 */
1358 iaq->nirq = T4_EXTRA_INTR;
1359 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1360 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1361 if (iaq->nirq <= navail &&
1362 (itype != INTR_MSI || powerof2(iaq->nirq)))
1363 goto allocate;
1246
1364
1247 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + T4_EXTRA_INTR;
1248 if (iaq->nirq <= navail && intr_shared == 0) {
1365 /*
1366 * Next best option: an interrupt vector for errors, one for the
1367 * firmware event queue, and at least one per port. At this
1368 * point we know we'll have to downsize nrxq or nofldrxq to fit
1369 * what's available to us.
1370 */
1371 iaq->nirq = T4_EXTRA_INTR;
1372 iaq->nirq += n10g + n1g;
1373 if (iaq->nirq <= navail) {
1374 int leftover = navail - iaq->nirq;
1249
1375
1250 if (itype == INTR_MSI && !powerof2(iaq->nirq))
1251 goto share;
1376 if (n10g > 0) {
1377 int target = max(nrxq10g, nofldrxq10g);
1252
1378
1253 /* One for err, one for fwq, and one for each rxq */
1379 n = 1;
1380 while (n < target && leftover >= n10g) {
1381 leftover -= n10g;
1382 iaq->nirq += n10g;
1383 n++;
1384 }
1385 iaq->nrxq10g = min(n, nrxq10g);
1386#ifndef TCP_OFFLOAD_DISABLE
1387 iaq->nofldrxq10g = min(n, nofldrxq10g);
1388#endif
1389 }
1254
1390
1255 iaq->intr_shared = 0;
1256 iaq->nrxq10g = nrxq10g;
1257 iaq->nrxq1g = nrxq1g;
1391 if (n1g > 0) {
1392 int target = max(nrxq1g, nofldrxq1g);
1258
1393
1259 } else {
1260share:
1261 iaq->intr_shared = 1;
1262
1263 if (navail >= nc + T4_EXTRA_INTR) {
1264 if (itype == INTR_MSIX)
1265 navail = nc + T4_EXTRA_INTR;
1266
1267 /* navail is and must remain a pow2 for MSI */
1268 if (itype == INTR_MSI) {
1269 KASSERT(powerof2(navail),
1270 ("%d not power of 2", navail));
1271
1272 while (navail / 2 >= nc + T4_EXTRA_INTR)
1273 navail /= 2;
1394 n = 1;
1395 while (n < target && leftover >= n1g) {
1396 leftover -= n1g;
1397 iaq->nirq += n1g;
1398 n++;
1274 }
1399 }
1400 iaq->nrxq1g = min(n, nrxq1g);
1401#ifndef TCP_OFFLOAD_DISABLE
1402 iaq->nofldrxq1g = min(n, nofldrxq1g);
1403#endif
1275 }
1404 }
1276 iaq->nirq = navail; /* total # of interrupts */
1277
1405
1278 /*
1279 * If we have multiple vectors available reserve one
1280 * exclusively for errors. The rest will be shared by
1281 * the fwq and data.
1282 */
1283 if (navail > 1)
1284 navail--;
1285 iaq->nrxq10g = min(nrxq10g, navail);
1286 iaq->nrxq1g = min(nrxq1g, navail);
1406 if (itype != INTR_MSI || powerof2(iaq->nirq))
1407 goto allocate;
1287 }
1288
1408 }
1409
1410 /*
1411 * Least desirable option: one interrupt vector for everything.
1412 */
1413 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1414#ifndef TCP_OFFLOAD_DISABLE
1415 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1416#endif
1417
1418allocate:
1289 navail = iaq->nirq;
1290 rc = 0;
1291 if (itype == INTR_MSIX)
1292 rc = pci_alloc_msix(sc->dev, &navail);
1293 else if (itype == INTR_MSI)
1294 rc = pci_alloc_msi(sc->dev, &navail);
1295
1296 if (rc == 0) {
1297 if (navail == iaq->nirq)
1298 return (0);
1299
1300 /*
1301 * Didn't get the number requested. Use whatever number
1302 * the kernel is willing to allocate (it's in navail).
1303 */
1419 navail = iaq->nirq;
1420 rc = 0;
1421 if (itype == INTR_MSIX)
1422 rc = pci_alloc_msix(sc->dev, &navail);
1423 else if (itype == INTR_MSI)
1424 rc = pci_alloc_msi(sc->dev, &navail);
1425
1426 if (rc == 0) {
1427 if (navail == iaq->nirq)
1428 return (0);
1429
1430 /*
1431 * Didn't get the number requested. Use whatever number
1432 * the kernel is willing to allocate (it's in navail).
1433 */
1434 device_printf(sc->dev, "fewer vectors than requested, "
1435 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1436 itype, iaq->nirq, navail);
1304 pci_release_msi(sc->dev);
1437 pci_release_msi(sc->dev);
1305 goto share;
1438 goto restart;
1306 }
1307
1308 device_printf(sc->dev,
1309 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1310 itype, rc, iaq->nirq, navail);
1311 }
1312
1313 device_printf(sc->dev,
1314 "failed to find a usable interrupt type. "
1439 }
1440
1441 device_printf(sc->dev,
1442 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1443 itype, rc, iaq->nirq, navail);
1444 }
1445
1446 device_printf(sc->dev,
1447 "failed to find a usable interrupt type. "
1315 "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types,
1448 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1316 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1317
1318 return (ENXIO);
1319}
1320
1321/*
1449 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1450
1451 return (ENXIO);
1452}
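/*
 * Worked example of the downshift above (all numbers illustrative,
 * assuming T4_EXTRA_INTR == 2 for the error and firmware event queue
 * vectors): with n10g = 2, n1g = 0, nrxq10g = 8, nofldrxq10g = 2 and
 * 12 MSI-X vectors available, the best option needs 2 + 2*(8+2) = 22
 * vectors and the second best 2 + 2*8 = 18, both too many.  The third
 * option starts at 2 + 2 = 4 and grows n while vectors remain: n
 * reaches 5 (nirq = 12), giving nrxq10g = min(5, 8) = 5 and
 * nofldrxq10g = min(5, 2) = 2.
 */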
1453
1454/*
1322 * Install a compatible firmware (if required), establish contact with it,
1323 * become the master, and reset the device.
1455 * Install a compatible firmware (if required), establish contact with it (by
1456 * saying hello), and reset the device. If we end up as the master driver,
1457 * partition adapter resources by providing a configuration file to the
1458 * firmware.
1324 */
1325static int
1326prep_firmware(struct adapter *sc)
1327{
1459 */
1460static int
1461prep_firmware(struct adapter *sc)
1462{
1328 const struct firmware *fw;
1463 const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
1329 int rc;
1330 enum dev_state state;
1331
1464 int rc;
1465 enum dev_state state;
1466
1467 default_cfg = firmware_get(T4_CFGNAME);
1468
1332 /* Check firmware version and install a different one if necessary */
1333 rc = t4_check_fw_version(sc);
1469 /* Check firmware version and install a different one if necessary */
1470 rc = t4_check_fw_version(sc);
1334 if (rc != 0 || force_firmware_install) {
1471 if (rc != 0) {
1335 uint32_t v = 0;
1336
1337 fw = firmware_get(T4_FWNAME);
1338 if (fw != NULL) {
1339 const struct fw_hdr *hdr = (const void *)fw->data;
1340
1341 v = ntohl(hdr->fw_ver);
1342
1343 /*
1344 * The firmware module will not be used if it isn't the
1345 * same major version as what the driver was compiled
1472 uint32_t v = 0;
1473
1474 fw = firmware_get(T4_FWNAME);
1475 if (fw != NULL) {
1476 const struct fw_hdr *hdr = (const void *)fw->data;
1477
1478 v = ntohl(hdr->fw_ver);
1479
1480 /*
1481 * The firmware module will not be used if it isn't the
1482 * same major version as what the driver was compiled
1346 * with. This check trumps force_firmware_install.
1483 * with.
1347 */
1348 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1349 device_printf(sc->dev,
1350 "Found firmware image but version %d "
1351 "can not be used with this driver (%d)\n",
1352 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1353
1354 firmware_put(fw, FIRMWARE_UNLOAD);
1355 fw = NULL;
1356 }
1357 }
1358
1484 */
1485 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1486 device_printf(sc->dev,
1487 "Found firmware image but version %d "
1488 "can not be used with this driver (%d)\n",
1489 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1490
1491 firmware_put(fw, FIRMWARE_UNLOAD);
1492 fw = NULL;
1493 }
1494 }
1495
1359 if (fw == NULL && (rc < 0 || force_firmware_install)) {
1496 if (fw == NULL && rc < 0) {
1360 device_printf(sc->dev, "No usable firmware. "
1497 device_printf(sc->dev, "No usable firmware. "
1361 "card has %d.%d.%d, driver compiled with %d.%d.%d, "
1362 "force_firmware_install%s set",
1498 "card has %d.%d.%d, driver compiled with %d.%d.%d",
1363 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1364 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1365 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1366 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1499 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1500 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1501 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1502 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1367 FW_VERSION_MICRO,
1368 force_firmware_install ? "" : " not");
1369 return (EAGAIN);
1503 FW_VERSION_MICRO);
1504 rc = EAGAIN;
1505 goto done;
1370 }
1371
1372 /*
1373 * Always upgrade, even for minor/micro/build mismatches.
1374 * Downgrade only for a major version mismatch or if
1375 * force_firmware_install was specified.
1376 */
1506 }
1507
1508 /*
1509 * Always upgrade, even for minor/micro/build mismatches.
1510	 * Downgrade only for a major version mismatch, i.e. when the
1511	 * firmware already on the card is unusable with this driver.
1512 */
1377 if (fw != NULL && (rc < 0 || force_firmware_install ||
1378 v > sc->params.fw_vers)) {
1513 if (fw != NULL && (rc < 0 || v > sc->params.fw_vers)) {
1379 device_printf(sc->dev,
1380 "installing firmware %d.%d.%d.%d on card.\n",
1381 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1382 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1383
1384 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1385 if (rc != 0) {
1386 device_printf(sc->dev,
1387 "failed to install firmware: %d\n", rc);
1514 device_printf(sc->dev,
1515 "installing firmware %d.%d.%d.%d on card.\n",
1516 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1517 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1518
1519 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1520 if (rc != 0) {
1521 device_printf(sc->dev,
1522 "failed to install firmware: %d\n", rc);
1388 firmware_put(fw, FIRMWARE_UNLOAD);
1389 return (rc);
1523 goto done;
1390 } else {
1391 /* refresh */
1392 (void) t4_check_fw_version(sc);
1393 }
1394 }
1524 } else {
1525 /* refresh */
1526 (void) t4_check_fw_version(sc);
1527 }
1528 }
1395
1396 if (fw != NULL)
1397 firmware_put(fw, FIRMWARE_UNLOAD);
1398 }
1399
1529 }
1530
1400 /* Contact firmware, request master */
1401 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1531 /* Contact firmware. */
1532 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1402 if (rc < 0) {
1403 rc = -rc;
1404 device_printf(sc->dev,
1405 "failed to connect to the firmware: %d.\n", rc);
1533 if (rc < 0) {
1534 rc = -rc;
1535 device_printf(sc->dev,
1536 "failed to connect to the firmware: %d.\n", rc);
1406 return (rc);
1537 goto done;
1407 }
1538 }
1539 if (rc == sc->mbox)
1540 sc->flags |= MASTER_PF;
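	/*
	 * t4_fw_hello() returns the mailbox of the PF the firmware chose
	 * as master; when that is our own mailbox, this instance is the
	 * one that partitions adapter resources below.
	 */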
1408
1409 /* Reset device */
1410 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1411 if (rc != 0) {
1412 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1413 if (rc != ETIMEDOUT && rc != EIO)
1414 t4_fw_bye(sc, sc->mbox);
1541
1542 /* Reset device */
1543 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1544 if (rc != 0) {
1545 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1546 if (rc != ETIMEDOUT && rc != EIO)
1547 t4_fw_bye(sc, sc->mbox);
1415 return (rc);
1548 goto done;
1416 }
1417
1549 }
1550
1551 /* Partition adapter resources as specified in the config file. */
1552 if (sc->flags & MASTER_PF) {
1553 if (strncmp(t4_cfg_file, "default", sizeof(t4_cfg_file))) {
1554 char s[32];
1555
1556 snprintf(s, sizeof(s), "t4fw_cfg_%s", t4_cfg_file);
1557 cfg = firmware_get(s);
1558 if (cfg == NULL) {
1559 device_printf(sc->dev,
1560 "unable to locate %s module, "
1561 "will use default config file.\n", s);
1562 }
1563 }
1564
1565 rc = partition_resources(sc, cfg ? cfg : default_cfg);
1566 if (rc != 0)
1567 goto done; /* error message displayed already */
1568 }
1569
1418 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1419 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1420 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1421 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1422 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1423 sc->flags |= FW_OK;
1424
1570 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1571 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1572 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1573 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1574 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1575 sc->flags |= FW_OK;
1576
1425 return (0);
1577done:
1578 if (fw != NULL)
1579 firmware_put(fw, FIRMWARE_UNLOAD);
1580 if (cfg != NULL)
1581 firmware_put(cfg, FIRMWARE_UNLOAD);
1582 if (default_cfg != NULL)
1583 firmware_put(default_cfg, FIRMWARE_UNLOAD);
1584
1585 return (rc);
1426}
1427
1586}
1587
1588#define FW_PARAM_DEV(param) \
1589 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1590 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1591#define FW_PARAM_PFVF(param) \
1592 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1593 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
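/*
 * For example, FW_PARAM_DEV(CF) expands to
 * V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 * V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF), the 32-bit parameter id
 * that t4_query_params() hands to the firmware.
 */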
1594
1595/*
1596 * Upload configuration file to card's memory.
1597 */
1428static int
1598static int
1429get_devlog_params(struct adapter *sc, struct devlog_params *dlog)
1599upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
1600 uint32_t *ma)
1430{
1601{
1431 struct fw_devlog_cmd devlog_cmd;
1432 uint32_t meminfo;
1433 int rc;
1602 int rc, i;
1603 uint32_t param, val, mtype, maddr, bar, off, win, remaining;
1604 const uint32_t *b;
1434
1605
1435 bzero(&devlog_cmd, sizeof(devlog_cmd));
1436 devlog_cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1437 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1438 devlog_cmd.retval_len16 = htobe32(FW_LEN16(devlog_cmd));
1439 rc = -t4_wr_mbox(sc, sc->mbox, &devlog_cmd, sizeof(devlog_cmd),
1440 &devlog_cmd);
1606 /* Figure out where the firmware wants us to upload it. */
1607 param = FW_PARAM_DEV(CF);
1608 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1441 if (rc != 0) {
1609 if (rc != 0) {
1610	 /* Firmware builds without config file support fail this way. */
1442 device_printf(sc->dev,
1611 device_printf(sc->dev,
1443 "failed to get devlog parameters: %d.\n", rc);
1444 bzero(dlog, sizeof (*dlog));
1612 "failed to query config file location: %d.\n", rc);
1445 return (rc);
1446 }
1613 return (rc);
1614 }
1615 *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1616 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1447
1617
1448 meminfo = be32toh(devlog_cmd.memtype_devlog_memaddr16_devlog);
1449 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(meminfo);
1450 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(meminfo) << 4;
1451 dlog->size = be32toh(devlog_cmd.memsize_devlog);
1618 if (maddr & 3) {
1619 device_printf(sc->dev,
1620 "cannot upload config file (type %u, addr %x).\n",
1621 mtype, maddr);
1622 return (EFAULT);
1623 }
1452
1624
1453 return (0);
1625 /* Translate mtype/maddr to an address suitable for the PCIe window */
1626 val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1627 val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
1628 switch (mtype) {
1629 case FW_MEMTYPE_CF_EDC0:
1630 if (!(val & F_EDRAM0_ENABLE))
1631 goto err;
1632 bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1633 maddr += G_EDRAM0_BASE(bar) << 20;
1634 break;
1635
1636 case FW_MEMTYPE_CF_EDC1:
1637 if (!(val & F_EDRAM1_ENABLE))
1638 goto err;
1639 bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1640 maddr += G_EDRAM1_BASE(bar) << 20;
1641 break;
1642
1643 case FW_MEMTYPE_CF_EXTMEM:
1644 if (!(val & F_EXT_MEM_ENABLE))
1645 goto err;
1646 bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1647 maddr += G_EXT_MEM_BASE(bar) << 20;
1648 break;
1649
1650 default:
1651err:
1652 device_printf(sc->dev,
1653 "cannot upload config file (type %u, enabled %u).\n",
1654 mtype, val);
1655 return (EFAULT);
1656 }
1657
1658 /*
1659 * Position the PCIe window (we use memwin2) to the 16B aligned area
1660 * just at/before the upload location.
1661 */
1662 win = maddr & ~0xf;
1663 off = maddr - win; /* offset from the start of the window. */
1664 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
1665 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
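	/*
	 * Illustrative only: a maddr of 0x123456 would place the window
	 * at 0x123450, with the upload starting at offset 6 inside the
	 * MEMWIN2 aperture.
	 */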
1666
1667 remaining = fw->datasize;
1668 if (remaining > FLASH_CFG_MAX_SIZE ||
1669 remaining > MEMWIN2_APERTURE - off) {
1670 device_printf(sc->dev, "cannot upload config file all at once "
1671 "(size %u, max %u, room %u).\n",
1672 remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
1673 return (EFBIG);
1674 }
1675
1676 /*
1677 * XXX: sheer laziness. We deliberately added 4 bytes of useless
1678 * stuffing/comments at the end of the config file so it's ok to simply
1679 * throw away the last remaining bytes when the config file is not an
1680 * exact multiple of 4.
1681 */
1682 b = fw->data;
1683 for (i = 0; remaining >= 4; i += 4, remaining -= 4)
1684 t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);
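	/*
	 * Example of the truncation the comment above relies on: a
	 * 10-byte file is written as two 32-bit words and its last two
	 * bytes are dropped, which is safe only because the file is
	 * known to end in padding.
	 */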
1685
1686 return (rc);
1454}
1455
1687}
1688
1689/*
1690 * Partition chip resources for use between various PFs, VFs, etc. This is done
1691 * by uploading the firmware configuration file to the adapter and instructing
1692 * the firmware to process it.
1693 */
1456static int
1694static int
1457get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
1695partition_resources(struct adapter *sc, const struct firmware *cfg)
1458{
1459 int rc;
1696{
1697 int rc;
1698 struct fw_caps_config_cmd caps;
1699 uint32_t mtype, maddr, finicsum, cfcsum;
1460
1700
1461 bzero(caps, sizeof(*caps));
1462 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1463 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1464 caps->retval_len16 = htobe32(FW_LEN16(*caps));
1701 rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
1702 if (rc != 0) {
1703 mtype = FW_MEMTYPE_CF_FLASH;
1704 maddr = t4_flash_cfg_addr(sc);
1705 }
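	/*
	 * Either way, (mtype, maddr) now points at a config file: the
	 * one just uploaded to card memory, or on any failure the copy
	 * kept in flash.  The caps command below tells the firmware
	 * which one to parse.
	 */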
1465
1706
1466 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps);
1467 if (rc != 0)
1707 bzero(&caps, sizeof(caps));
1708 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1709 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1710 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1711 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1712 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1713 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1714 if (rc != 0) {
1715 device_printf(sc->dev,
1716 "failed to pre-process config file: %d.\n", rc);
1468 return (rc);
1717 return (rc);
1718 }
1469
1719
1470 if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM))
1471 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM);
1720 finicsum = be32toh(caps.finicsum);
1721 cfcsum = be32toh(caps.cfcsum);
1722 if (finicsum != cfcsum) {
1723 device_printf(sc->dev,
1724 "WARNING: config file checksum mismatch: %08x %08x\n",
1725 finicsum, cfcsum);
1726 }
1727 sc->cfcsum = cfcsum;
1472
1728
1473 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1729#define LIMIT_CAPS(x) do { \
1730 caps.x &= htobe16(t4_##x##_allowed); \
1731 sc->x = htobe16(caps.x); \
1732} while (0)
1733
1734 /*
1735 * Let the firmware know what features will (not) be used so it can tune
1736 * things accordingly.
1737 */
1738 LIMIT_CAPS(linkcaps);
1739 LIMIT_CAPS(niccaps);
1740 LIMIT_CAPS(toecaps);
1741 LIMIT_CAPS(rdmacaps);
1742 LIMIT_CAPS(iscsicaps);
1743 LIMIT_CAPS(fcoecaps);
1744#undef LIMIT_CAPS
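	/*
	 * For reference, LIMIT_CAPS(toecaps) above expanded to:
	 *
	 *	caps.toecaps &= htobe16(t4_toecaps_allowed);
	 *	sc->toecaps = htobe16(caps.toecaps);
	 *
	 * i.e. each capability word is masked by its tunable before
	 * being written back to the firmware.
	 */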
1745
1746 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1474 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1747 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1475 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL);
1748 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1749 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
1750 if (rc != 0) {
1751 device_printf(sc->dev,
1752 "failed to process config file: %d.\n", rc);
1753 return (rc);
1754 }
1476
1755
1477 return (rc);
1756 return (0);
1478}
1479
1757}
1758
1759/*
1760 * Retrieve parameters that are needed (or nice to have) prior to calling
1761 * t4_sge_init and t4_fw_initialize.
1762 */
1480static int
1763static int
1481get_params(struct adapter *sc, struct fw_caps_config_cmd *caps)
1764get_params__pre_init(struct adapter *sc)
1482{
1483 int rc;
1765{
1766 int rc;
1484 uint32_t params[7], val[7];
1767 uint32_t param[2], val[2];
1768 struct fw_devlog_cmd cmd;
1769 struct devlog_params *dlog = &sc->params.devlog;
1485
1770
1486#define FW_PARAM_DEV(param) \
1487 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1488 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1489#define FW_PARAM_PFVF(param) \
1490 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1491 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1492
1493 params[0] = FW_PARAM_DEV(PORTVEC);
1494 params[1] = FW_PARAM_PFVF(IQFLINT_START);
1495 params[2] = FW_PARAM_PFVF(EQ_START);
1496 params[3] = FW_PARAM_PFVF(FILTER_START);
1497 params[4] = FW_PARAM_PFVF(FILTER_END);
1498 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val);
1771 param[0] = FW_PARAM_DEV(PORTVEC);
1772 param[1] = FW_PARAM_DEV(CCLK);
1773 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1499 if (rc != 0) {
1500 device_printf(sc->dev,
1774 if (rc != 0) {
1775 device_printf(sc->dev,
1501 "failed to query parameters: %d.\n", rc);
1502 goto done;
1776 "failed to query parameters (pre_init): %d.\n", rc);
1777 return (rc);
1503 }
1504
1505 sc->params.portvec = val[0];
1506 sc->params.nports = 0;
1507 while (val[0]) {
1508 sc->params.nports++;
1509 val[0] &= val[0] - 1;
1510 }
1511
1778 }
1779
1780 sc->params.portvec = val[0];
1781 sc->params.nports = 0;
1782 while (val[0]) {
1783 sc->params.nports++;
1784 val[0] &= val[0] - 1;
1785 }
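	/*
	 * The loop above is Kernighan's bit count: val[0] &= val[0] - 1
	 * clears the lowest set bit each pass.  A portvec of 0x5 (ports
	 * 0 and 2) therefore takes two iterations and leaves nports = 2.
	 */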
1786
1512 sc->sge.iq_start = val[1];
1513 sc->sge.eq_start = val[2];
1514 sc->tids.ftid_base = val[3];
1515 sc->tids.nftids = val[4] - val[3] + 1;
1787 sc->params.vpd.cclk = val[1];
1516
1788
1517 if (caps->toecaps) {
1789 /* Read device log parameters. */
1790 bzero(&cmd, sizeof(cmd));
1791 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1792 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1793 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
1794 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
1795 if (rc != 0) {
1796 device_printf(sc->dev,
1797 "failed to get devlog parameters: %d.\n", rc);
1798 bzero(dlog, sizeof (*dlog));
1799 rc = 0; /* devlog isn't critical for device operation */
1800 } else {
1801 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
1802 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1803 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1804 dlog->size = be32toh(cmd.memsize_devlog);
1805 }
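	/*
	 * The firmware reports the devlog address in 16-byte units,
	 * hence the "memaddr16" field name and the << 4 above.
	 */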
1806
1807 return (rc);
1808}
1809
1810/*
1811 * Retrieve various parameters that are of interest to the driver. The device
1812 * has been initialized by the firmware at this point.
1813 */
1814static int
1815get_params__post_init(struct adapter *sc)
1816{
1817 int rc;
1818 uint32_t param[7], val[7];
1819 struct fw_caps_config_cmd caps;
1820
1821 param[0] = FW_PARAM_PFVF(IQFLINT_START);
1822 param[1] = FW_PARAM_PFVF(EQ_START);
1823 param[2] = FW_PARAM_PFVF(FILTER_START);
1824 param[3] = FW_PARAM_PFVF(FILTER_END);
1825 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
1826 if (rc != 0) {
1827 device_printf(sc->dev,
1828 "failed to query parameters (post_init): %d.\n", rc);
1829 return (rc);
1830 }
1831
1832 sc->sge.iq_start = val[0];
1833 sc->sge.eq_start = val[1];
1834 sc->tids.ftid_base = val[2];
1835 sc->tids.nftids = val[3] - val[2] + 1;
1836
1837	 /* get capabilities */
1838 bzero(&caps, sizeof(caps));
1839 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1840 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1841 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1842 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1843 if (rc != 0) {
1844 device_printf(sc->dev,
1845 "failed to get card capabilities: %d.\n", rc);
1846 return (rc);
1847 }
1848
1849 if (caps.toecaps) {
1518 /* query offload-related parameters */
1850 /* query offload-related parameters */
1519 params[0] = FW_PARAM_DEV(NTID);
1520 params[1] = FW_PARAM_PFVF(SERVER_START);
1521 params[2] = FW_PARAM_PFVF(SERVER_END);
1522 params[3] = FW_PARAM_PFVF(TDDP_START);
1523 params[4] = FW_PARAM_PFVF(TDDP_END);
1524 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1525 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
1851 param[0] = FW_PARAM_DEV(NTID);
1852 param[1] = FW_PARAM_PFVF(SERVER_START);
1853 param[2] = FW_PARAM_PFVF(SERVER_END);
1854 param[3] = FW_PARAM_PFVF(TDDP_START);
1855 param[4] = FW_PARAM_PFVF(TDDP_END);
1856 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1857 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1526 if (rc != 0) {
1527 device_printf(sc->dev,
1528 "failed to query TOE parameters: %d.\n", rc);
1858 if (rc != 0) {
1859 device_printf(sc->dev,
1860 "failed to query TOE parameters: %d.\n", rc);
1529 goto done;
1861 return (rc);
1530 }
1531 sc->tids.ntids = val[0];
1532 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1533 sc->tids.stid_base = val[1];
1534 sc->tids.nstids = val[2] - val[1] + 1;
1535 sc->vres.ddp.start = val[3];
1536 sc->vres.ddp.size = val[4] - val[3] + 1;
1537 sc->params.ofldq_wr_cred = val[5];
1538 sc->params.offload = 1;
1539 }
1862 }
1863 sc->tids.ntids = val[0];
1864 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1865 sc->tids.stid_base = val[1];
1866 sc->tids.nstids = val[2] - val[1] + 1;
1867 sc->vres.ddp.start = val[3];
1868 sc->vres.ddp.size = val[4] - val[3] + 1;
1869 sc->params.ofldq_wr_cred = val[5];
1870 sc->params.offload = 1;
1871 }
1540 if (caps->rdmacaps) {
1541 params[0] = FW_PARAM_PFVF(STAG_START);
1542 params[1] = FW_PARAM_PFVF(STAG_END);
1543 params[2] = FW_PARAM_PFVF(RQ_START);
1544 params[3] = FW_PARAM_PFVF(RQ_END);
1545 params[4] = FW_PARAM_PFVF(PBL_START);
1546 params[5] = FW_PARAM_PFVF(PBL_END);
1547 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
1872 if (caps.rdmacaps) {
1873 param[0] = FW_PARAM_PFVF(STAG_START);
1874 param[1] = FW_PARAM_PFVF(STAG_END);
1875 param[2] = FW_PARAM_PFVF(RQ_START);
1876 param[3] = FW_PARAM_PFVF(RQ_END);
1877 param[4] = FW_PARAM_PFVF(PBL_START);
1878 param[5] = FW_PARAM_PFVF(PBL_END);
1879 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1548 if (rc != 0) {
1549 device_printf(sc->dev,
1880 if (rc != 0) {
1881 device_printf(sc->dev,
1550 "failed to query RDMA parameters: %d.\n", rc);
1551 goto done;
1882 "failed to query RDMA parameters(1): %d.\n", rc);
1883 return (rc);
1552 }
1553 sc->vres.stag.start = val[0];
1554 sc->vres.stag.size = val[1] - val[0] + 1;
1555 sc->vres.rq.start = val[2];
1556 sc->vres.rq.size = val[3] - val[2] + 1;
1557 sc->vres.pbl.start = val[4];
1558 sc->vres.pbl.size = val[5] - val[4] + 1;
1884 }
1885 sc->vres.stag.start = val[0];
1886 sc->vres.stag.size = val[1] - val[0] + 1;
1887 sc->vres.rq.start = val[2];
1888 sc->vres.rq.size = val[3] - val[2] + 1;
1889 sc->vres.pbl.start = val[4];
1890 sc->vres.pbl.size = val[5] - val[4] + 1;
1891
1892 param[0] = FW_PARAM_PFVF(SQRQ_START);
1893 param[1] = FW_PARAM_PFVF(SQRQ_END);
1894 param[2] = FW_PARAM_PFVF(CQ_START);
1895 param[3] = FW_PARAM_PFVF(CQ_END);
1896 param[4] = FW_PARAM_PFVF(OCQ_START);
1897 param[5] = FW_PARAM_PFVF(OCQ_END);
1898	 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1899 if (rc != 0) {
1900 device_printf(sc->dev,
1901 "failed to query RDMA parameters(2): %d.\n", rc);
1902 return (rc);
1903 }
1904 sc->vres.qp.start = val[0];
1905 sc->vres.qp.size = val[1] - val[0] + 1;
1906 sc->vres.cq.start = val[2];
1907 sc->vres.cq.size = val[3] - val[2] + 1;
1908 sc->vres.ocq.start = val[4];
1909 sc->vres.ocq.size = val[5] - val[4] + 1;
1559 }
1910 }
1560 if (caps->iscsicaps) {
1561 params[0] = FW_PARAM_PFVF(ISCSI_START);
1562 params[1] = FW_PARAM_PFVF(ISCSI_END);
1563 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val);
1911 if (caps.iscsicaps) {
1912 param[0] = FW_PARAM_PFVF(ISCSI_START);
1913 param[1] = FW_PARAM_PFVF(ISCSI_END);
1914 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1564 if (rc != 0) {
1565 device_printf(sc->dev,
1566 "failed to query iSCSI parameters: %d.\n", rc);
1915 if (rc != 0) {
1916 device_printf(sc->dev,
1917 "failed to query iSCSI parameters: %d.\n", rc);
1567 goto done;
1918 return (rc);
1568 }
1569 sc->vres.iscsi.start = val[0];
1570 sc->vres.iscsi.size = val[1] - val[0] + 1;
1571 }
1919 }
1920 sc->vres.iscsi.start = val[0];
1921 sc->vres.iscsi.size = val[1] - val[0] + 1;
1922 }
1572#undef FW_PARAM_PFVF
1573#undef FW_PARAM_DEV
1574
1923
1575done:
1924	 /* These are finalized by FW initialization; load their values now. */
1925 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1926 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1927 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1928 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1929
1576 return (rc);
1577}
1578
1930 return (rc);
1931}
1932
1933#undef FW_PARAM_PFVF
1934#undef FW_PARAM_DEV
1935
1579static void
1580t4_set_desc(struct adapter *sc)
1581{
1582 char buf[128];
1583 struct adapter_params *p = &sc->params;
1584
1936static void
1937t4_set_desc(struct adapter *sc)
1938{
1939 char buf[128];
1940 struct adapter_params *p = &sc->params;
1941
1585 snprintf(buf, sizeof(buf),
1586 "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s",
1587 p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "",
1588 p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1589 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec);
1942 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
1943 p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec);
1590
1591 device_set_desc_copy(sc->dev, buf);
1592}
1593
1594static void
1595build_medialist(struct port_info *pi)
1596{
1597 struct ifmedia *media = &pi->media;
1598 int data, m;
1599
1600 PORT_LOCK(pi);
1601
1602 ifmedia_removeall(media);
1603
1604 m = IFM_ETHER | IFM_FDX;
1605 data = (pi->port_type << 8) | pi->mod_type;
1606
1607 switch(pi->port_type) {
1608 case FW_PORT_TYPE_BT_XFI:
1609 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1610 break;
1611
1612 case FW_PORT_TYPE_BT_XAUI:
1613 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1614 /* fall through */
1615
1616 case FW_PORT_TYPE_BT_SGMII:
1617 ifmedia_add(media, m | IFM_1000_T, data, NULL);
1618 ifmedia_add(media, m | IFM_100_TX, data, NULL);
1619 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
1620 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
1621 break;
1622
1623 case FW_PORT_TYPE_CX4:
1624 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
1625 ifmedia_set(media, m | IFM_10G_CX4);
1626 break;
1627
1628 case FW_PORT_TYPE_SFP:
1629 case FW_PORT_TYPE_FIBER_XFI:
1630 case FW_PORT_TYPE_FIBER_XAUI:
1631 switch (pi->mod_type) {
1632
1633 case FW_PORT_MOD_TYPE_LR:
1634 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
1635 ifmedia_set(media, m | IFM_10G_LR);
1636 break;
1637
1638 case FW_PORT_MOD_TYPE_SR:
1639 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
1640 ifmedia_set(media, m | IFM_10G_SR);
1641 break;
1642
1643 case FW_PORT_MOD_TYPE_LRM:
1644 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
1645 ifmedia_set(media, m | IFM_10G_LRM);
1646 break;
1647
1648 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
1649 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
1650 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
1651 ifmedia_set(media, m | IFM_10G_TWINAX);
1652 break;
1653
1654 case FW_PORT_MOD_TYPE_NONE:
1655 m &= ~IFM_FDX;
1656 ifmedia_add(media, m | IFM_NONE, data, NULL);
1657 ifmedia_set(media, m | IFM_NONE);
1658 break;
1659
1660 case FW_PORT_MOD_TYPE_NA:
1661 case FW_PORT_MOD_TYPE_ER:
1662 default:
1663 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
1664 ifmedia_set(media, m | IFM_UNKNOWN);
1665 break;
1666 }
1667 break;
1668
1669 case FW_PORT_TYPE_KX4:
1670 case FW_PORT_TYPE_KX:
1671 case FW_PORT_TYPE_KR:
1672 default:
1673 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
1674 ifmedia_set(media, m | IFM_UNKNOWN);
1675 break;
1676 }
1677
1678 PORT_UNLOCK(pi);
1679}
1680
1681/*
1682 * Program the port's XGMAC based on parameters in ifnet. The caller also
1683 * indicates which parameters should be programmed (the rest are left alone).
1684 */
1685static int
1686update_mac_settings(struct port_info *pi, int flags)
1687{
1688 int rc;
1689 struct ifnet *ifp = pi->ifp;
1690 struct adapter *sc = pi->adapter;
1691 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
1692
1693 PORT_LOCK_ASSERT_OWNED(pi);
1694 KASSERT(flags, ("%s: not told what to update.", __func__));
1695
1696 if (flags & XGMAC_MTU)
1697 mtu = ifp->if_mtu;
1698
1699 if (flags & XGMAC_PROMISC)
1700 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
1701
1702 if (flags & XGMAC_ALLMULTI)
1703 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
1704
1705 if (flags & XGMAC_VLANEX)
1706 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
1707
1708 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
1709 vlanex, false);
1710 if (rc) {
1711 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
1712 return (rc);
1713 }
1714
1715 if (flags & XGMAC_UCADDR) {
1716 uint8_t ucaddr[ETHER_ADDR_LEN];
1717
1718 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
1719 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
1720 ucaddr, true, true);
1721 if (rc < 0) {
1722 rc = -rc;
1723 if_printf(ifp, "change_mac failed: %d\n", rc);
1724 return (rc);
1725 } else {
1726 pi->xact_addr_filt = rc;
1727 rc = 0;
1728 }
1729 }
1730
1731 if (flags & XGMAC_MCADDRS) {
1732 const uint8_t *mcaddr;
1733 int del = 1;
1734 uint64_t hash = 0;
1735 struct ifmultiaddr *ifma;
1736
1737 if_maddr_rlock(ifp);
1738 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1739 if (ifma->ifma_addr->sa_family != AF_LINK)
1740 continue;
1741 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1742
1743 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
1744 &mcaddr, NULL, &hash, 0);
1745 if (rc < 0) {
1746 rc = -rc;
1747 if_printf(ifp, "failed to add mc address"
1748 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
1749 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
1750 mcaddr[4], mcaddr[5], rc);
1751 goto mcfail;
1752 }
1753 del = 0;
1754 }
1755
1756 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
1757 if (rc != 0)
1758	 if_printf(ifp, "failed to set mc address hash: %d\n", rc);
1759mcfail:
1760 if_maddr_runlock(ifp);
1761 }
1762
1763 return (rc);
1764}
1765
1766static int
1767cxgbe_init_locked(struct port_info *pi)
1768{
1769 struct adapter *sc = pi->adapter;
1770 int rc = 0;
1771
1772 ADAPTER_LOCK_ASSERT_OWNED(sc);
1773
1774 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
1775 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
1776 rc = EINTR;
1777 goto done;
1778 }
1779 }
1780 if (IS_DOOMED(pi)) {
1781 rc = ENXIO;
1782 goto done;
1783 }
1784 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1785
1786 /* Give up the adapter lock, port init code can sleep. */
1787 SET_BUSY(sc);
1788 ADAPTER_UNLOCK(sc);
1789
1790 rc = cxgbe_init_synchronized(pi);
1791
1792done:
1793 ADAPTER_LOCK(sc);
1794 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1795 CLR_BUSY(sc);
1796 wakeup_one(&sc->flags);
1797 ADAPTER_UNLOCK(sc);
1798 return (rc);
1799}
1800
1801static int
1802cxgbe_init_synchronized(struct port_info *pi)
1803{
1804 struct adapter *sc = pi->adapter;
1805 struct ifnet *ifp = pi->ifp;
1944
1945 device_set_desc_copy(sc->dev, buf);
1946}
1947
1948static void
1949build_medialist(struct port_info *pi)
1950{
1951 struct ifmedia *media = &pi->media;
1952 int data, m;
1953
1954 PORT_LOCK(pi);
1955
1956 ifmedia_removeall(media);
1957
1958 m = IFM_ETHER | IFM_FDX;
1959 data = (pi->port_type << 8) | pi->mod_type;
1960
1961 switch(pi->port_type) {
1962 case FW_PORT_TYPE_BT_XFI:
1963 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1964 break;
1965
1966 case FW_PORT_TYPE_BT_XAUI:
1967 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1968 /* fall through */
1969
1970 case FW_PORT_TYPE_BT_SGMII:
1971 ifmedia_add(media, m | IFM_1000_T, data, NULL);
1972 ifmedia_add(media, m | IFM_100_TX, data, NULL);
1973 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
1974 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
1975 break;
1976
1977 case FW_PORT_TYPE_CX4:
1978 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
1979 ifmedia_set(media, m | IFM_10G_CX4);
1980 break;
1981
1982 case FW_PORT_TYPE_SFP:
1983 case FW_PORT_TYPE_FIBER_XFI:
1984 case FW_PORT_TYPE_FIBER_XAUI:
1985 switch (pi->mod_type) {
1986
1987 case FW_PORT_MOD_TYPE_LR:
1988 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
1989 ifmedia_set(media, m | IFM_10G_LR);
1990 break;
1991
1992 case FW_PORT_MOD_TYPE_SR:
1993 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
1994 ifmedia_set(media, m | IFM_10G_SR);
1995 break;
1996
1997 case FW_PORT_MOD_TYPE_LRM:
1998 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
1999 ifmedia_set(media, m | IFM_10G_LRM);
2000 break;
2001
2002 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2003 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2004 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2005 ifmedia_set(media, m | IFM_10G_TWINAX);
2006 break;
2007
2008 case FW_PORT_MOD_TYPE_NONE:
2009 m &= ~IFM_FDX;
2010 ifmedia_add(media, m | IFM_NONE, data, NULL);
2011 ifmedia_set(media, m | IFM_NONE);
2012 break;
2013
2014 case FW_PORT_MOD_TYPE_NA:
2015 case FW_PORT_MOD_TYPE_ER:
2016 default:
2017 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2018 ifmedia_set(media, m | IFM_UNKNOWN);
2019 break;
2020 }
2021 break;
2022
2023 case FW_PORT_TYPE_KX4:
2024 case FW_PORT_TYPE_KX:
2025 case FW_PORT_TYPE_KR:
2026 default:
2027 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2028 ifmedia_set(media, m | IFM_UNKNOWN);
2029 break;
2030 }
2031
2032 PORT_UNLOCK(pi);
2033}
2034
2035/*
2036 * Program the port's XGMAC based on parameters in ifnet. The caller also
2037 * indicates which parameters should be programmed (the rest are left alone).
2038 */
2039static int
2040update_mac_settings(struct port_info *pi, int flags)
2041{
2042 int rc;
2043 struct ifnet *ifp = pi->ifp;
2044 struct adapter *sc = pi->adapter;
2045 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2046
2047 PORT_LOCK_ASSERT_OWNED(pi);
2048 KASSERT(flags, ("%s: not told what to update.", __func__));
2049
2050 if (flags & XGMAC_MTU)
2051 mtu = ifp->if_mtu;
2052
2053 if (flags & XGMAC_PROMISC)
2054 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2055
2056 if (flags & XGMAC_ALLMULTI)
2057 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2058
2059 if (flags & XGMAC_VLANEX)
2060 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2061
2062 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2063 vlanex, false);
2064 if (rc) {
2065 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2066 return (rc);
2067 }
2068
2069 if (flags & XGMAC_UCADDR) {
2070 uint8_t ucaddr[ETHER_ADDR_LEN];
2071
2072 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2073 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2074 ucaddr, true, true);
2075 if (rc < 0) {
2076 rc = -rc;
2077 if_printf(ifp, "change_mac failed: %d\n", rc);
2078 return (rc);
2079 } else {
2080 pi->xact_addr_filt = rc;
2081 rc = 0;
2082 }
2083 }
2084
2085 if (flags & XGMAC_MCADDRS) {
2086 const uint8_t *mcaddr;
2087 int del = 1;
2088 uint64_t hash = 0;
2089 struct ifmultiaddr *ifma;
2090
2091 if_maddr_rlock(ifp);
2092 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2093 if (ifma->ifma_addr->sa_family != AF_LINK)
2094 continue;
2095 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2096
2097 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
2098 &mcaddr, NULL, &hash, 0);
2099 if (rc < 0) {
2100 rc = -rc;
2101 if_printf(ifp, "failed to add mc address"
2102 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
2103 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
2104 mcaddr[4], mcaddr[5], rc);
2105 goto mcfail;
2106 }
2107 del = 0;
2108 }
2109
2110 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2111 if (rc != 0)
2112 if_printf(ifp, "failed to set mc address hash: %d", rc);
2113mcfail:
2114 if_maddr_runlock(ifp);
2115 }
2116
2117 return (rc);
2118}
2119
2120static int
2121cxgbe_init_locked(struct port_info *pi)
2122{
2123 struct adapter *sc = pi->adapter;
2124 int rc = 0;
2125
2126 ADAPTER_LOCK_ASSERT_OWNED(sc);
2127
2128 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
2129 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
2130 rc = EINTR;
2131 goto done;
2132 }
2133 }
2134 if (IS_DOOMED(pi)) {
2135 rc = ENXIO;
2136 goto done;
2137 }
2138 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2139
2140 /* Give up the adapter lock, port init code can sleep. */
2141 SET_BUSY(sc);
2142 ADAPTER_UNLOCK(sc);
2143
2144 rc = cxgbe_init_synchronized(pi);
2145
2146done:
2147 ADAPTER_LOCK(sc);
2148 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2149 CLR_BUSY(sc);
2150 wakeup_one(&sc->flags);
2151 ADAPTER_UNLOCK(sc);
2152 return (rc);
2153}
2154
2155static int
2156cxgbe_init_synchronized(struct port_info *pi)
2157{
2158 struct adapter *sc = pi->adapter;
2159 struct ifnet *ifp = pi->ifp;
2160 	int rc = 0;
2161 
2162 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2163 
2164 	if (isset(&sc->open_device_map, pi->port_id)) {
2165 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2166 		    ("mismatch between open_device_map and if_drv_flags"));
2167 		return (0);	/* already running */
2168 	}
2169 
2170 	if (!(sc->flags & FULL_INIT_DONE) &&
2171 	    ((rc = adapter_full_init(sc)) != 0))
2172 		return (rc);	/* error message displayed already */
2173 
2174 	if (!(pi->flags & PORT_INIT_DONE) &&
2175 	    ((rc = port_full_init(pi)) != 0))
2176 		return (rc);	/* error message displayed already */
2177 
2178 	PORT_LOCK(pi);
2179 	rc = update_mac_settings(pi, XGMAC_ALL);
2180 	PORT_UNLOCK(pi);
2181 	if (rc)
2182 		goto done;	/* error message displayed already */
2183 
2184 	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2185 	if (rc != 0) {
2186 		if_printf(ifp, "start_link failed: %d\n", rc);
2187 		goto done;
2188 	}
2189 
2190 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2191 	if (rc != 0) {
2192 		if_printf(ifp, "enable_vi failed: %d\n", rc);
2193 		goto done;
2194 	}
2195 
2196 	/* all ok */
2197 	setbit(&sc->open_device_map, pi->port_id);
2198 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2199 
2200 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2201 done:
2202 	if (rc != 0)
2203 		cxgbe_uninit_synchronized(pi);
2204 
2205 	return (rc);
2206 }
2207
2208static int
2209cxgbe_uninit_locked(struct port_info *pi)
2210{
2211 struct adapter *sc = pi->adapter;
2212 int rc;
2213
2214 ADAPTER_LOCK_ASSERT_OWNED(sc);
2215
2216 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
2217 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
2218 rc = EINTR;
2219 goto done;
2220 }
2221 }
2222 if (IS_DOOMED(pi)) {
2223 rc = ENXIO;
2224 goto done;
2225 }
2226 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2227 SET_BUSY(sc);
2228 ADAPTER_UNLOCK(sc);
2229
2230 rc = cxgbe_uninit_synchronized(pi);
2231
2232 ADAPTER_LOCK(sc);
2233 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2234 CLR_BUSY(sc);
2235 wakeup_one(&sc->flags);
2236done:
2237 ADAPTER_UNLOCK(sc);
2238 return (rc);
2239}
2240
2241/*
2242 * Idempotent.
2243 */
2244static int
2245cxgbe_uninit_synchronized(struct port_info *pi)
2246{
2247 struct adapter *sc = pi->adapter;
2248 struct ifnet *ifp = pi->ifp;
2249 int rc;
2250 
2251 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2252 
2253 	/*
2254 	 * Disable the VI so that all its data in either direction is discarded
2255 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2256 	 * tick) intact as the TP can deliver negative advice or data that it's
2257 	 * holding in its RAM (for an offloaded connection) even after the VI is
2258 	 * disabled.
2259 	 */
2260 	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2261 	if (rc) {
2262 		if_printf(ifp, "disable_vi failed: %d\n", rc);
2263 		return (rc);
2264 	}
2265 
2266 	clrbit(&sc->open_device_map, pi->port_id);
2267 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2268 
2269 	pi->link_cfg.link_ok = 0;
2270 	pi->link_cfg.speed = 0;
2271 	t4_os_link_changed(sc, pi->port_id, 0);
2272 
2273 	return (0);
2274 }
2275
2276#define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
2277 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
2278 if (rc != 0) \
2279 goto done; \
2280} while (0)
2281
2282 static int
2283 adapter_full_init(struct adapter *sc)
2284 {
2285 	int rc, i, rid, p, q;
2286 	char s[8];
2287 	struct irq *irq;
2288 	struct port_info *pi;
2289 	struct sge_rxq *rxq;
2290 #ifndef TCP_OFFLOAD_DISABLE
2291 	struct sge_ofld_rxq *ofld_rxq;
2292 #endif
2293 
2294 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2295 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
2296 	    ("%s: FULL_INIT_DONE already", __func__));
2297 
2298 	/*
2299 	 * queues that belong to the adapter (not any particular port).
2300 	 */
2301 	rc = t4_setup_adapter_queues(sc);
2302 	if (rc != 0)
2303 		goto done;
2304 
2305 for (i = 0; i < ARRAY_SIZE(sc->tq); i++) {
2306 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
2307 taskqueue_thread_enqueue, &sc->tq[i]);
2308 if (sc->tq[i] == NULL) {
2309 device_printf(sc->dev,
2310 "failed to allocate task queue %d\n", i);
2311 rc = ENOMEM;
2312 goto done;
2313 }
2314 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
2315 device_get_nameunit(sc->dev), i);
2316 }
2317
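
/*
 * Illustrative sketch (not part of the driver): work is handed to these
 * per-channel taskqueues with the stock taskqueue(9) API.  The tx_task name
 * matches the task drained in quiesce_eq further down; where the task is
 * initialized and who enqueues it are assumptions made for the example.
 */
#if 0
	/* once, when the eq is set up */
	TASK_INIT(&eq->tx_task, 0, example_tx_task, eq);	/* example_tx_task is hypothetical */

	/* whenever tx work arrives for that eq */
	taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
#endif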
2318 	/*
2319 	 * Setup interrupts.
2320 	 */
2321 	irq = &sc->irq[0];
2322 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
2323 	if (sc->intr_count == 1) {
2324 		KASSERT(!(sc->flags & INTR_DIRECT),
2325 		    ("%s: single interrupt && INTR_DIRECT?", __func__));
2326 
2327 		T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
2328 	} else {
2329 		/* Multiple interrupts. */
2330 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2331 		    ("%s: too few intr.", __func__));
2332 
2333 		/* The first one is always error intr */
2334 		T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
2335 		irq++;
2336 		rid++;
2337 
2338 		/* The second one is always the firmware event queue */
2339 		T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
2340 		irq++;
2341 		rid++;
2342 
2343 		/*
2344 		 * Note that if INTR_DIRECT is not set then either the NIC rx
2345 		 * queues or (exclusive or) the TOE rx queues will be taking
2346 		 * direct interrupts.
2347 		 *
2348 		 * There is no need to check for is_offload(sc) as nofldrxq
2349 		 * will be 0 if offload is disabled.
2350 		 */
2351 		for_each_port(sc, p) {
2352 			pi = sc->port[p];
2353 
2354 #ifndef TCP_OFFLOAD_DISABLE
2355 			/*
2356 			 * Skip over the NIC queues if they aren't taking direct
2357 			 * interrupts.
2358 			 */
2359 			if (!(sc->flags & INTR_DIRECT) &&
2360 			    pi->nofldrxq > pi->nrxq)
2361 				goto ofld_queues;
2362 #endif
2363 			rxq = &sc->sge.rxq[pi->first_rxq];
2364 			for (q = 0; q < pi->nrxq; q++, rxq++) {
2365 				snprintf(s, sizeof(s), "%d.%d", p, q);
2366 				T4_ALLOC_IRQ(sc, irq, rid, t4_intr, rxq, s);
2367 				irq++;
2368 				rid++;
2369 			}
2370 
2371 #ifndef TCP_OFFLOAD_DISABLE
2372 			/*
2373 			 * Skip over the offload queues if they aren't taking
2374 			 * direct interrupts.
2375 			 */
2376 			if (!(sc->flags & INTR_DIRECT))
2377 				continue;
2378 ofld_queues:
2379 			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
2380 			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
2381 				snprintf(s, sizeof(s), "%d,%d", p, q);
2382 				T4_ALLOC_IRQ(sc, irq, rid, t4_intr, ofld_rxq, s);
2383 				irq++;
2384 				rid++;
2385 			}
2386 #endif
2387 		}
2388 	}
2389 
2390 	t4_intr_enable(sc);
2391 	sc->flags |= FULL_INIT_DONE;
2392 done:
2393 	if (rc != 0)
2394 		adapter_full_uninit(sc);
2395 
2396 	return (rc);
2397 }
2398 #undef T4_ALLOC_IRQ
2399 
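/*
 * Illustrative sketch (not part of the driver): the KASSERT in
 * adapter_full_init implies a minimum vector budget.  Following the loop
 * logic above, with INTR_DIRECT every NIC and TOE rx queue gets its own
 * vector on top of the two extras; without it, only the larger of the two
 * queue sets per port takes direct interrupts.  The helper is hypothetical.
 */
#if 0
static int
example_vectors_needed(struct adapter *sc)
{
	struct port_info *pi;
	int p, n = T4_EXTRA_INTR;	/* error intr + firmware event queue */

	for_each_port(sc, p) {
		pi = sc->port[p];
		if (sc->flags & INTR_DIRECT)
			n += pi->nrxq + pi->nofldrxq;
		else
			n += pi->nofldrxq > pi->nrxq ? pi->nofldrxq : pi->nrxq;
	}

	return (n);
}
#endif
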
2400 static int
2401 adapter_full_uninit(struct adapter *sc)
2402 {
2403 	int i;
2404 
2405 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2406 
2407 	t4_teardown_adapter_queues(sc);
2408 
2409 	for (i = 0; i < sc->intr_count; i++)
2410 		t4_free_irq(sc, &sc->irq[i]);
2411 
2412 	for (i = 0; i < ARRAY_SIZE(sc->tq) && sc->tq[i]; i++) {
2413 		taskqueue_free(sc->tq[i]);
2414 		sc->tq[i] = NULL;
2415 	}
2416 
2417 	sc->flags &= ~FULL_INIT_DONE;
2418 
2419 	return (0);
2420 }
2421 
2422 static int
2423port_full_init(struct port_info *pi)
2424{
2425 struct adapter *sc = pi->adapter;
2426 struct ifnet *ifp = pi->ifp;
2427 uint16_t *rss;
2428 struct sge_rxq *rxq;
2429 int rc, i;
2430
2431 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2432 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
2433 ("%s: PORT_INIT_DONE already", __func__));
2434
2435 sysctl_ctx_init(&pi->ctx);
2436 pi->flags |= PORT_SYSCTL_CTX;
2437
2438 /*
2439 * Allocate tx/rx/fl queues for this port.
2440 */
2441 rc = t4_setup_port_queues(pi);
2442 if (rc != 0)
2443 goto done; /* error message displayed already */
2444
2445 /*
2446 * Setup RSS for this port.
2447 */
2448 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
2449 M_ZERO | M_WAITOK);
2450 for_each_rxq(pi, i, rxq) {
2451 rss[i] = rxq->iq.abs_id;
2452 }
2453 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2454 pi->rss_size, rss, pi->nrxq);
2455 free(rss, M_CXGBE);
2456 if (rc != 0) {
2457 if_printf(ifp, "rss_config failed: %d\n", rc);
2458 goto done;
2459 }
2460
2461 pi->flags |= PORT_INIT_DONE;
2462done:
2463 if (rc != 0)
2464 port_full_uninit(pi);
2465
2466 return (rc);
2467}
2468
2469/*
2470 * Idempotent.
2471 */
2472static int
2473port_full_uninit(struct port_info *pi)
2474{
2475 struct adapter *sc = pi->adapter;
2476 int i;
2477 struct sge_rxq *rxq;
2478 struct sge_txq *txq;
2479#ifndef TCP_OFFLOAD_DISABLE
2480 struct sge_ofld_rxq *ofld_rxq;
2481 struct sge_wrq *ofld_txq;
2482#endif
2483
2484 if (pi->flags & PORT_INIT_DONE) {
2485
2486 /* Need to quiesce queues. XXX: ctrl queues? */
2487
2488 for_each_txq(pi, i, txq) {
2489 quiesce_eq(sc, &txq->eq);
2490 }
2491
2492#ifndef TCP_OFFLOAD_DISABLE
2493 for_each_ofld_txq(pi, i, ofld_txq) {
2494 quiesce_eq(sc, &ofld_txq->eq);
2495 }
2496#endif
2497
2498 for_each_rxq(pi, i, rxq) {
2499 quiesce_iq(sc, &rxq->iq);
2500 quiesce_fl(sc, &rxq->fl);
2501 }
2502
2503#ifndef TCP_OFFLOAD_DISABLE
2504 for_each_ofld_rxq(pi, i, ofld_rxq) {
2505 quiesce_iq(sc, &ofld_rxq->iq);
2506 quiesce_fl(sc, &ofld_rxq->fl);
2507 }
2508#endif
2509 }
2510
2511 t4_teardown_port_queues(pi);
2512 pi->flags &= ~PORT_INIT_DONE;
2513
2514 return (0);
2515}
2516
2517static void
2518quiesce_eq(struct adapter *sc, struct sge_eq *eq)
2519{
2520 EQ_LOCK(eq);
2521 eq->flags |= EQ_DOOMED;
2522
2523 /*
2524 * Wait for the response to a credit flush if one's
2525 * pending.
2526 */
2527 while (eq->flags & EQ_CRFLUSHED)
2528 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
2529 EQ_UNLOCK(eq);
2530
2531 callout_drain(&eq->tx_callout); /* XXX: iffy */
2532 pause("callout", 10); /* Still iffy */
2533
2534 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
2535}
2536
2537static void
2538quiesce_iq(struct adapter *sc, struct sge_iq *iq)
2539{
2540 (void) sc; /* unused */
2541
2542 /* Synchronize with the interrupt handler */
2543 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
2544 pause("iqfree", 1);
2545}
2546
2547static void
2548quiesce_fl(struct adapter *sc, struct sge_fl *fl)
2549{
2550 mtx_lock(&sc->sfl_lock);
2551 FL_LOCK(fl);
2552 fl->flags |= FL_DOOMED;
2553 FL_UNLOCK(fl);
2554 mtx_unlock(&sc->sfl_lock);
2555
2556 callout_drain(&sc->sfl_callout);
2557 KASSERT((fl->flags & FL_STARVING) == 0,
2558 ("%s: still starving", __func__));
2559}
2560
2561static int
2562 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
2563     driver_intr_t *handler, void *arg, char *name)
2564{
2565 int rc;
2566
2567 irq->rid = rid;
2568 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
2569 RF_SHAREABLE | RF_ACTIVE);
2570 if (irq->res == NULL) {
2571 device_printf(sc->dev,
2572 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
2573 return (ENOMEM);
2574 }
2575
2576 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
2577 NULL, handler, arg, &irq->tag);
2578 if (rc != 0) {
2579 device_printf(sc->dev,
2580 "failed to setup interrupt for rid %d, name %s: %d\n",
2581 rid, name, rc);
2582 } else if (name)
2583 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
2584
2585 return (rc);
2586}
2587
2588static int
2589t4_free_irq(struct adapter *sc, struct irq *irq)
2590{
2591 if (irq->tag)
2592 bus_teardown_intr(sc->dev, irq->res, irq->tag);
2593 if (irq->res)
2594 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
2595
2596 bzero(irq, sizeof(*irq));
2597
2598 return (0);
2599}
2600
2601static void
2602reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
2603 unsigned int end)
2604{
2605 uint32_t *p = (uint32_t *)(buf + start);
2606
2607 for ( ; start <= end; start += sizeof(uint32_t))
2608 *p++ = t4_read_reg(sc, start);
2609}
2610
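/*
 * Illustrative sketch (not part of the driver): reg_block_dump stores each
 * value at buf + register offset, so the caller's buffer must span the
 * highest range dumped by t4_get_regs below (0x27e04 plus 4 bytes, i.e. a
 * little under 160KB).  The T4_REGDUMP_SIZE name is an assumption about the
 * ioctl header.
 */
#if 0
	buf = malloc(T4_REGDUMP_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	t4_get_regs(sc, &regs, buf);
#endif
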
2611static void
2612t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
2613{
2614 int i;
2615 static const unsigned int reg_ranges[] = {
2616 0x1008, 0x1108,
2617 0x1180, 0x11b4,
2618 0x11fc, 0x123c,
2619 0x1300, 0x173c,
2620 0x1800, 0x18fc,
2621 0x3000, 0x30d8,
2622 0x30e0, 0x5924,
2623 0x5960, 0x59d4,
2624 0x5a00, 0x5af8,
2625 0x6000, 0x6098,
2626 0x6100, 0x6150,
2627 0x6200, 0x6208,
2628 0x6240, 0x6248,
2629 0x6280, 0x6338,
2630 0x6370, 0x638c,
2631 0x6400, 0x643c,
2632 0x6500, 0x6524,
2633 0x6a00, 0x6a38,
2634 0x6a60, 0x6a78,
2635 0x6b00, 0x6b84,
2636 0x6bf0, 0x6c84,
2637 0x6cf0, 0x6d84,
2638 0x6df0, 0x6e84,
2639 0x6ef0, 0x6f84,
2640 0x6ff0, 0x7084,
2641 0x70f0, 0x7184,
2642 0x71f0, 0x7284,
2643 0x72f0, 0x7384,
2644 0x73f0, 0x7450,
2645 0x7500, 0x7530,
2646 0x7600, 0x761c,
2647 0x7680, 0x76cc,
2648 0x7700, 0x7798,
2649 0x77c0, 0x77fc,
2650 0x7900, 0x79fc,
2651 0x7b00, 0x7c38,
2652 0x7d00, 0x7efc,
2653 0x8dc0, 0x8e1c,
2654 0x8e30, 0x8e78,
2655 0x8ea0, 0x8f6c,
2656 0x8fc0, 0x9074,
2657 0x90fc, 0x90fc,
2658 0x9400, 0x9458,
2659 0x9600, 0x96bc,
2660 0x9800, 0x9808,
2661 0x9820, 0x983c,
2662 0x9850, 0x9864,
2663 0x9c00, 0x9c6c,
2664 0x9c80, 0x9cec,
2665 0x9d00, 0x9d6c,
2666 0x9d80, 0x9dec,
2667 0x9e00, 0x9e6c,
2668 0x9e80, 0x9eec,
2669 0x9f00, 0x9f6c,
2670 0x9f80, 0x9fec,
2671 0xd004, 0xd03c,
2672 0xdfc0, 0xdfe0,
2673 0xe000, 0xea7c,
2674 0xf000, 0x11190,
2675 0x19040, 0x19124,
2676 0x19150, 0x191b0,
2677 0x191d0, 0x191e8,
2678 0x19238, 0x1924c,
2679 0x193f8, 0x19474,
2680 0x19490, 0x194f8,
2681 0x19800, 0x19f30,
2682 0x1a000, 0x1a06c,
2683 0x1a0b0, 0x1a120,
2684 0x1a128, 0x1a138,
2685 0x1a190, 0x1a1c4,
2686 0x1a1fc, 0x1a1fc,
2687 0x1e040, 0x1e04c,
2688 0x1e240, 0x1e28c,
2689 0x1e2c0, 0x1e2c0,
2690 0x1e2e0, 0x1e2e0,
2691 0x1e300, 0x1e384,
2692 0x1e3c0, 0x1e3c8,
2693 0x1e440, 0x1e44c,
2694 0x1e640, 0x1e68c,
2695 0x1e6c0, 0x1e6c0,
2696 0x1e6e0, 0x1e6e0,
2697 0x1e700, 0x1e784,
2698 0x1e7c0, 0x1e7c8,
2699 0x1e840, 0x1e84c,
2700 0x1ea40, 0x1ea8c,
2701 0x1eac0, 0x1eac0,
2702 0x1eae0, 0x1eae0,
2703 0x1eb00, 0x1eb84,
2704 0x1ebc0, 0x1ebc8,
2705 0x1ec40, 0x1ec4c,
2706 0x1ee40, 0x1ee8c,
2707 0x1eec0, 0x1eec0,
2708 0x1eee0, 0x1eee0,
2709 0x1ef00, 0x1ef84,
2710 0x1efc0, 0x1efc8,
2711 0x1f040, 0x1f04c,
2712 0x1f240, 0x1f28c,
2713 0x1f2c0, 0x1f2c0,
2714 0x1f2e0, 0x1f2e0,
2715 0x1f300, 0x1f384,
2716 0x1f3c0, 0x1f3c8,
2717 0x1f440, 0x1f44c,
2718 0x1f640, 0x1f68c,
2719 0x1f6c0, 0x1f6c0,
2720 0x1f6e0, 0x1f6e0,
2721 0x1f700, 0x1f784,
2722 0x1f7c0, 0x1f7c8,
2723 0x1f840, 0x1f84c,
2724 0x1fa40, 0x1fa8c,
2725 0x1fac0, 0x1fac0,
2726 0x1fae0, 0x1fae0,
2727 0x1fb00, 0x1fb84,
2728 0x1fbc0, 0x1fbc8,
2729 0x1fc40, 0x1fc4c,
2730 0x1fe40, 0x1fe8c,
2731 0x1fec0, 0x1fec0,
2732 0x1fee0, 0x1fee0,
2733 0x1ff00, 0x1ff84,
2734 0x1ffc0, 0x1ffc8,
2735 0x20000, 0x2002c,
2736 0x20100, 0x2013c,
2737 0x20190, 0x201c8,
2738 0x20200, 0x20318,
2739 0x20400, 0x20528,
2740 0x20540, 0x20614,
2741 0x21000, 0x21040,
2742 0x2104c, 0x21060,
2743 0x210c0, 0x210ec,
2744 0x21200, 0x21268,
2745 0x21270, 0x21284,
2746 0x212fc, 0x21388,
2747 0x21400, 0x21404,
2748 0x21500, 0x21518,
2749 0x2152c, 0x2153c,
2750 0x21550, 0x21554,
2751 0x21600, 0x21600,
2752 0x21608, 0x21628,
2753 0x21630, 0x2163c,
2754 0x21700, 0x2171c,
2755 0x21780, 0x2178c,
2756 0x21800, 0x21c38,
2757 0x21c80, 0x21d7c,
2758 0x21e00, 0x21e04,
2759 0x22000, 0x2202c,
2760 0x22100, 0x2213c,
2761 0x22190, 0x221c8,
2762 0x22200, 0x22318,
2763 0x22400, 0x22528,
2764 0x22540, 0x22614,
2765 0x23000, 0x23040,
2766 0x2304c, 0x23060,
2767 0x230c0, 0x230ec,
2768 0x23200, 0x23268,
2769 0x23270, 0x23284,
2770 0x232fc, 0x23388,
2771 0x23400, 0x23404,
2772 0x23500, 0x23518,
2773 0x2352c, 0x2353c,
2774 0x23550, 0x23554,
2775 0x23600, 0x23600,
2776 0x23608, 0x23628,
2777 0x23630, 0x2363c,
2778 0x23700, 0x2371c,
2779 0x23780, 0x2378c,
2780 0x23800, 0x23c38,
2781 0x23c80, 0x23d7c,
2782 0x23e00, 0x23e04,
2783 0x24000, 0x2402c,
2784 0x24100, 0x2413c,
2785 0x24190, 0x241c8,
2786 0x24200, 0x24318,
2787 0x24400, 0x24528,
2788 0x24540, 0x24614,
2789 0x25000, 0x25040,
2790 0x2504c, 0x25060,
2791 0x250c0, 0x250ec,
2792 0x25200, 0x25268,
2793 0x25270, 0x25284,
2794 0x252fc, 0x25388,
2795 0x25400, 0x25404,
2796 0x25500, 0x25518,
2797 0x2552c, 0x2553c,
2798 0x25550, 0x25554,
2799 0x25600, 0x25600,
2800 0x25608, 0x25628,
2801 0x25630, 0x2563c,
2802 0x25700, 0x2571c,
2803 0x25780, 0x2578c,
2804 0x25800, 0x25c38,
2805 0x25c80, 0x25d7c,
2806 0x25e00, 0x25e04,
2807 0x26000, 0x2602c,
2808 0x26100, 0x2613c,
2809 0x26190, 0x261c8,
2810 0x26200, 0x26318,
2811 0x26400, 0x26528,
2812 0x26540, 0x26614,
2813 0x27000, 0x27040,
2814 0x2704c, 0x27060,
2815 0x270c0, 0x270ec,
2816 0x27200, 0x27268,
2817 0x27270, 0x27284,
2818 0x272fc, 0x27388,
2819 0x27400, 0x27404,
2820 0x27500, 0x27518,
2821 0x2752c, 0x2753c,
2822 0x27550, 0x27554,
2823 0x27600, 0x27600,
2824 0x27608, 0x27628,
2825 0x27630, 0x2763c,
2826 0x27700, 0x2771c,
2827 0x27780, 0x2778c,
2828 0x27800, 0x27c38,
2829 0x27c80, 0x27d7c,
2830 0x27e00, 0x27e04
2831 };
2832
2833 regs->version = 4 | (sc->params.rev << 10);
2834 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
2835 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
2836}
2837
2838static void
2839cxgbe_tick(void *arg)
2840{
2841 struct port_info *pi = arg;
2842 struct ifnet *ifp = pi->ifp;
2843 struct sge_txq *txq;
2844 int i, drops;
2845 struct port_stats *s = &pi->stats;
2846
2847 PORT_LOCK(pi);
2848 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2849 PORT_UNLOCK(pi);
2850 return; /* without scheduling another callout */
2851 }
2852
2853 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
2854
2855 ifp->if_opackets = s->tx_frames - s->tx_pause;
2856 ifp->if_ipackets = s->rx_frames - s->rx_pause;
2857 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
2858 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
2859 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
2860 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
2861 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2862 s->rx_ovflow3;
2863
2864 drops = s->tx_drop;
2865 for_each_txq(pi, i, txq)
2866 drops += txq->br->br_drops;
2867 ifp->if_snd.ifq_drops = drops;
2868
2869 ifp->if_oerrors = s->tx_error_frames;
2870 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
2871 s->rx_fcs_err + s->rx_len_err;
2872
2873 callout_schedule(&pi->tick, hz);
2874 PORT_UNLOCK(pi);
2875}
2876
2877static int
2878cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2879{
2880#ifdef INVARIANTS
2881 panic("%s: opcode %02x on iq %p with payload %p",
2882 __func__, rss->opcode, iq, m);
2883#else
2884 log(LOG_ERR, "%s: opcode %02x on iq %p with payload %p",
2885 __func__, rss->opcode, iq, m);
2886 m_freem(m);
2887#endif
2888 return (EDOOFUS);
2889}
2890
2891int
2892t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2893{
2894 uintptr_t *loc, new;
2895
2896 if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2897 return (EINVAL);
2898
2899 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
2900 loc = (uintptr_t *) &sc->cpl_handler[opcode];
2901 atomic_store_rel_ptr(loc, new);
2902
2903 return (0);
2904}
2905
2906static int
2907 t4_sysctls(struct adapter *sc)
2908 {
2909 	struct sysctl_ctx_list *ctx;
2910 	struct sysctl_oid *oid;
2911 	struct sysctl_oid_list *children, *c0;
2912 	static char *caps[] = {
2913 		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
2914 		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
2915 		"\20\1TOE",				/* caps[2] toecaps */
2916 		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
2917 		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
2918 		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
2919 		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
2920 		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
2921 	};
2922 
2923 	ctx = device_get_sysctl_ctx(sc->dev);
2924 
2925 	/*
2926 	 * dev.t4nex.X.
2927 	 */
2928 	oid = device_get_sysctl_tree(sc->dev);
2929 	c0 = children = SYSCTL_CHILDREN(oid);
2930 
2931 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
2932 	    &sc->params.nports, 0, "# of ports");
2933 
2934 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
2935 	    &sc->params.rev, 0, "chip hardware revision");
2936 
2937 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
2938 	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
2939 
2940 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
2941 	    CTLFLAG_RD, &t4_cfg_file, 0, "configuration file");
2942 
2943 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD,
2944 	    &sc->cfcsum, 0, "config file checksum");
2945 
2946 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
2947 	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
2948 	    sysctl_bitfield, "A", "available link capabilities");
2949 
2950 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
2951 	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
2952 	    sysctl_bitfield, "A", "available NIC capabilities");
2953 
2954 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
2955 	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
2956 	    sysctl_bitfield, "A", "available TCP offload capabilities");
2957 
2958 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
2959 	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
2960 	    sysctl_bitfield, "A", "available RDMA capabilities");
2961 
2962 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
2963 	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
2964 	    sysctl_bitfield, "A", "available iSCSI capabilities");
2965 
2966 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
2967 	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
2968 	    sysctl_bitfield, "A", "available FCoE capabilities");
2969 
2970 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
2971 	    &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");
2972 
2973 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
2974 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
2975 	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
2976 	    "interrupt holdoff timer values (us)");
2977 
2978 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
2979 	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
2980 	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
2981 	    "interrupt holdoff packet counter values");
2982 
2983 	/*
2984 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
2985 	 */
2986 	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
2987 	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
2988 	    "logs and miscellaneous information");
2989 	children = SYSCTL_CHILDREN(oid);
2990 
2991 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
2992 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2993 	    sysctl_cctrl, "A", "congestion control");
2994 
2995 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
2996 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2997 	    sysctl_cpl_stats, "A", "CPL statistics");
2998 
2999 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
3000 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3001 	    sysctl_ddp_stats, "A", "DDP statistics");
3002 
3003 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
3004 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3005 	    sysctl_devlog, "A", "firmware's device log");
3006
3007 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
3008 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3009 sysctl_fcoe_stats, "A", "FCoE statistics");
3010
3011 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
3012 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3013 sysctl_hw_sched, "A", "hardware scheduler ");
3014
3015 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
3016 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3017 sysctl_l2t, "A", "hardware L2 table");
3018
3019 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
3020 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3021 sysctl_lb_stats, "A", "loopback statistics");
3022
3023 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
3024 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3025 sysctl_meminfo, "A", "memory regions");
3026
3027 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
3028 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3029 sysctl_path_mtus, "A", "path MTUs");
3030
3031 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
3032 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3033 sysctl_pm_stats, "A", "PM statistics");
3034
3035 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
3036 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3037 sysctl_rdma_stats, "A", "RDMA statistics");
3038
3039 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
3040 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3041 sysctl_tcp_stats, "A", "TCP statistics");
3042
3043 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
3044 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3045 sysctl_tids, "A", "TID information");
3046
3047 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
3048 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3049 sysctl_tp_err_stats, "A", "TP error statistics");
3050
3051 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
3052 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3053 sysctl_tx_rate, "A", "Tx rate");
3054
3055#ifndef TCP_OFFLOAD_DISABLE
3056 if (is_offload(sc)) {
3057 /*
3058 * dev.t4nex.X.toe.
3059 */
3060 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
3061 NULL, "TOE parameters");
3062 children = SYSCTL_CHILDREN(oid);
3063
3064 sc->tt.sndbuf = 256 * 1024;
3065 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
3066 &sc->tt.sndbuf, 0, "max hardware send buffer size");
3067
3068 sc->tt.ddp = 0;
3069 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
3070 &sc->tt.ddp, 0, "DDP allowed");
3071 sc->tt.indsz = M_INDICATESIZE;
3072 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
3073 &sc->tt.indsz, 0, "DDP max indicate size allowed");
3074 sc->tt.ddp_thres = 3*4096;
3075 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
3076 &sc->tt.ddp_thres, 0, "DDP threshold");
3077 }
3078#endif
3079
3080
3081 return (0);
3082}
3083
3084static int
3085cxgbe_sysctls(struct port_info *pi)
3086{
3087 struct sysctl_ctx_list *ctx;
3088 struct sysctl_oid *oid;
3089 struct sysctl_oid_list *children;
3090
3091 ctx = device_get_sysctl_ctx(pi->dev);
3092
3093 /*
3094 * dev.cxgbe.X.
3095 */
3096 oid = device_get_sysctl_tree(pi->dev);
3097 children = SYSCTL_CHILDREN(oid);
3098
3099 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
3100 &pi->nrxq, 0, "# of rx queues");
3101 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
3102 &pi->ntxq, 0, "# of tx queues");
3103 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
3104 &pi->first_rxq, 0, "index of first rx queue");
3105 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
3106 &pi->first_txq, 0, "index of first tx queue");
3107
3108#ifndef TCP_OFFLOAD_DISABLE
3109 if (is_offload(pi->adapter)) {
3110 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
3111 &pi->nofldrxq, 0,
3112 "# of rx queues for offloaded TCP connections");
3113 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
3114 &pi->nofldtxq, 0,
3115 "# of tx queues for offloaded TCP connections");
3116 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
3117 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
3118 "index of first TOE rx queue");
3119 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
3120 CTLFLAG_RD, &pi->first_ofld_txq, 0,
3121 "index of first TOE tx queue");
3122 }
3123#endif
3124
2455 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
2456 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
2457 "holdoff timer index");
2458 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
2459 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
2460 "holdoff packet counter index");
2461
2462 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
2463 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
2464 "rx queue size");
2465 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
2466 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
2467 "tx queue size");
2468
2469 /*
2470 * dev.cxgbe.X.stats.
2471 */
2472 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2473 NULL, "port statistics");
2474 children = SYSCTL_CHILDREN(oid);
2475
2476#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
2477 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
2478 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
2479 sysctl_handle_t4_reg64, "QU", desc)
2480
2481 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
2482 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
2483 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
2484 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
2485 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
2486 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
2487 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
2488 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
2489 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
2490 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
2491 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
2492 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
2493 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
2494 "# of tx frames in this range",
2495 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
2496 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
2497 "# of tx frames in this range",
2498 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
2499 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
2500 "# of tx frames in this range",
2501 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
2502 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
2503 "# of tx frames in this range",
2504 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
2505 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
2506 "# of tx frames in this range",
2507 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
2508 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
2509 "# of tx frames in this range",
2510 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
2511 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
2512 "# of tx frames in this range",
2513 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
2514 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
2515 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
2516 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
2517 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
2518 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
2519 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
2520 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
2521 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
2522 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
2523 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
2524 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
2525 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
2526 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
2527 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
2528 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
2529 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
2530 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
2531 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
2532 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
2533 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
2534
2535 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
2536 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
2537 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
2538 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
2539 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
2540 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
2541 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
2542 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
2543 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
2544 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
2545 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
2546 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
2547 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
2548 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
2549 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
2550 "# of frames received with bad FCS",
2551 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
2552 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
2553 "# of frames received with length error",
2554 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
2555 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
2556 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
2557 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
2558 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
2559 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
2560 "# of rx frames in this range",
2561 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
2562 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
2563 "# of rx frames in this range",
2564 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
2565 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
2566 "# of rx frames in this range",
2567 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
2568 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
2569 "# of rx frames in this range",
2570 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
2571 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
2572 "# of rx frames in this range",
2573 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
2574 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
2575 "# of rx frames in this range",
2576 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
2577 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
2578 "# of rx frames in this range",
2579 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
2580 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
2581 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
2582 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
2583 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
2584 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
2585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
2586 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
2587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
2588 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
2589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
2590 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
2591 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
2592 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
2593 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
2594 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
2595 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
2596 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
2597 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
2598
2599#undef SYSCTL_ADD_T4_REG64
2600
2601#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
2602 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
2603 &pi->stats.name, desc)
2604
2605 	/* We get these from port_stats and they may be stale by up to 1s */
2606 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
2607 "# drops due to buffer-group 0 overflows");
2608 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
2609 "# drops due to buffer-group 1 overflows");
2610 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
2611 "# drops due to buffer-group 2 overflows");
2612 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
2613 "# drops due to buffer-group 3 overflows");
2614 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
2615 "# of buffer-group 0 truncated packets");
2616 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
2617 "# of buffer-group 1 truncated packets");
2618 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
2619 "# of buffer-group 2 truncated packets");
2620 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
2621 "# of buffer-group 3 truncated packets");
2622
2623#undef SYSCTL_ADD_T4_PORTSTAT
2624
2625 return (0);
2626}
2627
2628static int
2629sysctl_int_array(SYSCTL_HANDLER_ARGS)
2630{
2631 int rc, *i;
2632 struct sbuf sb;
2633
2634 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
2635 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
2636 sbuf_printf(&sb, "%d ", *i);
2637 sbuf_trim(&sb);
2638 sbuf_finish(&sb);
2639 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2640 sbuf_delete(&sb);
2641 return (rc);
2642}
2643
2644static int
3206 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
3207 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
3208 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
3209 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
3210 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
3211 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
3212 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
3213 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
3214 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
3215 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
3216 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
3217 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
3218 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
3219 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
3220 "# of frames received with bad FCS",
3221 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
3222 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
3223 "# of frames received with length error",
3224 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
3225 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
3226 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
3227 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
3228 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
3229 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
3230 "# of rx frames in this range",
3231 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
3232 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
3233 "# of rx frames in this range",
3234 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
3235 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
3236 "# of rx frames in this range",
3237 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
3238 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
3239 "# of rx frames in this range",
3240 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
3241 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
3242 "# of rx frames in this range",
3243 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
3244 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
3245 "# of rx frames in this range",
3246 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
3247 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
3248 "# of rx frames in this range",
3249 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
3250 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
3251 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
3252 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
3253 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
3254 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
3255 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
3256 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
3257 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
3258 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
3259 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
3260 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
3261 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
3262 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
3263 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
3264 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
3265 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
3266 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
3267 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
3268
3269#undef SYSCTL_ADD_T4_REG64
3270
3271#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
3272 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
3273 &pi->stats.name, desc)
3274
3275 /* We get these from port_stats and they may be stale by up to 1s */
3276 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
3277 "# drops due to buffer-group 0 overflows");
3278 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
3279 "# drops due to buffer-group 1 overflows");
3280 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
3281 "# drops due to buffer-group 2 overflows");
3282 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
3283 "# drops due to buffer-group 3 overflows");
3284 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
3285 "# of buffer-group 0 truncated packets");
3286 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
3287 "# of buffer-group 1 truncated packets");
3288 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
3289 "# of buffer-group 2 truncated packets");
3290 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
3291 "# of buffer-group 3 truncated packets");
3292
3293#undef SYSCTL_ADD_T4_PORTSTAT
3294
3295 return (0);
3296}
3297
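/*
 * Format an array of ints as a space separated string.  arg1 points to the
 * array and arg2 is its total size in bytes.
 */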
3298static int
3299sysctl_int_array(SYSCTL_HANDLER_ARGS)
3300{
3301 int rc, *i;
3302 struct sbuf sb;
3303
3304 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
3305 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
3306 sbuf_printf(&sb, "%d ", *i);
3307 sbuf_trim(&sb);
3308 sbuf_finish(&sb);
3309 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
3310 sbuf_delete(&sb);
3311 return (rc);
3312}
3313
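/*
 * Display an integer (arg2) as a bit field, using the kernel "%b" format
 * string passed in via arg1.
 */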
3314static int
3315sysctl_bitfield(SYSCTL_HANDLER_ARGS)
3316{
3317 int rc;
3318 struct sbuf *sb;
3319
3320 rc = sysctl_wire_old_buffer(req, 0);
3321 if (rc != 0)
3322 return (rc);
3323
3324 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3325 if (sb == NULL)
3326 return (ENOMEM);
3327
3328 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
3329 rc = sbuf_finish(sb);
3330 sbuf_delete(sb);
3331
3332 return (rc);
3333}
3334
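/*
 * Validate a new holdoff timer index and apply it to the interrupt
 * parameters of every rx queue on the port.
 */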
3335static int
3336sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
3337{
3338 struct port_info *pi = arg1;
3339 struct adapter *sc = pi->adapter;
2649 struct sge_rxq *rxq;
3340 int idx, rc, i;
3341
3342 idx = pi->tmr_idx;
3343
3344 rc = sysctl_handle_int(oidp, &idx, 0, req);
3345 if (rc != 0 || req->newptr == NULL)
3346 return (rc);
3347
3348 if (idx < 0 || idx >= SGE_NTIMERS)
3349 return (EINVAL);
3350
3351 ADAPTER_LOCK(sc);
3352 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3353 if (rc == 0) {
3354 struct sge_rxq *rxq;
3355 uint8_t v;
3356
3357 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
3358 for_each_rxq(pi, i, rxq) {
2665 rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) |
2666 V_QINTR_CNT_EN(pi->pktc_idx != -1);
3359#ifdef atomic_store_rel_8
3360 atomic_store_rel_8(&rxq->iq.intr_params, v);
3361#else
3362 rxq->iq.intr_params = v;
3363#endif
3364 }
3365 pi->tmr_idx = idx;
3366 }
3367
3368 ADAPTER_UNLOCK(sc);
3369 return (rc);
3370}
3371
3372static int
3373sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
3374{
3375 struct port_info *pi = arg1;
3376 struct adapter *sc = pi->adapter;
3377 int idx, rc;
3378
3379 idx = pi->pktc_idx;
3380
3381 rc = sysctl_handle_int(oidp, &idx, 0, req);
3382 if (rc != 0 || req->newptr == NULL)
3383 return (rc);
3384
3385 if (idx < -1 || idx >= SGE_NCOUNTERS)
3386 return (EINVAL);
3387
3388 ADAPTER_LOCK(sc);
3389 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2693 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2694 rc = EBUSY; /* can be changed only when port is down */
3390 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3391 rc = EBUSY; /* cannot be changed once the queues are created */
3392
3393 if (rc == 0)
3394 pi->pktc_idx = idx;
3395
3396 ADAPTER_UNLOCK(sc);
3397 return (rc);
3398}
3399
3400static int
3401sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
3402{
3403 struct port_info *pi = arg1;
3404 struct adapter *sc = pi->adapter;
3405 int qsize, rc;
3406
3407 qsize = pi->qsize_rxq;
3408
3409 rc = sysctl_handle_int(oidp, &qsize, 0, req);
3410 if (rc != 0 || req->newptr == NULL)
3411 return (rc);
3412
3413 if (qsize < 128 || (qsize & 7))
3414 return (EINVAL);
3415
3416 ADAPTER_LOCK(sc);
3417 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2721 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2722 rc = EBUSY; /* can be changed only when port is down */
3418 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3419 rc = EBUSY; /* cannot be changed once the queues are created */
3420
3421 if (rc == 0)
3422 pi->qsize_rxq = qsize;
3423
3424 ADAPTER_UNLOCK(sc);
3425 return (rc);
3426}
3427
3428static int
3429sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
3430{
3431 struct port_info *pi = arg1;
3432 struct adapter *sc = pi->adapter;
3433 int qsize, rc;
3434
3435 qsize = pi->qsize_txq;
3436
3437 rc = sysctl_handle_int(oidp, &qsize, 0, req);
3438 if (rc != 0 || req->newptr == NULL)
3439 return (rc);
3440
3441 if (qsize < 128)
3442 return (EINVAL);
3443
3444 ADAPTER_LOCK(sc);
3445 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2749 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2750 rc = EBUSY; /* can be changed only when port is down */
3446 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3447 rc = EBUSY; /* cannot be changed once the queues are created */
3448
3449 if (rc == 0)
3450 pi->qsize_txq = qsize;
3451
3452 ADAPTER_UNLOCK(sc);
3453 return (rc);
3454}
3455
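/* Read back a 64-bit hardware register; arg2 is the register address. */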
3456static int
3457sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
3458{
3459 struct adapter *sc = arg1;
3460 int reg = arg2;
3461 uint64_t val;
3462
3463 val = t4_read_reg64(sc, reg);
3464
3465 return (sysctl_handle_64(oidp, &val, 0, req));
3466}
3467
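/*
 * Dump the congestion control table: the additive increment for each
 * (MTU, window) pair, followed by each window's parameters and decrement
 * factor.
 */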
3468static int
3469sysctl_cctrl(SYSCTL_HANDLER_ARGS)
3470{
3471 struct adapter *sc = arg1;
3472 struct sbuf *sb;
3473 int rc, i;
3474 uint16_t incr[NMTUS][NCCTRL_WIN];
3475 static const char *dec_fac[] = {
3476 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
3477 "0.9375"
3478 };
3479
3480 rc = sysctl_wire_old_buffer(req, 0);
3481 if (rc != 0)
3482 return (rc);
3483
3484 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3485 if (sb == NULL)
3486 return (ENOMEM);
3487
3488 t4_read_cong_tbl(sc, incr);
3489
3490 for (i = 0; i < NCCTRL_WIN; ++i) {
3491 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
3492 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
3493 incr[5][i], incr[6][i], incr[7][i]);
3494 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
3495 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
3496 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
3497 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
3498 }
3499
3500 rc = sbuf_finish(sb);
3501 sbuf_delete(sb);
3502
3503 return (rc);
3504}
3505
3506static int
3507sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
3508{
3509 struct adapter *sc = arg1;
3510 struct sbuf *sb;
3511 int rc;
3512 struct tp_cpl_stats stats;
3513
3514 rc = sysctl_wire_old_buffer(req, 0);
3515 if (rc != 0)
3516 return (rc);
3517
3518 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3519 if (sb == NULL)
3520 return (ENOMEM);
3521
3522 t4_tp_get_cpl_stats(sc, &stats);
3523
3524 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
3525 "channel 3\n");
3526 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
3527 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
3528 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
3529 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
3530
3531 rc = sbuf_finish(sb);
3532 sbuf_delete(sb);
3533
3534 return (rc);
3535}
3536
3537static int
3538sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
3539{
3540 struct adapter *sc = arg1;
3541 struct sbuf *sb;
3542 int rc;
3543 struct tp_usm_stats stats;
3544
3545 rc = sysctl_wire_old_buffer(req, 0);
3546 if (rc != 0)
3547 return (rc);
3548
3549 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3550 if (sb == NULL)
3551 return (ENOMEM);
3552
3553 t4_get_usm_stats(sc, &stats);
3554
3555 sbuf_printf(sb, "Frames: %u\n", stats.frames);
3556 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
3557 sbuf_printf(sb, "Drops: %u", stats.drops);
3558
3559 rc = sbuf_finish(sb);
3560 sbuf_delete(sb);
3561
3562 return (rc);
3563}
3564
3565const char *devlog_level_strings[] = {
3566 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
3567 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
3568 [FW_DEVLOG_LEVEL_ERR] = "ERR",
3569 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
3570 [FW_DEVLOG_LEVEL_INFO] = "INFO",
3571 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
3572};
3573
3574const char *devlog_facility_strings[] = {
3575 [FW_DEVLOG_FACILITY_CORE] = "CORE",
3576 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
3577 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
3578 [FW_DEVLOG_FACILITY_RES] = "RES",
3579 [FW_DEVLOG_FACILITY_HW] = "HW",
3580 [FW_DEVLOG_FACILITY_FLR] = "FLR",
3581 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
3582 [FW_DEVLOG_FACILITY_PHY] = "PHY",
3583 [FW_DEVLOG_FACILITY_MAC] = "MAC",
3584 [FW_DEVLOG_FACILITY_PORT] = "PORT",
3585 [FW_DEVLOG_FACILITY_VI] = "VI",
3586 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
3587 [FW_DEVLOG_FACILITY_ACL] = "ACL",
3588 [FW_DEVLOG_FACILITY_TM] = "TM",
3589 [FW_DEVLOG_FACILITY_QFC] = "QFC",
3590 [FW_DEVLOG_FACILITY_DCB] = "DCB",
3591 [FW_DEVLOG_FACILITY_ETH] = "ETH",
3592 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
3593 [FW_DEVLOG_FACILITY_RI] = "RI",
3594 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
3595 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
3596 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
3597 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
3598};
3599
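/*
 * Read the firmware's device log out of adapter memory, locate the oldest
 * entry by timestamp, and display the circular log in order from there.
 */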
3600static int
3601sysctl_devlog(SYSCTL_HANDLER_ARGS)
3602{
3603 struct adapter *sc = arg1;
3604 struct devlog_params *dparams = &sc->params.devlog;
3605 struct fw_devlog_e *buf, *e;
3606 int i, j, rc, nentries, first = 0;
3607 struct sbuf *sb;
3608 uint64_t ftstamp = UINT64_MAX;
3609
3610 if (dparams->start == 0)
3611 return (ENXIO);
3612
3613 nentries = dparams->size / sizeof(struct fw_devlog_e);
3614
3615 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
3616 if (buf == NULL)
3617 return (ENOMEM);
3618
3619 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
3620 (void *)buf);
3621 if (rc != 0)
3622 goto done;
3623
3624 for (i = 0; i < nentries; i++) {
3625 e = &buf[i];
3626
3627 if (e->timestamp == 0)
3628 break; /* end */
3629
3630 e->timestamp = be64toh(e->timestamp);
3631 e->seqno = be32toh(e->seqno);
3632 for (j = 0; j < 8; j++)
3633 e->params[j] = be32toh(e->params[j]);
3634
3635 if (e->timestamp < ftstamp) {
3636 ftstamp = e->timestamp;
3637 first = i;
3638 }
3639 }
3640
3641 if (buf[first].timestamp == 0)
3642 goto done; /* nothing in the log */
3643
3644 rc = sysctl_wire_old_buffer(req, 0);
3645 if (rc != 0)
3646 goto done;
3647
3648 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
2855 sbuf_printf(sb, "\n%10s %15s %8s %8s %s\n",
3649 if (sb == NULL) {
3650 rc = ENOMEM;
3651 goto done;
3652 }
3653 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
2856 "Seq#", "Tstamp", "Level", "Facility", "Message");
2857
2858 i = first;
2859 do {
2860 e = &buf[i];
2861 if (e->timestamp == 0)
2862 break; /* end */
2863
2864 sbuf_printf(sb, "%10d %15ju %8s %8s ",
2865 e->seqno, e->timestamp,
2866 (e->level < ARRAY_SIZE(devlog_level_strings) ?
2867 devlog_level_strings[e->level] : "UNKNOWN"),
2868 (e->facility < ARRAY_SIZE(devlog_facility_strings) ?
2869 devlog_facility_strings[e->facility] : "UNKNOWN"));
2870 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
2871 e->params[2], e->params[3], e->params[4],
2872 e->params[5], e->params[6], e->params[7]);
2873
2874 if (++i == nentries)
2875 i = 0;
2876 } while (i != first);
2877
2878 rc = sbuf_finish(sb);
2879 sbuf_delete(sb);
2880done:
2881 free(buf, M_CXGBE);
2882 return (rc);
2883}
2884
3654 "Seq#", "Tstamp", "Level", "Facility", "Message");
3655
3656 i = first;
3657 do {
3658 e = &buf[i];
3659 if (e->timestamp == 0)
3660 break; /* end */
3661
3662 sbuf_printf(sb, "%10d %15ju %8s %8s ",
3663 e->seqno, e->timestamp,
3664 (e->level < ARRAY_SIZE(devlog_level_strings) ?
3665 devlog_level_strings[e->level] : "UNKNOWN"),
3666 (e->facility < ARRAY_SIZE(devlog_facility_strings) ?
3667 devlog_facility_strings[e->facility] : "UNKNOWN"));
3668 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
3669 e->params[2], e->params[3], e->params[4],
3670 e->params[5], e->params[6], e->params[7]);
3671
3672 if (++i == nentries)
3673 i = 0;
3674 } while (i != first);
3675
3676 rc = sbuf_finish(sb);
3677 sbuf_delete(sb);
3678done:
3679 free(buf, M_CXGBE);
3680 return (rc);
3681}
3682
3683static int
3684sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
3685{
3686 struct adapter *sc = arg1;
3687 struct sbuf *sb;
3688 int rc;
3689 struct tp_fcoe_stats stats[4];
3690
3691 rc = sysctl_wire_old_buffer(req, 0);
3692 if (rc != 0)
3693 return (rc);
3694
3695 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3696 if (sb == NULL)
3697 return (ENOMEM);
3698
3699 t4_get_fcoe_stats(sc, 0, &stats[0]);
3700 t4_get_fcoe_stats(sc, 1, &stats[1]);
3701 t4_get_fcoe_stats(sc, 2, &stats[2]);
3702 t4_get_fcoe_stats(sc, 3, &stats[3]);
3703
3704 sbuf_printf(sb, " channel 0 channel 1 "
3705 "channel 2 channel 3\n");
3706 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
3707 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
3708 stats[3].octetsDDP);
3709 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
3710 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
3711 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
3712 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
3713 stats[3].framesDrop);
3714
3715 rc = sbuf_finish(sb);
3716 sbuf_delete(sb);
3717
3718 return (rc);
3719}
3720
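/*
 * Display the hardware tx schedulers: mode (class or flow), channel
 * binding, rate, and the inter-packet gaps from the pace table.
 */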
3721static int
3722sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
3723{
3724 struct adapter *sc = arg1;
3725 struct sbuf *sb;
3726 int rc, i;
3727 unsigned int map, kbps, ipg, mode;
3728 unsigned int pace_tab[NTX_SCHED];
3729
3730 rc = sysctl_wire_old_buffer(req, 0);
3731 if (rc != 0)
3732 return (rc);
3733
3734 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3735 if (sb == NULL)
3736 return (ENOMEM);
3737
3738 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
3739 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
3740 t4_read_pace_tbl(sc, pace_tab);
3741
3742 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
3743 "Class IPG (0.1 ns) Flow IPG (us)");
3744
3745 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
3746 t4_get_tx_sched(sc, i, &kbps, &ipg);
3747 sbuf_printf(sb, "\n %u %-5s %u ", i,
3748 (mode & (1 << i)) ? "flow" : "class", map & 3);
3749 if (kbps)
3750 sbuf_printf(sb, "%9u ", kbps);
3751 else
3752 sbuf_printf(sb, " disabled ");
3753
3754 if (ipg)
3755 sbuf_printf(sb, "%13u ", ipg);
3756 else
3757 sbuf_printf(sb, " disabled ");
3758
3759 if (pace_tab[i])
3760 sbuf_printf(sb, "%10u", pace_tab[i]);
3761 else
3762 sbuf_printf(sb, " disabled");
3763 }
3764
3765 rc = sbuf_finish(sb);
3766 sbuf_delete(sb);
3767
3768 return (rc);
3769}
3770
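/* MAC statistics for the loopback ports, displayed two ports at a time. */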
3771static int
3772sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
3773{
3774 struct adapter *sc = arg1;
3775 struct sbuf *sb;
3776 int rc, i, j;
3777 uint64_t *p0, *p1;
3778 struct lb_port_stats s[2];
3779 static const char *stat_name[] = {
3780 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
3781 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
3782 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
3783 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
3784 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
3785 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
3786 "BG2FramesTrunc:", "BG3FramesTrunc:"
3787 };
3788
3789 rc = sysctl_wire_old_buffer(req, 0);
3790 if (rc != 0)
3791 return (rc);
3792
3793 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3794 if (sb == NULL)
3795 return (ENOMEM);
3796
3797 memset(s, 0, sizeof(s));
3798
3799 for (i = 0; i < 4; i += 2) {
3800 t4_get_lb_stats(sc, i, &s[0]);
3801 t4_get_lb_stats(sc, i + 1, &s[1]);
3802
3803 p0 = &s[0].octets;
3804 p1 = &s[1].octets;
3805 sbuf_printf(sb, "%s Loopback %u"
3806 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
3807
3808 for (j = 0; j < ARRAY_SIZE(stat_name); j++)
3809 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
3810 *p0++, *p1++);
3811 }
3812
3813 rc = sbuf_finish(sb);
3814 sbuf_delete(sb);
3815
3816 return (rc);
3817}
3818
3819struct mem_desc {
3820 unsigned int base;
3821 unsigned int limit;
3822 unsigned int idx;
3823};
3824
3825static int
3826mem_desc_cmp(const void *a, const void *b)
3827{
3828 return ((const struct mem_desc *)a)->base -
3829 ((const struct mem_desc *)b)->base;
3830}
3831
3832static void
3833mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
3834 unsigned int to)
3835{
3836 unsigned int size;
3837
3838 size = to - from + 1;
3839 if (size == 0)
3840 return;
3841
3842 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
3843 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
3844}
3845
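/*
 * Show the card's memory layout: which memories are enabled (EDC0, EDC1,
 * external MC), the hardware regions carved out of them, any holes in
 * between, and current rx page usage.
 */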
3846static int
3847sysctl_meminfo(SYSCTL_HANDLER_ARGS)
3848{
3849 struct adapter *sc = arg1;
3850 struct sbuf *sb;
3851 int rc, i, n;
3852 uint32_t lo, hi;
3853 static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
3854 static const char *region[] = {
3855 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
3856 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
3857 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
3858 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
3859 "RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
3860 "ULPTX state:", "On-chip queues:"
3861 };
3862 struct mem_desc avail[3];
3863 struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
3864 struct mem_desc *md = mem;
3865
3866 rc = sysctl_wire_old_buffer(req, 0);
3867 if (rc != 0)
3868 return (rc);
3869
3870 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3871 if (sb == NULL)
3872 return (ENOMEM);
3873
3874 for (i = 0; i < ARRAY_SIZE(mem); i++) {
3875 mem[i].limit = 0;
3876 mem[i].idx = i;
3877 }
3878
3879 /* Find and sort the populated memory ranges */
3880 i = 0;
3881 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3882 if (lo & F_EDRAM0_ENABLE) {
3883 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3884 avail[i].base = G_EDRAM0_BASE(hi) << 20;
3885 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
3886 avail[i].idx = 0;
3887 i++;
3888 }
3889 if (lo & F_EDRAM1_ENABLE) {
3890 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3891 avail[i].base = G_EDRAM1_BASE(hi) << 20;
3892 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
3893 avail[i].idx = 1;
3894 i++;
3895 }
3896 if (lo & F_EXT_MEM_ENABLE) {
3897 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3898 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
3899 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
3900 avail[i].idx = 2;
3901 i++;
3902 }
3903 if (!i) { /* no memory available */
3904 sbuf_delete(sb); /* don't leak the sbuf allocated above */
 return (0);
 }
3905 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
3906
3907 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
3908 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
3909 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
3910 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
3911 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
3912 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
3913 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
3914 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
3915 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
3916
3917 /* the next few have explicit upper bounds */
3918 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
3919 md->limit = md->base - 1 +
3920 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
3921 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
3922 md++;
3923
3924 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
3925 md->limit = md->base - 1 +
3926 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
3927 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
3928 md++;
3929
3930 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
3931 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
3932 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
3933 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
3934 } else {
3935 md->base = 0;
3936 md->idx = ARRAY_SIZE(region); /* hide it */
3937 }
3938 md++;
3939
3940#define ulp_region(reg) \
3941 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
3942 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
3943
3944 ulp_region(RX_ISCSI);
3945 ulp_region(RX_TDDP);
3946 ulp_region(TX_TPT);
3947 ulp_region(RX_STAG);
3948 ulp_region(RX_RQ);
3949 ulp_region(RX_RQUDP);
3950 ulp_region(RX_PBL);
3951 ulp_region(TX_PBL);
3952#undef ulp_region
3953
3954 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
3955 md->limit = md->base + sc->tids.ntids - 1;
3956 md++;
3957 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
3958 md->limit = md->base + sc->tids.ntids - 1;
3959 md++;
3960
3961 md->base = sc->vres.ocq.start;
3962 if (sc->vres.ocq.size)
3963 md->limit = md->base + sc->vres.ocq.size - 1;
3964 else
3965 md->idx = ARRAY_SIZE(region); /* hide it */
3966 md++;
3967
3968 /* add any address-space holes, there can be up to 3 */
3969 for (n = 0; n < i - 1; n++)
3970 if (avail[n].limit < avail[n + 1].base)
3971 (md++)->base = avail[n].limit;
3972 if (avail[n].limit)
3973 (md++)->base = avail[n].limit;
3974
3975 n = md - mem;
3976 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
3977
3978 for (lo = 0; lo < i; lo++)
3979 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
3980 avail[lo].limit - 1);
3981
3982 sbuf_printf(sb, "\n");
3983 for (i = 0; i < n; i++) {
3984 if (mem[i].idx >= ARRAY_SIZE(region))
3985 continue; /* skip holes */
3986 if (!mem[i].limit)
3987 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
3988 mem_region_show(sb, region[mem[i].idx], mem[i].base,
3989 mem[i].limit);
3990 }
3991
3992 sbuf_printf(sb, "\n");
3993 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
3994 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
3995 mem_region_show(sb, "uP RAM:", lo, hi);
3996
3997 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
3998 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
3999 mem_region_show(sb, "uP Extmem2:", lo, hi);
4000
4001 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
4002 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
4003 G_PMRXMAXPAGE(lo),
4004 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
4005 (lo & F_PMRXNUMCHN) ? 2 : 1);
4006
4007 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
4008 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
4009 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
4010 G_PMTXMAXPAGE(lo),
4011 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
4012 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
4013 sbuf_printf(sb, "%u p-structs\n",
4014 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
4015
4016 for (i = 0; i < 4; i++) {
4017 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
4018 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
4019 i, G_USED(lo), G_ALLOC(lo));
4020 }
4021 for (i = 0; i < 4; i++) {
4022 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
4023 sbuf_printf(sb,
4024 "\nLoopback %d using %u pages out of %u allocated",
4025 i, G_USED(lo), G_ALLOC(lo));
4026 }
4027
4028 rc = sbuf_finish(sb);
4029 sbuf_delete(sb);
4030
4031 return (rc);
4032}
4033
4034static int
4035sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
4036{
4037 struct adapter *sc = arg1;
4038 struct sbuf *sb;
4039 int rc;
4040 uint16_t mtus[NMTUS];
4041
4042 rc = sysctl_wire_old_buffer(req, 0);
4043 if (rc != 0)
4044 return (rc);
4045
4046 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4047 if (sb == NULL)
4048 return (ENOMEM);
4049
4050 t4_read_mtu_tbl(sc, mtus, NULL);
4051
4052 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
4053 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
4054 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
4055 mtus[14], mtus[15]);
4056
4057 rc = sbuf_finish(sb);
4058 sbuf_delete(sb);
4059
4060 return (rc);
4061}
4062
4063static int
4064sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
4065{
4066 struct adapter *sc = arg1;
4067 struct sbuf *sb;
4068 int rc, i;
4069 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
4070 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
4071 static const char *pm_stats[] = {
4072 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
4073 };
4074
4075 rc = sysctl_wire_old_buffer(req, 0);
4076 if (rc != 0)
4077 return (rc);
4078
4079 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4080 if (sb == NULL)
4081 return (ENOMEM);
4082
4083 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
4084 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
4085
4086 sbuf_printf(sb, " Tx count Tx cycles "
4087 "Rx count Rx cycles");
4088 for (i = 0; i < PM_NSTATS; i++)
4089 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
4090 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
4091
4092 rc = sbuf_finish(sb);
4093 sbuf_delete(sb);
4094
4095 return (rc);
4096}
4097
4098static int
4099sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
4100{
4101 struct adapter *sc = arg1;
4102 struct sbuf *sb;
4103 int rc;
4104 struct tp_rdma_stats stats;
4105
4106 rc = sysctl_wire_old_buffer(req, 0);
4107 if (rc != 0)
4108 return (rc);
4109
4110 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4111 if (sb == NULL)
4112 return (ENOMEM);
4113
4114 t4_tp_get_rdma_stats(sc, &stats);
4115 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
4116 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
4117
4118 rc = sbuf_finish(sb);
4119 sbuf_delete(sb);
4120
4121 return (rc);
4122}
4123
4124static int
4125sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
4126{
4127 struct adapter *sc = arg1;
4128 struct sbuf *sb;
4129 int rc;
4130 struct tp_tcp_stats v4, v6;
4131
4132 rc = sysctl_wire_old_buffer(req, 0);
4133 if (rc != 0)
4134 return (rc);
4135
4136 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4137 if (sb == NULL)
4138 return (ENOMEM);
4139
4140 t4_tp_get_tcp_stats(sc, &v4, &v6);
4141 sbuf_printf(sb,
4142 " IP IPv6\n");
4143 sbuf_printf(sb, "OutRsts: %20u %20u\n",
4144 v4.tcpOutRsts, v6.tcpOutRsts);
4145 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
4146 v4.tcpInSegs, v6.tcpInSegs);
4147 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
4148 v4.tcpOutSegs, v6.tcpOutSegs);
4149 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
4150 v4.tcpRetransSegs, v6.tcpRetransSegs);
4151
4152 rc = sbuf_finish(sb);
4153 sbuf_delete(sb);
4154
4155 return (rc);
4156}
4157
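/*
 * Summarize TID usage: ATIDs, TIDs (including the hash range when the
 * hash filter region is enabled), STIDs, and FTIDs.
 */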
4158static int
4159sysctl_tids(SYSCTL_HANDLER_ARGS)
4160{
4161 struct adapter *sc = arg1;
4162 struct sbuf *sb;
4163 int rc;
4164 struct tid_info *t = &sc->tids;
4165
4166 rc = sysctl_wire_old_buffer(req, 0);
4167 if (rc != 0)
4168 return (rc);
4169
4170 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4171 if (sb == NULL)
4172 return (ENOMEM);
4173
4174 if (t->natids) {
4175 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
4176 t->atids_in_use);
4177 }
4178
4179 if (t->ntids) {
4180 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
4181 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
4182
4183 if (b) {
4184 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
4185 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4186 t->ntids - 1);
4187 } else {
4188 sbuf_printf(sb, "TID range: %u-%u",
4189 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4190 t->ntids - 1);
4191 }
4192 } else
4193 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
4194 sbuf_printf(sb, ", in use: %u\n",
4195 atomic_load_acq_int(&t->tids_in_use));
4196 }
4197
4198 if (t->nstids) {
4199 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
4200 t->stid_base + t->nstids - 1, t->stids_in_use);
4201 }
4202
4203 if (t->nftids) {
4204 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
4205 t->ftid_base + t->nftids - 1);
4206 }
4207
4208 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
4209 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
4210 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
4211
4212 rc = sbuf_finish(sb);
4213 sbuf_delete(sb);
4214
4215 return (rc);
4216}
4217
4218static int
4219sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
4220{
4221 struct adapter *sc = arg1;
4222 struct sbuf *sb;
4223 int rc;
4224 struct tp_err_stats stats;
4225
4226 rc = sysctl_wire_old_buffer(req, 0);
4227 if (rc != 0)
4228 return (rc);
4229
4230 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4231 if (sb == NULL)
4232 return (ENOMEM);
4233
4234 t4_tp_get_err_stats(sc, &stats);
4235
4236 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4237 "channel 3\n");
4238 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
4239 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
4240 stats.macInErrs[3]);
4241 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
4242 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
4243 stats.hdrInErrs[3]);
4244 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
4245 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
4246 stats.tcpInErrs[3]);
4247 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
4248 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
4249 stats.tcp6InErrs[3]);
4250 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
4251 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
4252 stats.tnlCongDrops[3]);
4253 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
4254 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
4255 stats.tnlTxDrops[3]);
4256 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
4257 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
4258 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
4259 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
4260 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
4261 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
4262 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
4263 stats.ofldNoNeigh, stats.ofldCongDefer);
4264
4265 rc = sbuf_finish(sb);
4266 sbuf_delete(sb);
4267
4268 return (rc);
4269}
4270
4271static int
4272sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
4273{
4274 struct adapter *sc = arg1;
4275 struct sbuf *sb;
4276 int rc;
4277 u64 nrate[NCHAN], orate[NCHAN];
4278
4279 rc = sysctl_wire_old_buffer(req, 0);
4280 if (rc != 0)
4281 return (rc);
4282
4283 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4284 if (sb == NULL)
4285 return (ENOMEM);
4286
4287 t4_get_chan_txrate(sc, nrate, orate);
4288 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4289 "channel 3\n");
4290 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
4291 nrate[0], nrate[1], nrate[2], nrate[3]);
4292 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
4293 orate[0], orate[1], orate[2], orate[3]);
4294
4295 rc = sbuf_finish(sb);
4296 sbuf_delete(sb);
4297
4298 return (rc);
4299}
4300
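/*
 * Resume transmission on a txq: pick up the held-over mbuf, if any, or
 * dequeue from the buf_ring, and hand it to t4_eth_tx.
 */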
4301static inline void
4302txq_start(struct ifnet *ifp, struct sge_txq *txq)
4303{
4304 struct buf_ring *br;
4305 struct mbuf *m;
4306
4307 TXQ_LOCK_ASSERT_OWNED(txq);
4308
4309 br = txq->br;
4310 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
4311 if (m)
4312 t4_eth_tx(ifp, txq, m);
4313}
4314
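/*
 * Callout handler for a stalled eq.  Reschedules itself for the next tick
 * while tx cannot resume; otherwise it enqueues the eq's tx task on the
 * channel's taskqueue.
 */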
4315void
2900cxgbe_txq_start(void *arg, int count)
4316t4_tx_callout(void *arg)
4317{
2902 struct sge_txq *txq = arg;
4318 struct sge_eq *eq = arg;
4319 struct adapter *sc;
4320
2904 TXQ_LOCK(txq);
2905 if (txq->eq.flags & EQ_CRFLUSHED) {
2906 txq->eq.flags &= ~EQ_CRFLUSHED;
4321 if (EQ_TRYLOCK(eq) == 0)
4322 goto reschedule;
4323
4324 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
4325 EQ_UNLOCK(eq);
4326reschedule:
4327 if (__predict_true(!(eq->flags & EQ_DOOMED)))
4328 callout_schedule(&eq->tx_callout, 1);
4329 return;
4330 }
4331
4332 EQ_LOCK_ASSERT_OWNED(eq);
4333
4334 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
4335
4336 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4337 struct sge_txq *txq = arg;
4338 struct port_info *pi = txq->ifp->if_softc;
4339
4340 sc = pi->adapter;
4341 } else {
4342 struct sge_wrq *wrq = arg;
4343
4344 sc = wrq->adapter;
4345 }
4346
4347 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
4348 }
4349
4350 EQ_UNLOCK(eq);
4351}
4352
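/*
 * Taskqueue handler: restarts transmission on an Ethernet txq or drains
 * the pending work requests of a wrq, depending on the eq's type.
 */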
4353void
4354t4_tx_task(void *arg, int count)
4355{
4356 struct sge_eq *eq = arg;
4357
4358 EQ_LOCK(eq);
4359 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4360 struct sge_txq *txq = arg;
4361 txq_start(txq->ifp, txq);
2908 } else
2909 wakeup_one(txq); /* txq is going away, wakeup free_txq */
2910 TXQ_UNLOCK(txq);
4362 } else {
4363 struct sge_wrq *wrq = arg;
4364 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
4365 }
4366 EQ_UNLOCK(eq);
4367}
4368
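/* Translate the hardware filter configuration into T4_FILTER_* mode bits. */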
4369static uint32_t
4370fconf_to_mode(uint32_t fconf)
4371{
4372 uint32_t mode;
4373
4374 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
4375 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
4376
4377 if (fconf & F_FRAGMENTATION)
4378 mode |= T4_FILTER_IP_FRAGMENT;
4379
4380 if (fconf & F_MPSHITTYPE)
4381 mode |= T4_FILTER_MPS_HIT_TYPE;
4382
4383 if (fconf & F_MACMATCH)
4384 mode |= T4_FILTER_MAC_IDX;
4385
4386 if (fconf & F_ETHERTYPE)
4387 mode |= T4_FILTER_ETH_TYPE;
4388
4389 if (fconf & F_PROTOCOL)
4390 mode |= T4_FILTER_IP_PROTO;
4391
4392 if (fconf & F_TOS)
4393 mode |= T4_FILTER_IP_TOS;
4394
4395 if (fconf & F_VLAN)
2940 mode |= T4_FILTER_IVLAN;
4396 mode |= T4_FILTER_VLAN;
4397
4398 if (fconf & F_VNIC_ID)
2943 mode |= T4_FILTER_OVLAN;
4399 mode |= T4_FILTER_VNIC;
4400
4401 if (fconf & F_PORT)
4402 mode |= T4_FILTER_PORT;
4403
4404 if (fconf & F_FCOE)
4405 mode |= T4_FILTER_FCoE;
4406
4407 return (mode);
4408}
4409
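/* The inverse of fconf_to_mode. */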
4410static uint32_t
4411mode_to_fconf(uint32_t mode)
4412{
4413 uint32_t fconf = 0;
4414
4415 if (mode & T4_FILTER_IP_FRAGMENT)
4416 fconf |= F_FRAGMENTATION;
4417
4418 if (mode & T4_FILTER_MPS_HIT_TYPE)
4419 fconf |= F_MPSHITTYPE;
4420
4421 if (mode & T4_FILTER_MAC_IDX)
4422 fconf |= F_MACMATCH;
4423
4424 if (mode & T4_FILTER_ETH_TYPE)
4425 fconf |= F_ETHERTYPE;
4426
4427 if (mode & T4_FILTER_IP_PROTO)
4428 fconf |= F_PROTOCOL;
4429
4430 if (mode & T4_FILTER_IP_TOS)
4431 fconf |= F_TOS;
4432
2977 if (mode & T4_FILTER_IVLAN)
4433 if (mode & T4_FILTER_VLAN)
4434 fconf |= F_VLAN;
4435
2980 if (mode & T4_FILTER_OVLAN)
4436 if (mode & T4_FILTER_VNIC)
4437 fconf |= F_VNIC_ID;
4438
4439 if (mode & T4_FILTER_PORT)
4440 fconf |= F_PORT;
4441
4442 if (mode & T4_FILTER_FCoE)
4443 fconf |= F_FCOE;
4444
4445 return (fconf);
4446}
4447
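/*
 * Determine the filter configuration bits that a filter specification
 * needs; a field is in use if its value or its mask is nonzero.
 */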
4448static uint32_t
4449fspec_to_fconf(struct t4_filter_specification *fs)
4450{
4451 uint32_t fconf = 0;
4452
4453 if (fs->val.frag || fs->mask.frag)
4454 fconf |= F_FRAGMENTATION;
4455
4456 if (fs->val.matchtype || fs->mask.matchtype)
4457 fconf |= F_MPSHITTYPE;
4458
4459 if (fs->val.macidx || fs->mask.macidx)
4460 fconf |= F_MACMATCH;
4461
4462 if (fs->val.ethtype || fs->mask.ethtype)
4463 fconf |= F_ETHERTYPE;
4464
4465 if (fs->val.proto || fs->mask.proto)
4466 fconf |= F_PROTOCOL;
4467
4468 if (fs->val.tos || fs->mask.tos)
4469 fconf |= F_TOS;
4470
3015 if (fs->val.ivlan_vld || fs->mask.ivlan_vld)
4471 if (fs->val.vlan_vld || fs->mask.vlan_vld)
4472 fconf |= F_VLAN;
4473
3018 if (fs->val.ovlan_vld || fs->mask.ovlan_vld)
4474 if (fs->val.vnic_vld || fs->mask.vnic_vld)
4475 fconf |= F_VNIC_ID;
4476
4477 if (fs->val.iport || fs->mask.iport)
4478 fconf |= F_PORT;
4479
4480 if (fs->val.fcoe || fs->mask.fcoe)
4481 fconf |= F_FCOE;
4482
4483 return (fconf);
4484}
4485
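/*
 * Report the filter mode, rereading TP_VLAN_PRI_MAP and resyncing the
 * cached copy if it has drifted from the hardware.
 */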
4486static int
4487get_filter_mode(struct adapter *sc, uint32_t *mode)
4488{
4489 uint32_t fconf;
4490
4491 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
4492 A_TP_VLAN_PRI_MAP);
4493
3038 *mode = fconf_to_mode(fconf);
4494 if (sc->filter_mode != fconf) {
4495 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
4496 device_get_nameunit(sc->dev), sc->filter_mode, fconf);
4497 sc->filter_mode = fconf;
4498 }
4499
4500 *mode = fconf_to_mode(sc->filter_mode);
4501
4502 return (0);
4503}
4504
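/*
 * Change the global filter mode.  Refused while any filter is in use or
 * while offload is active on any port.
 */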
4505static int
4506set_filter_mode(struct adapter *sc, uint32_t mode)
4507{
4508 uint32_t fconf;
4509 int rc;
4510
4511 fconf = mode_to_fconf(mode);
4512
4513 ADAPTER_LOCK(sc);
4514 if (IS_BUSY(sc)) {
4515 rc = EAGAIN;
4516 goto done;
4517 }
4518
4519 if (sc->tids.ftids_in_use > 0) {
4520 rc = EBUSY;
4521 goto done;
4522 }
4523
4524#ifndef TCP_OFFLOAD_DISABLE
4525 if (sc->offload_map) {
4526 rc = EBUSY;
4527 goto done;
4528 }
4529#endif
4530
4531#ifdef notyet
4532 rc = -t4_set_filter_mode(sc, fconf);
4533 if (rc == 0)
4534 sc->filter_mode = fconf;
4535#else
4536 rc = ENOTSUP;
4537#endif
4538
4539done:
4540 ADAPTER_UNLOCK(sc);
4541 return (rc);
4542}
4543
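/*
 * Read a filter's hit count from its TCB, accessed through PCIe memory
 * window 0.  The register read-back flushes the window setup write.
 */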
4544static inline uint64_t
4545get_filter_hits(struct adapter *sc, uint32_t fid)
4546{
4547 uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
4548 uint64_t hits;
4549
4550 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
4551 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
4552 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
4553 hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);
4554
4555 return (be64toh(hits));
4556}
4557
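/*
 * Return the first valid filter at or after t->idx; t->idx is set to
 * 0xffffffff if there is none.
 */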
4558static int
4559get_filter(struct adapter *sc, struct t4_filter *t)
4560{
4561 int i, nfilters = sc->tids.nftids;
4562 struct filter_entry *f;
4563
4564 ADAPTER_LOCK_ASSERT_OWNED(sc);
4565
4566 if (IS_BUSY(sc))
4567 return (EAGAIN);
4568
4569 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
4570 t->idx >= nfilters) {
4571 t->idx = 0xffffffff;
4572 return (0);
4573 }
4574
4575 f = &sc->tids.ftid_tab[t->idx];
4576 for (i = t->idx; i < nfilters; i++, f++) {
4577 if (f->valid) {
4578 t->idx = i;
4579 t->l2tidx = f->l2t ? f->l2t->idx : 0;
4580 t->smtidx = f->smtidx;
4581 if (f->fs.hitcnts)
4582 t->hits = get_filter_hits(sc, t->idx);
4583 else
4584 t->hits = UINT64_MAX;
4585 t->fs = f->fs;
4586
4587 return (0);
4588 }
4589 }
4590
4591 t->idx = 0xffffffff;
4592 return (0);
4593}
4594
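/*
 * Validate a new filter against the global filter mode and hardware
 * limits, allocate the ftid table on first use, and send the filter work
 * request.
 */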
4595static int
4596set_filter(struct adapter *sc, struct t4_filter *t)
4597{
3122 uint32_t fconf;
4598 unsigned int nfilters, nports;
4599 struct filter_entry *f;
4600 int i;
4601
4602 ADAPTER_LOCK_ASSERT_OWNED(sc);
4603
4604 nfilters = sc->tids.nftids;
4605 nports = sc->params.nports;
4606
4607 if (nfilters == 0)
4608 return (ENOTSUP);
4609
4610 if (!(sc->flags & FULL_INIT_DONE))
4611 return (EAGAIN);
4612
4613 if (t->idx >= nfilters)
4614 return (EINVAL);
4615
4616 /* Validate against the global filter mode */
3142 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
3143 A_TP_VLAN_PRI_MAP);
3144 if ((fconf | fspec_to_fconf(&t->fs)) != fconf)
4617 if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode)
4618 return (E2BIG);
4619
4620 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
4621 return (EINVAL);
4622
4623 if (t->fs.val.iport >= nports)
4624 return (EINVAL);
4625
4626 /* Can't specify an iq if not steering to it */
4627 if (!t->fs.dirsteer && t->fs.iq)
4628 return (EINVAL);
4629
4630 /* IPv6 filter idx must be 4 aligned */
4631 if (t->fs.type == 1 &&
4632 ((t->idx & 0x3) || t->idx + 4 >= nfilters))
4633 return (EINVAL);
4634
4635 if (sc->tids.ftid_tab == NULL) {
4636 KASSERT(sc->tids.ftids_in_use == 0,
4637 ("%s: no memory allocated but filters_in_use > 0",
4638 __func__));
4639
4640 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
4641 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
4642 if (sc->tids.ftid_tab == NULL)
4643 return (ENOMEM);
4644 }
4645
4646 for (i = 0; i < 4; i++) {
4647 f = &sc->tids.ftid_tab[t->idx + i];
4648
4649 if (f->pending || f->valid)
4650 return (EBUSY);
4651 if (f->locked)
4652 return (EPERM);
4653
4654 if (t->fs.type == 0)
4655 break;
4656 }
4657
4658 f = &sc->tids.ftid_tab[t->idx];
4659 f->fs = t->fs;
4660
4661 return set_filter_wr(sc, t->idx);
4662}
4663
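/*
 * Delete the filter at t->idx.  An installed filter is removed with a work
 * request (completion is asynchronous, via filter_rpl()); deleting an index
 * with nothing installed succeeds trivially, and a filter with an operation
 * already in flight returns EBUSY.
 */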
4664static int
4665del_filter(struct adapter *sc, struct t4_filter *t)
4666{
4667 unsigned int nfilters;
4668 struct filter_entry *f;
4669
4670 ADAPTER_LOCK_ASSERT_OWNED(sc);
4671
4672 if (IS_BUSY(sc))
4673 return (EAGAIN);
4674
4675 nfilters = sc->tids.nftids;
4676
4677 if (nfilters == 0)
4678 return (ENOTSUP);
4679
4680 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
4681 t->idx >= nfilters)
4682 return (EINVAL);
4683
4684 if (!(sc->flags & FULL_INIT_DONE))
4685 return (EAGAIN);
4686
4687 f = &sc->tids.ftid_tab[t->idx];
4688
4689 if (f->pending)
4690 return (EBUSY);
4691 if (f->locked)
4692 return (EPERM);
4693
4694 if (f->valid) {
4695 t->fs = f->fs; /* extra info for the caller */
4696 return del_filter_wr(sc, t->idx);
4697 }
4698
4699 return (0);
4700}
4701
4702static void
4703clear_filter(struct filter_entry *f)
4704{
4705 if (f->l2t)
4706 t4_l2t_release(f->l2t);
4707
4708 bzero(f, sizeof (*f));
4709}
4710
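/*
 * Build a FW_FILTER_WR work request for the filter at fidx and hand it to
 * the firmware.  A switching L2T entry is allocated first if the filter
 * rewrites the destination MAC or VLAN.  The filter stays in the pending
 * state until filter_rpl() processes the firmware's reply.
 */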
4711static int
4712set_filter_wr(struct adapter *sc, int fidx)
4713{
4714 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4715 struct mbuf *m;
4716 struct fw_filter_wr *fwr;
4717 unsigned int ftid;
4718
4719 ADAPTER_LOCK_ASSERT_OWNED(sc);
4720
4721 if (f->fs.newdmac || f->fs.newvlan) {
4722 /* This filter needs an L2T entry; allocate one. */
4723 f->l2t = t4_l2t_alloc_switching(sc->l2t);
4724 if (f->l2t == NULL)
4725 return (EAGAIN);
4726 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
4727 f->fs.dmac)) {
4728 t4_l2t_release(f->l2t);
4729 f->l2t = NULL;
4730 return (ENOMEM);
4731 }
4732 }
4733
4734 ftid = sc->tids.ftid_base + fidx;
4735
4736 m = m_gethdr(M_NOWAIT, MT_DATA);
4737 if (m == NULL)
4738 return (ENOMEM);
4739
4740 fwr = mtod(m, struct fw_filter_wr *);
4741 m->m_len = m->m_pkthdr.len = sizeof(*fwr);
4742 bzero(fwr, sizeof (*fwr));
4743
4744 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
4745 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
4746 fwr->tid_to_iq =
4747 htobe32(V_FW_FILTER_WR_TID(ftid) |
4748 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
4749 V_FW_FILTER_WR_NOREPLY(0) |
4750 V_FW_FILTER_WR_IQ(f->fs.iq));
4751 fwr->del_filter_to_l2tix =
4752 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
4753 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
4754 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
4755 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
4756 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
4757 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
4758 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
4759 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
4760 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
4761 f->fs.newvlan == VLAN_REWRITE) |
4762 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
4763 f->fs.newvlan == VLAN_REWRITE) |
4764 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
4765 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
4766 V_FW_FILTER_WR_PRIO(f->fs.prio) |
4767 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
4768 fwr->ethtype = htobe16(f->fs.val.ethtype);
4769 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
4770 fwr->frag_to_ovlan_vldm =
4771 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
4772 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
4773 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
4774 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
4775 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
4776 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
4777 fwr->smac_sel = 0;
4778 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
4779 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
4780 fwr->maci_to_matchtypem =
4781 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
4782 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
4783 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
4784 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
4785 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
4786 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
4787 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
4788 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
4789 fwr->ptcl = f->fs.val.proto;
4790 fwr->ptclm = f->fs.mask.proto;
4791 fwr->ttyp = f->fs.val.tos;
4792 fwr->ttypm = f->fs.mask.tos;
4793 fwr->ivlan = htobe16(f->fs.val.vlan);
4794 fwr->ivlanm = htobe16(f->fs.mask.vlan);
4795 fwr->ovlan = htobe16(f->fs.val.vnic);
4796 fwr->ovlanm = htobe16(f->fs.mask.vnic);
4797 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
4798 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
4799 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
4800 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
4801 fwr->lp = htobe16(f->fs.val.dport);
4802 fwr->lpm = htobe16(f->fs.mask.dport);
4803 fwr->fp = htobe16(f->fs.val.sport);
4804 fwr->fpm = htobe16(f->fs.mask.sport);
4805 if (f->fs.newsmac)
4806 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
4807
4808 f->pending = 1;
4809 sc->tids.ftids_in_use++;
4810
4811 t4_mgmt_tx(sc, m);
4812 return (0);
4813}
4814
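/*
 * Send the firmware a work request to delete the filter at fidx.  Like
 * filter creation, deletion completes asynchronously via filter_rpl().
 */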
4815static int
4816del_filter_wr(struct adapter *sc, int fidx)
4817{
4818 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4819 struct mbuf *m;
4820 struct fw_filter_wr *fwr;
4821 unsigned int ftid;
4822
4823 ADAPTER_LOCK_ASSERT_OWNED(sc);
4824
4825 ftid = sc->tids.ftid_base + fidx;
4826
4827 m = m_gethdr(M_NOWAIT, MT_DATA);
4828 if (m == NULL)
4829 return (ENOMEM);
4830
4831 fwr = mtod(m, struct fw_filter_wr *);
4832 m->m_len = m->m_pkthdr.len = sizeof(*fwr);
4833 bzero(fwr, sizeof (*fwr));
4834
4835 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
4836
4837 f->pending = 1;
4838 t4_mgmt_tx(sc, m);
4839 return (0);
4840}
4841
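/*
 * Handler for the firmware's replies to filter work requests.  A successful
 * add records the SMT index and marks the filter valid; a successful
 * delete, or any failure, clears the entry and releases its slot.
 */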
4842static int
4843filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4844{
4845 struct adapter *sc = iq->adapter;
4846 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
4847 unsigned int idx = GET_TID(rpl);
4848
4849 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4850 rss->opcode));
4851
4852 if (idx >= sc->tids.ftid_base &&
4853 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
4854 unsigned int rc = G_COOKIE(rpl->cookie);
4855 struct filter_entry *f = &sc->tids.ftid_tab[idx];
4856
4857 if (rc == FW_FILTER_WR_FLT_ADDED) {
4858 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
4859 f->pending = 0; /* asynchronous setup completed */
4860 f->valid = 1;
4861 return (0);
4862 }
4863
4864 if (rc != FW_FILTER_WR_FLT_DELETED) {
4865 /* Add or delete failed, need to display an error */
4866 device_printf(sc->dev,
4867 "filter %u setup failed with error %u\n", idx, rc);
4868 }
4869
4870 clear_filter(f);
4871 ADAPTER_LOCK(sc);
4872 sc->tids.ftids_in_use--;
4873 ADAPTER_UNLOCK(sc);
4874 }
4875
4876 return (0);
4877}
4878
4879static int
4880get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
4881{
4882 int rc = EINVAL;
4883
4884 if (cntxt->cid > M_CTXTQID)
4885 return (rc);
4886
4887 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
4888 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
4889 return (rc);
4890
4891 if (sc->flags & FW_OK) {
4892 ADAPTER_LOCK(sc); /* Avoid parallel t4_wr_mbox */
4893 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
4894 &cntxt->data[0]);
4895 ADAPTER_UNLOCK(sc);
4896 }
4897
4898 if (rc != 0) {
4899 /* Read via firmware failed or wasn't even attempted */
4900
4901 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
4902 &cntxt->data[0]);
4903 }
4904
4905 return (rc);
4906}
4907
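/*
 * Read a range of the card's memory through PCIe memory window 2.  The
 * request must be 4-byte aligned and must lie entirely within one of the
 * three memories (EDRAM0, EDRAM1, or external memory).  The window is
 * positioned on the 16B-aligned address at or below mr->addr and then
 * advanced in MEMWIN2_APERTURE-sized steps, copying 32 bits at a time;
 * e.g. a 64-byte read at address 0x1008 opens the window at 0x1000 and
 * copies words starting at offset 8 within it.
 */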
4908static int
4909read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
4910{
4911 uint32_t base, size, lo, hi, win, off, remaining, i, n;
4912 uint32_t *buf, *b;
4913 int rc;
4914
4915 /* reads are in multiples of 32 bits */
4916 if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
4917 return (EINVAL);
4918
4919 /*
4920 * We don't want to deal with potential holes so we mandate that the
4921 * requested region must lie entirely within one of the 3 memories.
4922 */
4923 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4924 if (lo & F_EDRAM0_ENABLE) {
4925 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4926 base = G_EDRAM0_BASE(hi) << 20;
4927 size = G_EDRAM0_SIZE(hi) << 20;
4928 if (size > 0 &&
4929 mr->addr >= base && mr->addr < base + size &&
4930 mr->addr + mr->len <= base + size)
4931 goto proceed;
4932 }
4933 if (lo & F_EDRAM1_ENABLE) {
4934 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4935 base = G_EDRAM1_BASE(hi) << 20;
4936 size = G_EDRAM1_SIZE(hi) << 20;
4937 if (size > 0 &&
4938 mr->addr >= base && mr->addr < base + size &&
4939 mr->addr + mr->len <= base + size)
4940 goto proceed;
4941 }
4942 if (lo & F_EXT_MEM_ENABLE) {
4943 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4944 base = G_EXT_MEM_BASE(hi) << 20;
4945 size = G_EXT_MEM_SIZE(hi) << 20;
4946 if (size > 0 &&
4947 mr->addr >= base && mr->addr < base + size &&
4948 mr->addr + mr->len <= base + size)
4949 goto proceed;
4950 }
4951 return (ENXIO);
4952
4953proceed:
4954 buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);
4955
4956 /*
4957 * Position the PCIe window (we use memwin2) to the 16B aligned area
4958 * just at/before the requested region.
4959 */
4960 win = mr->addr & ~0xf;
4961 off = mr->addr - win; /* offset of the requested region in the win */
4962 remaining = mr->len;
4963
4964 while (remaining) {
4965 t4_write_reg(sc,
4966 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
4967 t4_read_reg(sc,
4968 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
4969
4970 /* number of bytes that we'll copy in the inner loop */
4971 n = min(remaining, MEMWIN2_APERTURE - off);
4972
4973 for (i = 0; i < n; i += 4, remaining -= 4)
4974 *b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);
4975
4976 win += MEMWIN2_APERTURE;
4977 off = 0;
4978 }
4979
4980 rc = copyout(buf, mr->data, mr->len);
4981 free(buf, M_CXGBE);
4982
4983 return (rc);
4984}
4985
4986int
4987t4_os_find_pci_capability(struct adapter *sc, int cap)
4988{
4989 int i;
4990
4991 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
4992}
4993
4994int
4995t4_os_pci_save_state(struct adapter *sc)
4996{
4997 device_t dev;
4998 struct pci_devinfo *dinfo;
4999
5000 dev = sc->dev;
5001 dinfo = device_get_ivars(dev);
5002
5003 pci_cfg_save(dev, dinfo, 0);
5004 return (0);
5005}
5006
5007int
5008t4_os_pci_restore_state(struct adapter *sc)
5009{
5010 device_t dev;
5011 struct pci_devinfo *dinfo;
5012
5013 dev = sc->dev;
5014 dinfo = device_get_ivars(dev);
5015
5016 pci_cfg_restore(dev, dinfo);
5017 return (0);
5018}
5019
5020void
5021t4_os_portmod_changed(const struct adapter *sc, int idx)
5022{
5023 struct port_info *pi = sc->port[idx];
5024 static const char *mod_str[] = {
5025 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
5026 };
5027
5028 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
5029 if_printf(pi->ifp, "transceiver unplugged.\n");
5030 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
5031 if_printf(pi->ifp, "unknown transceiver inserted.\n");
5032 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
5033 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
5034 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) {
5035 if_printf(pi->ifp, "%s transceiver inserted.\n",
5036 mod_str[pi->mod_type]);
5037 } else {
5038 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
5039 pi->mod_type);
5040 }
5041}
5042
5043void
5044t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
5045{
5046 struct port_info *pi = sc->port[idx];
5047 struct ifnet *ifp = pi->ifp;
5048
5049 if (link_stat) {
5050 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
5051 if_link_state_change(ifp, LINK_STATE_UP);
5052 } else
5053 if_link_state_change(ifp, LINK_STATE_DOWN);
5054}
5055
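/* Run func(sc, arg) on every adapter in the global adapter list. */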
5056void
5057t4_iterate(void (*func)(struct adapter *, void *), void *arg)
5058{
5059 struct adapter *sc;
5060
5061 mtx_lock(&t4_list_lock);
5062 SLIST_FOREACH(sc, &t4_list, link) {
5063 /*
5064 * func should not make any assumptions about what state sc is
5065 * in - the only guarantee is that sc->sc_lock is a valid lock.
5066 */
5067 func(sc, arg);
5068 }
5069 mtx_unlock(&t4_list_lock);
5070}
5071
5072static int
5073t4_open(struct cdev *dev, int flags, int type, struct thread *td)
5074{
5075 return (0);
5076}
5077
5078static int
5079t4_close(struct cdev *dev, int flags, int type, struct thread *td)
5080{
5081 return (0);
5082}
5083
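/*
 * ioctl entry point for the nexus character device.  Every command requires
 * PRIV_DRIVER; the filter commands additionally run under the adapter lock.
 */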
5084static int
5085t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
5086 struct thread *td)
5087{
5088 int rc;
5089 struct adapter *sc = dev->si_drv1;
5090
5091 rc = priv_check(td, PRIV_DRIVER);
5092 if (rc != 0)
5093 return (rc);
5094
5095 switch (cmd) {
5096 case CHELSIO_T4_GETREG: {
5097 struct t4_reg *edata = (struct t4_reg *)data;
5098
5099 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
5100 return (EFAULT);
5101
5102 if (edata->size == 4)
5103 edata->val = t4_read_reg(sc, edata->addr);
5104 else if (edata->size == 8)
5105 edata->val = t4_read_reg64(sc, edata->addr);
5106 else
5107 return (EINVAL);
5108
5109 break;
5110 }
5111 case CHELSIO_T4_SETREG: {
5112 struct t4_reg *edata = (struct t4_reg *)data;
5113
5114 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
5115 return (EFAULT);
5116
5117 if (edata->size == 4) {
5118 if (edata->val & 0xffffffff00000000)
5119 return (EINVAL);
5120 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
5121 } else if (edata->size == 8)
5122 t4_write_reg64(sc, edata->addr, edata->val);
5123 else
5124 return (EINVAL);
5125 break;
5126 }
5127 case CHELSIO_T4_REGDUMP: {
5128 struct t4_regdump *regs = (struct t4_regdump *)data;
5129 int reglen = T4_REGDUMP_SIZE;
5130 uint8_t *buf;
5131
5132 if (regs->len < reglen) {
5133 regs->len = reglen; /* hint to the caller */
5134 return (ENOBUFS);
5135 }
5136
5137 regs->len = reglen;
5138 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
5139 t4_get_regs(sc, regs, buf);
5140 rc = copyout(buf, regs->data, reglen);
5141 free(buf, M_CXGBE);
5142 break;
5143 }
5144 case CHELSIO_T4_GET_FILTER_MODE:
5145 rc = get_filter_mode(sc, (uint32_t *)data);
5146 break;
5147 case CHELSIO_T4_SET_FILTER_MODE:
5148 rc = set_filter_mode(sc, *(uint32_t *)data);
5149 break;
5150 case CHELSIO_T4_GET_FILTER:
5151 ADAPTER_LOCK(sc);
5152 rc = get_filter(sc, (struct t4_filter *)data);
5153 ADAPTER_UNLOCK(sc);
5154 break;
5155 case CHELSIO_T4_SET_FILTER:
5156 ADAPTER_LOCK(sc);
5157 rc = set_filter(sc, (struct t4_filter *)data);
5158 ADAPTER_UNLOCK(sc);
5159 break;
5160 case CHELSIO_T4_DEL_FILTER:
5161 ADAPTER_LOCK(sc);
5162 rc = del_filter(sc, (struct t4_filter *)data);
5163 ADAPTER_UNLOCK(sc);
5164 break;
5165 case CHELSIO_T4_GET_SGE_CONTEXT:
5166 rc = get_sge_context(sc, (struct t4_sge_context *)data);
5167 break;
5168 case CHELSIO_T4_LOAD_FW: {
5169 struct t4_data *fw = (struct t4_data *)data;
5170 uint8_t *fw_data;
5171
5172 if (sc->flags & FULL_INIT_DONE)
5173 return (EBUSY);
5174
5175 fw_data = malloc(fw->len, M_CXGBE, M_NOWAIT);
5176 if (fw_data == NULL)
5177 return (ENOMEM);
5178
5179 rc = copyin(fw->data, fw_data, fw->len);
5180 if (rc == 0)
5181 rc = -t4_load_fw(sc, fw_data, fw->len);
5182
5183 free(fw_data, M_CXGBE);
5184 break;
5185 }
5186 case CHELSIO_T4_GET_MEM:
5187 rc = read_card_mem(sc, (struct t4_mem_range *)data);
5188 break;
5189 default:
5190 rc = EINVAL;
5191 }
5192
5193 return (rc);
5194}
5195
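/*
 * Example (an illustrative sketch only, not part of the driver): userland
 * reaches the commands above through the nexus adapter's character device.
 * The device node name below is an assumption; the make_dev() call in the
 * attach routine establishes the real name.
 *
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("reg 0x%x = 0x%llx\n", reg.addr, (long long)reg.val);
 */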
5196#ifndef TCP_OFFLOAD_DISABLE
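/*
 * Enable or disable TOE on a port.  The TOM ULD is activated when the first
 * port turns on offload and deactivated when the last one turns it off;
 * sc->offload_map tracks which ports have it enabled.  Called with the
 * adapter lock held.
 */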
5197static int
5198toe_capability(struct port_info *pi, int enable)
5199{
5200 int rc;
5201 struct adapter *sc = pi->adapter;
5202
5203 ADAPTER_LOCK_ASSERT_OWNED(sc);
5204
5205 if (!is_offload(sc))
5206 return (ENODEV);
5207
5208 if (enable) {
5209 if (isset(&sc->offload_map, pi->port_id))
5210 return (0);
5211
5212 if (sc->offload_map == 0) {
5213 rc = activate_uld(sc, ULD_TOM, &sc->tom);
5214 if (rc != 0)
5215 return (rc);
5216 }
5217
5218 setbit(&sc->offload_map, pi->port_id);
5219 } else {
5220 if (!isset(&sc->offload_map, pi->port_id))
5221 return (0);
5222
5223 clrbit(&sc->offload_map, pi->port_id);
5224
5225 if (sc->offload_map == 0) {
5226 rc = deactivate_uld(&sc->tom);
5227 if (rc != 0) {
5228 setbit(&sc->offload_map, pi->port_id);
5229 return (rc);
5230 }
5231 }
5232 }
5233
5234 return (0);
5235}
5236
5237/*
5238 * Add an upper layer driver to the global list.
5239 */
5240int
5241t4_register_uld(struct uld_info *ui)
5242{
5243 int rc = 0;
5244 struct uld_info *u;
5245
5246 mtx_lock(&t4_uld_list_lock);
5247 SLIST_FOREACH(u, &t4_uld_list, link) {
5248 if (u->uld_id == ui->uld_id) {
5249 rc = EEXIST;
5250 goto done;
5251 }
5252 }
5253
5254 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
5255 ui->refcount = 0;
5256done:
5257 mtx_unlock(&t4_uld_list_lock);
5258 return (rc);
5259}
5260
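/*
 * Remove an upper layer driver from the global list.  Fails with EBUSY
 * while the ULD is still attached to any adapter (refcount > 0).
 */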
5261int
5262t4_unregister_uld(struct uld_info *ui)
5263{
5264 int rc = EINVAL;
5265 struct uld_info *u;
5266
5267 mtx_lock(&t4_uld_list_lock);
5268
5269 SLIST_FOREACH(u, &t4_uld_list, link) {
5270 if (u == ui) {
5271 if (ui->refcount > 0) {
5272 rc = EBUSY;
5273 goto done;
5274 }
5275
5276 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
5277 rc = 0;
5278 goto done;
5279 }
5280 }
5281done:
5282 mtx_unlock(&t4_uld_list_lock);
5283 return (rc);
5284}
5285
5286static int
5287activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
5288{
5289 int rc = EAGAIN;
5290 struct uld_info *ui;
5291
5292 mtx_lock(&t4_uld_list_lock);
5293
5294 SLIST_FOREACH(ui, &t4_uld_list, link) {
5295 if (ui->uld_id == id) {
5296 rc = ui->attach(sc, &usc->softc);
5297 if (rc == 0) {
5298 KASSERT(usc->softc != NULL,
5299 ("%s: ULD %d has no state", __func__, id));
5300 ui->refcount++;
5301 usc->uld = ui;
5302 }
5303 goto done;
5304 }
5305 }
5306done:
5307 mtx_unlock(&t4_uld_list_lock);
5308
5309 return (rc);
5310}
5311
5312static int
5313deactivate_uld(struct uld_softc *usc)
5314{
5315 int rc;
5316
5317 mtx_lock(&t4_uld_list_lock);
5318
5319 if (usc->uld == NULL || usc->softc == NULL) {
5320 rc = EINVAL;
5321 goto done;
5322 }
5323
5324 rc = usc->uld->detach(usc->softc);
5325 if (rc == 0) {
5326 KASSERT(usc->uld->refcount > 0,
5327 ("%s: ULD has bad refcount", __func__));
5328 usc->uld->refcount--;
5329 usc->uld = NULL;
5330 usc->softc = NULL;
5331 }
5332done:
5333 mtx_unlock(&t4_uld_list_lock);
5334
5335 return (rc);
5336}
5337#endif
5338
5339/*
5340 * Come up with reasonable defaults for some of the tunables.  Tunables
5341 * that the user has already set are left untouched and used as is.
5342 */
5343static void
5344tweak_tunables(void)
5345{
5346 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
5347
5348 if (t4_ntxq10g < 1)
5349 t4_ntxq10g = min(nc, NTXQ_10G);
5350
5351 if (t4_ntxq1g < 1)
5352 t4_ntxq1g = min(nc, NTXQ_1G);
5353
5354 if (t4_nrxq10g < 1)
5355 t4_nrxq10g = min(nc, NRXQ_10G);
5356
5357 if (t4_nrxq1g < 1)
5358 t4_nrxq1g = min(nc, NRXQ_1G);
5359
5360#ifndef TCP_OFFLOAD_DISABLE
5361 if (t4_nofldtxq10g < 1)
5362 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
5363
5364 if (t4_nofldtxq1g < 1)
5365 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
5366
5367 if (t4_nofldrxq10g < 1)
5368 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
5369
5370 if (t4_nofldrxq1g < 1)
5371 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
5372#endif
5373
5374 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
5375 t4_tmr_idx_10g = TMR_IDX_10G;
5376
5377 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
5378 t4_pktc_idx_10g = PKTC_IDX_10G;
5379
5380 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
5381 t4_tmr_idx_1g = TMR_IDX_1G;
5382
5383 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
5384 t4_pktc_idx_1g = PKTC_IDX_1G;
5385
5386 if (t4_qsize_txq < 128)
5387 t4_qsize_txq = 128;
5388
5389 if (t4_qsize_rxq < 128)
5390 t4_qsize_rxq = 128;
5391 while (t4_qsize_rxq & 7)
5392 t4_qsize_rxq++;
5393
5394 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
5395}
5396
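/*
 * Module event handler: set up the global adapter and ULD lists on load;
 * refuse to unload (EBUSY) while either list is non-empty.
 */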
5397static int
5398t4_mod_event(module_t mod, int cmd, void *arg)
5399{
5400 int rc = 0;
5401
5402 switch (cmd) {
5403 case MOD_LOAD:
5404 t4_sge_modload();
5405 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
5406 SLIST_INIT(&t4_list);
5407#ifndef TCP_OFFLOAD_DISABLE
5408 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
5409 SLIST_INIT(&t4_uld_list);
5410#endif
5411 tweak_tunables();
5412 break;
5413
5414 case MOD_UNLOAD:
5415#ifndef TCP_OFFLOAD_DISABLE
5416 mtx_lock(&t4_uld_list_lock);
5417 if (!SLIST_EMPTY(&t4_uld_list)) {
5418 rc = EBUSY;
5419 mtx_unlock(&t4_uld_list_lock);
5420 break;
5421 }
5422 mtx_unlock(&t4_uld_list_lock);
5423 mtx_destroy(&t4_uld_list_lock);
5424#endif
5425 mtx_lock(&t4_list_lock);
5426 if (!SLIST_EMPTY(&t4_list)) {
5427 rc = EBUSY;
5428 mtx_unlock(&t4_list_lock);
5429 break;
5430 }
5431 mtx_unlock(&t4_list_lock);
5432 mtx_destroy(&t4_list_lock);
5433 break;
5434 }
5435
5436 return (rc);
5437}
5438
5439static devclass_t t4_devclass;
5440static devclass_t cxgbe_devclass;
5441
5442DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
5443MODULE_VERSION(t4nex, 1);
5444
5445DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
5446MODULE_VERSION(cxgbe, 1);