t4_main.c (227843) vs t4_main.c (228561)
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 12 unchanged lines hidden ---

21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 12 unchanged lines hidden (view full) ---

21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 227843 2011-11-22 21:28:20Z marius $");
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 228561 2011-12-16 02:09:51Z np $");
30
31#include "opt_inet.h"
32
33#include <sys/param.h>
34#include <sys/conf.h>
35#include <sys/priv.h>
36#include <sys/kernel.h>
37#include <sys/bus.h>

--- 12 unchanged lines hidden ---

50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52#include <net/ethernet.h>
53#include <net/if.h>
54#include <net/if_types.h>
55#include <net/if_dl.h>
56#include <net/if_vlan_var.h>
57
30
31#include "opt_inet.h"
32
33#include <sys/param.h>
34#include <sys/conf.h>
35#include <sys/priv.h>
36#include <sys/kernel.h>
37#include <sys/bus.h>

--- 12 unchanged lines hidden ---

50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52#include <net/ethernet.h>
53#include <net/if.h>
54#include <net/if_types.h>
55#include <net/if_dl.h>
56#include <net/if_vlan_var.h>
57
58#include "common/t4_hw.h"
59#include "common/common.h"
60#include "common/t4_msg.h"
61#include "common/t4_regs.h"
62#include "common/t4_regs_values.h"
58#include "common/common.h"
59#include "common/t4_msg.h"
60#include "common/t4_regs.h"
61#include "common/t4_regs_values.h"
63#include "common/t4fw_interface.h"
64#include "t4_ioctl.h"
65#include "t4_l2t.h"
66
67/* T4 bus driver interface */
68static int t4_probe(device_t);
69static int t4_attach(device_t);
70static int t4_detach(device_t);
71static device_method_t t4_methods[] = {

--- 45 unchanged lines hidden ---

117static void cxgbe_start(struct ifnet *);
118static int cxgbe_transmit(struct ifnet *, struct mbuf *);
119static void cxgbe_qflush(struct ifnet *);
120static int cxgbe_media_change(struct ifnet *);
121static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
122
123MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
124
62#include "t4_ioctl.h"
63#include "t4_l2t.h"
64
65/* T4 bus driver interface */
66static int t4_probe(device_t);
67static int t4_attach(device_t);
68static int t4_detach(device_t);
69static device_method_t t4_methods[] = {

--- 45 unchanged lines hidden ---

115static void cxgbe_start(struct ifnet *);
116static int cxgbe_transmit(struct ifnet *, struct mbuf *);
117static void cxgbe_qflush(struct ifnet *);
118static int cxgbe_media_change(struct ifnet *);
119static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
120
121MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
122
125/*
126 * Tunables.
127 */
128static SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0,
129 "cxgbe driver parameters");
123static struct mtx t4_list_lock;
124static SLIST_HEAD(, adapter) t4_list;
125#ifndef TCP_OFFLOAD_DISABLE
126static struct mtx t4_uld_list_lock;
127static SLIST_HEAD(, uld_info) t4_uld_list;
128#endif
130
129
131static int force_firmware_install = 0;
132TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
133SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN,
134 &force_firmware_install, 0, "install firmware on every attach.");
135
136/*
130/*
137 * Holdoff timer and packet counter values.
131 * Tunables. See tweak_tunables() too.
138 */
132 */
139static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
140static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
141
142/*
133
134/*
143 * Max # of tx and rx queues to use for each 10G and 1G port.
135 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
144 */
136 */
145static unsigned int max_ntxq_10g = 8;
146TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g);
147SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN,
148 &max_ntxq_10g, 0, "maximum number of tx queues per 10G port.");
137#define NTXQ_10G 16
138static int t4_ntxq10g = -1;
139TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
149
140
150static unsigned int max_nrxq_10g = 8;
151TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g);
152SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN,
153 &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port).");
141#define NRXQ_10G 8
142static int t4_nrxq10g = -1;
143TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
154
144
155static unsigned int max_ntxq_1g = 2;
156TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g);
157SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN,
158 &max_ntxq_1g, 0, "maximum number of tx queues per 1G port.");
145#define NTXQ_1G 4
146static int t4_ntxq1g = -1;
147TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
159
148
160static unsigned int max_nrxq_1g = 2;
161TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g);
162SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN,
163 &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port).");
149#define NRXQ_1G 2
150static int t4_nrxq1g = -1;
151TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
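/*
 * Editorial sketch, not part of the diff: the reworked tunables default
 * to -1, and the "See tweak_tunables() too" comment above suggests -1 is
 * a sentinel for "use the built-in default (NTXQ_10G and friends)",
 * along the lines of this hypothetical helper:
 *
 *	static inline int
 *	tunable_or_default(int val, int def)
 *	{
 *		return (val < 1 ? def : val);
 *	}
 *
 * e.g. ntxq10g = tunable_or_default(t4_ntxq10g, NTXQ_10G);
 */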
164
152
153#ifndef TCP_OFFLOAD_DISABLE
154#define NOFLDTXQ_10G 8
155static int t4_nofldtxq10g = -1;
156TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
157
158#define NOFLDRXQ_10G 2
159static int t4_nofldrxq10g = -1;
160TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
161
162#define NOFLDTXQ_1G 2
163static int t4_nofldtxq1g = -1;
164TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
165
166#define NOFLDRXQ_1G 1
167static int t4_nofldrxq1g = -1;
168TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
169#endif
170
165/*
166 * Holdoff parameters for 10G and 1G ports.
167 */
171/*
172 * Holdoff parameters for 10G and 1G ports.
173 */
168static unsigned int tmr_idx_10g = 1;
169TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g);
170SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN,
171 &tmr_idx_10g, 0,
172 "default timer index for interrupt holdoff (10G ports).");
174#define TMR_IDX_10G 1
175static int t4_tmr_idx_10g = TMR_IDX_10G;
176TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
173
177
174static int pktc_idx_10g = 2;
175TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g);
176SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN,
177 &pktc_idx_10g, 0,
178 "default pkt counter index for interrupt holdoff (10G ports).");
178#define PKTC_IDX_10G 2
179static int t4_pktc_idx_10g = PKTC_IDX_10G;
180TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
179
181
180static unsigned int tmr_idx_1g = 1;
181TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g);
182SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN,
183 &tmr_idx_1g, 0,
184 "default timer index for interrupt holdoff (1G ports).");
182#define TMR_IDX_1G 1
183static int t4_tmr_idx_1g = TMR_IDX_1G;
184TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
185
185
186static int pktc_idx_1g = 2;
187TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g);
188SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN,
189 &pktc_idx_1g, 0,
190 "default pkt counter index for interrupt holdoff (1G ports).");
186#define PKTC_IDX_1G 2
187static int t4_pktc_idx_1g = PKTC_IDX_1G;
188TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
191
192/*
193 * Size (# of entries) of each tx and rx queue.
194 */
189
190/*
191 * Size (# of entries) of each tx and rx queue.
192 */
195static unsigned int qsize_txq = TX_EQ_QSIZE;
196TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq);
197SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN,
198 &qsize_txq, 0, "default queue size of NIC tx queues.");
193static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
194TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
199
195
200static unsigned int qsize_rxq = RX_IQ_QSIZE;
201TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq);
202SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN,
203 &qsize_rxq, 0, "default queue size of NIC rx queues.");
196static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
197TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
204
205/*
198
199/*
206 * Interrupt types allowed.
200 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
207 */
201 */
208static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
209TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types);
210SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
211 "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");
202static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
203TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
212
213/*
204
205/*
214 * Force the driver to use the same set of interrupts for all ports.
206 * Configuration file.
215 */
207 */
216static int intr_shared = 0;
217TUNABLE_INT("hw.cxgbe.interrupts_shared", &intr_shared);
218SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupts_shared, CTLFLAG_RDTUN,
219 &intr_shared, 0, "interrupts shared between all ports");
208static char t4_cfg_file[32] = "default";
209TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
220
210
221static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC;
222TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode);
223SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN,
224 &filter_mode, 0, "default global filter mode.");
211/*
212 * ASIC features that will be used. Disable the ones you don't want so that the
213 * chip resources aren't wasted on features that will not be used.
214 */
215static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
216TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
225
217
218static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
219TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
220
221static int t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
222TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
223
224static int t4_rdmacaps_allowed = 0;
225TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
226
227static int t4_iscsicaps_allowed = 0;
228TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
229
230static int t4_fcoecaps_allowed = 0;
231TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
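/*
 * Editorial example, assuming standard FreeBSD loader-tunable semantics
 * (the names are the TUNABLE_* strings declared above): these knobs are
 * set from /boot/loader.conf, e.g.
 *
 *	hw.cxgbe.config_file="default"
 *	hw.cxgbe.ntxq10g="16"
 *	hw.cxgbe.toecaps_allowed="0"
 */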
232
226struct intrs_and_queues {
227 int intr_type; /* INTx, MSI, or MSI-X */
228 int nirq; /* Number of vectors */
233struct intrs_and_queues {
234 int intr_type; /* INTx, MSI, or MSI-X */
235 int nirq; /* Number of vectors */
229 int intr_shared; /* Interrupts shared between all ports */
236 int intr_flags;
230 int ntxq10g; /* # of NIC txq's for each 10G port */
231 int nrxq10g; /* # of NIC rxq's for each 10G port */
232 int ntxq1g; /* # of NIC txq's for each 1G port */
233 int nrxq1g; /* # of NIC rxq's for each 1G port */
237 int ntxq10g; /* # of NIC txq's for each 10G port */
238 int nrxq10g; /* # of NIC rxq's for each 10G port */
239 int ntxq1g; /* # of NIC txq's for each 1G port */
240 int nrxq1g; /* # of NIC rxq's for each 1G port */
241#ifndef TCP_OFFLOAD_DISABLE
242 int nofldtxq10g; /* # of TOE txq's for each 10G port */
243 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
244 int nofldtxq1g; /* # of TOE txq's for each 1G port */
245 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
246#endif
234};
235
236struct filter_entry {
237 uint32_t valid:1; /* filter allocated and valid */
238 uint32_t locked:1; /* filter is administratively locked */
239 uint32_t pending:1; /* filter action is pending firmware reply */
240 uint32_t smtidx:8; /* Source MAC Table index for smac */
241 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
242
243 struct t4_filter_specification fs;
244};
245
246enum {
247};
248
249struct filter_entry {
250 uint32_t valid:1; /* filter allocated and valid */
251 uint32_t locked:1; /* filter is administratively locked */
252 uint32_t pending:1; /* filter action is pending firmware reply */
253 uint32_t smtidx:8; /* Source MAC Table index for smac */
254 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
255
256 struct t4_filter_specification fs;
257};
258
259enum {
247 MEMWIN0_APERTURE = 2048,
248 MEMWIN0_BASE = 0x1b800,
249 MEMWIN1_APERTURE = 32768,
250 MEMWIN1_BASE = 0x28000,
251 MEMWIN2_APERTURE = 65536,
252 MEMWIN2_BASE = 0x30000,
253};
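/*
 * Editorial note: setup_memwin() below programs these windows with
 * V_WINDOW(ilog2(aperture) - 10), i.e. the size is encoded as log2 of
 * the aperture in 1KB units: MEMWIN0_APERTURE (2048) encodes as 1,
 * MEMWIN2_APERTURE (65536) as 6.
 */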
254
255enum {
256 XGMAC_MTU = (1 << 0),
257 XGMAC_PROMISC = (1 << 1),
258 XGMAC_ALLMULTI = (1 << 2),
259 XGMAC_VLANEX = (1 << 3),
260 XGMAC_UCADDR = (1 << 4),
261 XGMAC_MCADDRS = (1 << 5),
262
263 XGMAC_ALL = 0xffff
264};
265
266static int map_bars(struct adapter *);
267static void setup_memwin(struct adapter *);
268static int cfg_itype_and_nqueues(struct adapter *, int, int,
269 struct intrs_and_queues *);
270static int prep_firmware(struct adapter *);
260 XGMAC_MTU = (1 << 0),
261 XGMAC_PROMISC = (1 << 1),
262 XGMAC_ALLMULTI = (1 << 2),
263 XGMAC_VLANEX = (1 << 3),
264 XGMAC_UCADDR = (1 << 4),
265 XGMAC_MCADDRS = (1 << 5),
266
267 XGMAC_ALL = 0xffff
268};
269
270static int map_bars(struct adapter *);
271static void setup_memwin(struct adapter *);
272static int cfg_itype_and_nqueues(struct adapter *, int, int,
273 struct intrs_and_queues *);
274static int prep_firmware(struct adapter *);
271static int get_devlog_params(struct adapter *, struct devlog_params *);
272static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
273static int get_params(struct adapter *, struct fw_caps_config_cmd *);
275static int upload_config_file(struct adapter *, const struct firmware *,
276 uint32_t *, uint32_t *);
277static int partition_resources(struct adapter *, const struct firmware *);
278static int get_params__pre_init(struct adapter *);
279static int get_params__post_init(struct adapter *);
274static void t4_set_desc(struct adapter *);
275static void build_medialist(struct port_info *);
276static int update_mac_settings(struct port_info *, int);
277static int cxgbe_init_locked(struct port_info *);
278static int cxgbe_init_synchronized(struct port_info *);
279static int cxgbe_uninit_locked(struct port_info *);
280static int cxgbe_uninit_synchronized(struct port_info *);
280static void t4_set_desc(struct adapter *);
281static void build_medialist(struct port_info *);
282static int update_mac_settings(struct port_info *, int);
283static int cxgbe_init_locked(struct port_info *);
284static int cxgbe_init_synchronized(struct port_info *);
285static int cxgbe_uninit_locked(struct port_info *);
286static int cxgbe_uninit_synchronized(struct port_info *);
281static int first_port_up(struct adapter *);
282static int last_port_down(struct adapter *);
287static int adapter_full_init(struct adapter *);
288static int adapter_full_uninit(struct adapter *);
289static int port_full_init(struct port_info *);
290static int port_full_uninit(struct port_info *);
291static void quiesce_eq(struct adapter *, struct sge_eq *);
292static void quiesce_iq(struct adapter *, struct sge_iq *);
293static void quiesce_fl(struct adapter *, struct sge_fl *);
283static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
294static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
284 iq_intr_handler_t *, void *, char *);
295 driver_intr_t *, void *, char *);
285static int t4_free_irq(struct adapter *, struct irq *);
286static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
287 unsigned int);
288static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
289static void cxgbe_tick(void *);
296static int t4_free_irq(struct adapter *, struct irq *);
297static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
298 unsigned int);
299static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
300static void cxgbe_tick(void *);
301static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
302 struct mbuf *);
290static int t4_sysctls(struct adapter *);
291static int cxgbe_sysctls(struct port_info *);
292static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
303static int t4_sysctls(struct adapter *);
304static int cxgbe_sysctls(struct port_info *);
305static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
306static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
293static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
294static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
295static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
296static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
297static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
307static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
308static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
309static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
310static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
311static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
312static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
313static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
314static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
298static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
315static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
316static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
317static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
318static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
319static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
320static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
321static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
322static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
323static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
324static int sysctl_tids(SYSCTL_HANDLER_ARGS);
325static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
326static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
299static inline void txq_start(struct ifnet *, struct sge_txq *);
300static uint32_t fconf_to_mode(uint32_t);
301static uint32_t mode_to_fconf(uint32_t);
302static uint32_t fspec_to_fconf(struct t4_filter_specification *);
303static int get_filter_mode(struct adapter *, uint32_t *);
304static int set_filter_mode(struct adapter *, uint32_t);
305static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
306static int get_filter(struct adapter *, struct t4_filter *);
307static int set_filter(struct adapter *, struct t4_filter *);
308static int del_filter(struct adapter *, struct t4_filter *);
309static void clear_filter(struct filter_entry *);
310static int set_filter_wr(struct adapter *, int);
311static int del_filter_wr(struct adapter *, int);
327static inline void txq_start(struct ifnet *, struct sge_txq *);
328static uint32_t fconf_to_mode(uint32_t);
329static uint32_t mode_to_fconf(uint32_t);
330static uint32_t fspec_to_fconf(struct t4_filter_specification *);
331static int get_filter_mode(struct adapter *, uint32_t *);
332static int set_filter_mode(struct adapter *, uint32_t);
333static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
334static int get_filter(struct adapter *, struct t4_filter *);
335static int set_filter(struct adapter *, struct t4_filter *);
336static int del_filter(struct adapter *, struct t4_filter *);
337static void clear_filter(struct filter_entry *);
338static int set_filter_wr(struct adapter *, int);
339static int del_filter_wr(struct adapter *, int);
312void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
340static int filter_rpl(struct sge_iq *, const struct rss_header *,
341 struct mbuf *);
313static int get_sge_context(struct adapter *, struct t4_sge_context *);
342static int get_sge_context(struct adapter *, struct t4_sge_context *);
343static int read_card_mem(struct adapter *, struct t4_mem_range *);
344#ifndef TCP_OFFLOAD_DISABLE
345static int toe_capability(struct port_info *, int);
346static int activate_uld(struct adapter *, int, struct uld_softc *);
347static int deactivate_uld(struct uld_softc *);
348#endif
314static int t4_mod_event(module_t, int, void *);
315
316struct t4_pciids {
317 uint16_t device;
318 uint8_t mpf;
319 char *desc;
320} t4_pciids[] = {
321 {0xa000, 0, "Chelsio Terminator 4 FPGA"},

--- 5 unchanged lines hidden ---

327 {0x4405, 4, "Chelsio T440-BCH"},
328 {0x4406, 4, "Chelsio T440-CH"},
329 {0x4407, 4, "Chelsio T420-SO"},
330 {0x4408, 4, "Chelsio T420-CX"},
331 {0x4409, 4, "Chelsio T420-BT"},
332 {0x440a, 4, "Chelsio T404-BT"},
333};
334
349static int t4_mod_event(module_t, int, void *);
350
351struct t4_pciids {
352 uint16_t device;
353 uint8_t mpf;
354 char *desc;
355} t4_pciids[] = {
356 {0xa000, 0, "Chelsio Terminator 4 FPGA"},

--- 5 unchanged lines hidden ---

362 {0x4405, 4, "Chelsio T440-BCH"},
363 {0x4406, 4, "Chelsio T440-CH"},
364 {0x4407, 4, "Chelsio T420-SO"},
365 {0x4408, 4, "Chelsio T420-CX"},
366 {0x4409, 4, "Chelsio T420-BT"},
367 {0x440a, 4, "Chelsio T404-BT"},
368};
369
370#ifndef TCP_OFFLOAD_DISABLE
371/* This is used in service_iq() to get to the fl associated with an iq. */
372CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
373#endif
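/*
 * Editorial note: the CTASSERT above is a compile-time check that the
 * free list sits at the same offset in a NIC rxq and an offload rxq, so
 * service_iq() can reach either one through a common pointer; moving the
 * fl member in either struct breaks the build rather than the fast path.
 */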
374
335static int
336t4_probe(device_t dev)
337{
338 int i;
339 uint16_t v = pci_get_vendor(dev);
340 uint16_t d = pci_get_device(dev);
341
342 if (v != PCI_VENDOR_ID_CHELSIO)

--- 10 unchanged lines hidden ---

353 return (ENXIO);
354}
355
356static int
357t4_attach(device_t dev)
358{
359 struct adapter *sc;
360 int rc = 0, i, n10g, n1g, rqidx, tqidx;
375static int
376t4_probe(device_t dev)
377{
378 int i;
379 uint16_t v = pci_get_vendor(dev);
380 uint16_t d = pci_get_device(dev);
381
382 if (v != PCI_VENDOR_ID_CHELSIO)

--- 10 unchanged lines hidden ---

393 return (ENXIO);
394}
395
396static int
397t4_attach(device_t dev)
398{
399 struct adapter *sc;
400 int rc = 0, i, n10g, n1g, rqidx, tqidx;
361 struct fw_caps_config_cmd caps;
362 uint32_t p, v;
363 struct intrs_and_queues iaq;
364 struct sge *s;
401 struct intrs_and_queues iaq;
402 struct sge *s;
403#ifndef TCP_OFFLOAD_DISABLE
404 int ofld_rqidx, ofld_tqidx;
405#endif
365
366 sc = device_get_softc(dev);
367 sc->dev = dev;
368 sc->pf = pci_get_function(dev);
369 sc->mbox = sc->pf;
370
371 pci_enable_busmaster(dev);
372 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
406
407 sc = device_get_softc(dev);
408 sc->dev = dev;
409 sc->pf = pci_get_function(dev);
410 sc->mbox = sc->pf;
411
412 pci_enable_busmaster(dev);
413 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
414 uint32_t v;
415
373 pci_set_max_read_req(dev, 4096);
374 v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
375 v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
376 pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
377 }
378
379 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
380 device_get_nameunit(dev));
381 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
416 pci_set_max_read_req(dev, 4096);
417 v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
418 v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
419 pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
420 }
421
422 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
423 device_get_nameunit(dev));
424 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
425 mtx_lock(&t4_list_lock);
426 SLIST_INSERT_HEAD(&t4_list, sc, link);
427 mtx_unlock(&t4_list_lock);
382
428
429 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
430 TAILQ_INIT(&sc->sfl);
431 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
432
383 rc = map_bars(sc);
384 if (rc != 0)
385 goto done; /* error message displayed already */
386
387 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
433 rc = map_bars(sc);
434 if (rc != 0)
435 goto done; /* error message displayed already */
436
437 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
438 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++)
439 sc->cpl_handler[i] = cpl_not_handled;
440 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, filter_rpl);
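/*
 * Editorial note: every CPL opcode starts out pointing at
 * cpl_not_handled, and handlers such as filter_rpl are then registered
 * only for the opcodes this driver services, so an unexpected message
 * lands on a safe default instead of a NULL function pointer.
 */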
388
389 /* Prepare the adapter for operation */
390 rc = -t4_prep_adapter(sc);
391 if (rc != 0) {
392 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
393 goto done;
394 }
395
441
442 /* Prepare the adapter for operation */
443 rc = -t4_prep_adapter(sc);
444 if (rc != 0) {
445 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
446 goto done;
447 }
448
396 /* Do this really early */
449 /*
450 * Do this really early, with the memory windows set up even before the
451 * character device. The userland tool's register i/o and mem read
452 * will work even in "recovery mode".
453 */
454 setup_memwin(sc);
397 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
398 GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
399 sc->cdev->si_drv1 = sc;
400
455 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
456 GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
457 sc->cdev->si_drv1 = sc;
458
459 /* Go no further if recovery mode has been requested. */
460 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
461 device_printf(dev, "recovery mode.\n");
462 goto done;
463 }
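/*
 * Editorial example, assuming standard loader-tunable semantics: booting
 * with hw.cxgbe.sos="1" in /boot/loader.conf stops the attach here; the
 * memory windows and control device set up above keep cxgbetool's
 * register i/o and memory reads usable in this recovery mode.
 */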
464
401 /* Prepare the firmware for operation */
402 rc = prep_firmware(sc);
403 if (rc != 0)
404 goto done; /* error message displayed already */
405
465 /* Prepare the firmware for operation */
466 rc = prep_firmware(sc);
467 if (rc != 0)
468 goto done; /* error message displayed already */
469
406 /* Read firmware devlog parameters */
407 (void) get_devlog_params(sc, &sc->params.devlog);
470 rc = get_params__pre_init(sc);
471 if (rc != 0)
472 goto done; /* error message displayed already */
408
473
409 /* Get device capabilities and select which ones we'll use */
410 rc = get_capabilities(sc, &caps);
411 if (rc != 0) {
412 device_printf(dev,
413 "failed to initialize adapter capabilities: %d.\n", rc);
414 goto done;
415 }
474 rc = t4_sge_init(sc);
475 if (rc != 0)
476 goto done; /* error message displayed already */
416
477
417 /* Choose the global RSS mode. */
418 rc = -t4_config_glbl_rss(sc, sc->mbox,
419 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
420 F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
421 F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
422 F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
423 if (rc != 0) {
424 device_printf(dev,
425 "failed to select global RSS mode: %d.\n", rc);
426 goto done;
478 if (sc->flags & MASTER_PF) {
479 /* get basic stuff going */
480 rc = -t4_fw_initialize(sc, sc->mbox);
481 if (rc != 0) {
482 device_printf(dev, "early init failed: %d.\n", rc);
483 goto done;
484 }
427 }
428
485 }
486
429 /* These are total (sum of all ports) limits for a bus driver */
430 rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0,
431 128, /* max # of egress queues */
432 64, /* max # of egress Ethernet or control queues */
433 64, /* max # of ingress queues with fl/interrupt */
434 0, /* max # of ingress queues without interrupt */
435 0, /* PCIe traffic class */
436 4, /* max # of virtual interfaces */
437 M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16,
438 FW_CMD_CAP_PF, FW_CMD_CAP_PF);
439 if (rc != 0) {
440 device_printf(dev,
441 "failed to configure pf/vf resources: %d.\n", rc);
442 goto done;
443 }
487 rc = get_params__post_init(sc);
488 if (rc != 0)
489 goto done; /* error message displayed already */
444
490
445 /* Need this before sge_init */
446 for (i = 0; i < SGE_NTIMERS; i++)
447 sc->sge.timer_val[i] = min(intr_timer[i], 200U);
448 for (i = 0; i < SGE_NCOUNTERS; i++)
449 sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0);
491 if (sc->flags & MASTER_PF) {
450
492
451 /* Also need the cooked value of cclk before sge_init */
452 p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
453 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
454 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v);
455 if (rc != 0) {
456 device_printf(sc->dev,
457 "failed to obtain core clock value: %d.\n", rc);
458 goto done;
459 }
460 sc->params.vpd.cclk = v;
493 /* final tweaks to some settings */
461
494
462 t4_sge_init(sc);
463
464 t4_set_filter_mode(sc, filter_mode);
465 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG,
466 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP),
467 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP));
468 t4_tp_wr_bits_indirect(sc, A_TP_INGRESS_CONFIG, F_CSUM_HAS_PSEUDO_HDR,
469 F_LOOKUPEVERYPKT);
470
471 /* get basic stuff going */
472 rc = -t4_early_init(sc, sc->mbox);
473 if (rc != 0) {
474 device_printf(dev, "early init failed: %d.\n", rc);
475 goto done;
495 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
496 sc->params.b_wnd);
497 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
498 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
499 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);
500 t4_set_reg_field(sc, A_TP_PARA_REG5,
501 V_INDICATESIZE(M_INDICATESIZE) |
502 F_REARMDDPOFFSET | F_RESETDDPOFFSET,
503 V_INDICATESIZE(M_INDICATESIZE) |
504 F_REARMDDPOFFSET | F_RESETDDPOFFSET);
505 } else {
506 /*
507 * XXX: Verify that we can live with whatever the master driver
508 * has done so far, and hope that it doesn't change any global
509 * setting from underneath us in the future.
510 */
476 }
477
511 }
512
478 rc = get_params(sc, &caps);
479 if (rc != 0)
480 goto done; /* error message displayed already */
513 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
514 A_TP_VLAN_PRI_MAP);
481
515
482 /* These are finalized by FW initialization, load their values now */
483 v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
484 sc->params.tp.tre = G_TIMERRESOLUTION(v);
485 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
486 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
516 for (i = 0; i < NCHAN; i++)
517 sc->params.tp.tx_modq[i] = i;
487
518
488 /* tweak some settings */
489 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
490 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
491 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
492 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
493 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
494 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);
495
496 setup_memwin(sc);
497
498 rc = t4_create_dma_tag(sc);
499 if (rc != 0)
500 goto done; /* error message displayed already */
501
502 /*
503 * First pass over all the ports - allocate VIs and initialize some
504 * basic parameters like mac address, port type, etc. We also figure
505 * out whether a port is 10G or 1G and use that information when

--- 21 unchanged lines hidden ---

527 }
528
529 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
530 device_get_nameunit(dev), i);
531 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
532
533 if (is_10G_port(pi)) {
534 n10g++;
519 rc = t4_create_dma_tag(sc);
520 if (rc != 0)
521 goto done; /* error message displayed already */
522
523 /*
524 * First pass over all the ports - allocate VIs and initialize some
525 * basic parameters like mac address, port type, etc. We also figure
526 * out whether a port is 10G or 1G and use that information when

--- 21 unchanged lines hidden ---

548 }
549
550 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
551 device_get_nameunit(dev), i);
552 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
553
554 if (is_10G_port(pi)) {
555 n10g++;
535 pi->tmr_idx = tmr_idx_10g;
536 pi->pktc_idx = pktc_idx_10g;
556 pi->tmr_idx = t4_tmr_idx_10g;
557 pi->pktc_idx = t4_pktc_idx_10g;
537 } else {
538 n1g++;
558 } else {
559 n1g++;
539 pi->tmr_idx = tmr_idx_1g;
540 pi->pktc_idx = pktc_idx_1g;
560 pi->tmr_idx = t4_tmr_idx_1g;
561 pi->pktc_idx = t4_pktc_idx_1g;
541 }
542
543 pi->xact_addr_filt = -1;
544
562 }
563
564 pi->xact_addr_filt = -1;
565
545 pi->qsize_rxq = max(qsize_rxq, 128);
546 while (pi->qsize_rxq & 7)
547 pi->qsize_rxq++;
548 pi->qsize_txq = max(qsize_txq, 128);
566 pi->qsize_rxq = t4_qsize_rxq;
567 pi->qsize_txq = t4_qsize_txq;
549
568
550 if (pi->qsize_rxq != qsize_rxq) {
551 device_printf(dev,
552 "using %d instead of %d as the rx queue size.\n",
553 pi->qsize_rxq, qsize_rxq);
554 }
555 if (pi->qsize_txq != qsize_txq) {
556 device_printf(dev,
557 "using %d instead of %d as the tx queue size.\n",
558 pi->qsize_txq, qsize_txq);
559 }
560
561 pi->dev = device_add_child(dev, "cxgbe", -1);
562 if (pi->dev == NULL) {
563 device_printf(dev,
564 "failed to add device for port %d.\n", i);
565 rc = ENXIO;
566 goto done;
567 }
568 device_set_softc(pi->dev, pi);
569 pi->dev = device_add_child(dev, "cxgbe", -1);
570 if (pi->dev == NULL) {
571 device_printf(dev,
572 "failed to add device for port %d.\n", i);
573 rc = ENXIO;
574 goto done;
575 }
576 device_set_softc(pi->dev, pi);
569
570 setbit(&sc->registered_device_map, i);
571 }
572
577 }
578
573 if (sc->registered_device_map == 0) {
574 device_printf(dev, "no usable ports\n");
575 rc = ENXIO;
576 goto done;
577 }
578
579 /*
580 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
581 */
582 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
583 if (rc != 0)
584 goto done; /* error message displayed already */
585
586 sc->intr_type = iaq.intr_type;
587 sc->intr_count = iaq.nirq;
579 /*
580 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
581 */
582 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
583 if (rc != 0)
584 goto done; /* error message displayed already */
585
586 sc->intr_type = iaq.intr_type;
587 sc->intr_count = iaq.nirq;
588 sc->flags |= iaq.intr_flags;
588
589 s = &sc->sge;
590 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
591 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
592 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
589
590 s = &sc->sge;
591 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
592 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
593 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
593 s->neq += sc->params.nports; /* control queues, 1 per port */
594 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
594 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
595 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
595 if (iaq.intr_shared)
596 sc->flags |= INTR_SHARED;
597 s->niq += NINTRQ(sc); /* interrupt queues */
598
596
599 s->intrq = malloc(NINTRQ(sc) * sizeof(struct sge_iq), M_CXGBE,
597#ifndef TCP_OFFLOAD_DISABLE
598 if (is_offload(sc)) {
599
600 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
601 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
602 s->neq += s->nofldtxq + s->nofldrxq;
603 s->niq += s->nofldrxq;
604
605 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
606 M_CXGBE, M_ZERO | M_WAITOK);
607 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
608 M_CXGBE, M_ZERO | M_WAITOK);
609 }
610#endif
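/*
 * Editorial worked example: a 2-port 10G adapter with iaq.nrxq10g = 8
 * and iaq.ntxq10g = 16 gives nrxq = 16 and ntxq = 32, so before the
 * offload queues neq = 32 + 16 (each rxq's free list is an eq) + 2 ctrl
 * + 1 mgmt = 51, and niq = 16 + 1 (firmware event queue) = 17.
 */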
611
612 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
600 M_ZERO | M_WAITOK);
613 M_ZERO | M_WAITOK);
601 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_ctrlq), M_CXGBE,
602 M_ZERO | M_WAITOK);
603 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
604 M_ZERO | M_WAITOK);
605 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
606 M_ZERO | M_WAITOK);
607 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
608 M_ZERO | M_WAITOK);
609 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
610 M_ZERO | M_WAITOK);
611
612 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
613 M_ZERO | M_WAITOK);
614
614 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
615 M_ZERO | M_WAITOK);
616 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
617 M_ZERO | M_WAITOK);
618 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
619 M_ZERO | M_WAITOK);
620 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
621 M_ZERO | M_WAITOK);
622
623 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
624 M_ZERO | M_WAITOK);
625
615 sc->l2t = t4_init_l2t(M_WAITOK);
626 t4_init_l2t(sc, M_WAITOK);
616
627
617 t4_sysctls(sc);
618
619 /*
620 * Second pass over the ports. This time we know the number of rx and
621 * tx queues that each port should get.
622 */
623 rqidx = tqidx = 0;
628 /*
629 * Second pass over the ports. This time we know the number of rx and
630 * tx queues that each port should get.
631 */
632 rqidx = tqidx = 0;
633#ifndef TCP_OFFLOAD_DISABLE
634 ofld_rqidx = ofld_tqidx = 0;
635#endif
624 for_each_port(sc, i) {
625 struct port_info *pi = sc->port[i];
626
627 if (pi == NULL)
628 continue;
629
630 pi->first_rxq = rqidx;
636 for_each_port(sc, i) {
637 struct port_info *pi = sc->port[i];
638
639 if (pi == NULL)
640 continue;
641
642 pi->first_rxq = rqidx;
631 pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;
632
633 pi->first_txq = tqidx;
643 pi->first_txq = tqidx;
634 pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;
644 if (is_10G_port(pi)) {
645 pi->nrxq = iaq.nrxq10g;
646 pi->ntxq = iaq.ntxq10g;
647 } else {
648 pi->nrxq = iaq.nrxq1g;
649 pi->ntxq = iaq.ntxq1g;
650 }
635
636 rqidx += pi->nrxq;
637 tqidx += pi->ntxq;
651
652 rqidx += pi->nrxq;
653 tqidx += pi->ntxq;
654
655#ifndef TCP_OFFLOAD_DISABLE
656 if (is_offload(sc)) {
657 pi->first_ofld_rxq = ofld_rqidx;
658 pi->first_ofld_txq = ofld_tqidx;
659 if (is_10G_port(pi)) {
660 pi->nofldrxq = iaq.nofldrxq10g;
661 pi->nofldtxq = iaq.nofldtxq10g;
662 } else {
663 pi->nofldrxq = iaq.nofldrxq1g;
664 pi->nofldtxq = iaq.nofldtxq1g;
665 }
666 ofld_rqidx += pi->nofldrxq;
667 ofld_tqidx += pi->nofldtxq;
668 }
669#endif
638 }
639
640 rc = bus_generic_attach(dev);
641 if (rc != 0) {
642 device_printf(dev,
643 "failed to attach all child ports: %d\n", rc);
644 goto done;
645 }
646
670 }
671
672 rc = bus_generic_attach(dev);
673 if (rc != 0) {
674 device_printf(dev,
675 "failed to attach all child ports: %d\n", rc);
676 goto done;
677 }
678
647#ifdef INVARIANTS
648 device_printf(dev,
679 device_printf(dev,
649 "%p, %d ports (0x%x), %d intr_type, %d intr_count\n",
650 sc, sc->params.nports, sc->params.portvec,
651 sc->intr_type, sc->intr_count);
652#endif
680 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
681 sc->params.pci.width, sc->params.nports, sc->intr_count,
682 sc->intr_type == INTR_MSIX ? "MSI-X" :
683 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
684 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
685
653 t4_set_desc(sc);
654
655done:
686 t4_set_desc(sc);
687
688done:
689 if (rc != 0 && sc->cdev) {
690 /* cdev was created and so cxgbetool works; recover that way. */
691 device_printf(dev,
692 "error during attach, adapter is now in recovery mode.\n");
693 rc = 0;
694 }
695
656 if (rc != 0)
657 t4_detach(dev);
696 if (rc != 0)
697 t4_detach(dev);
698 else
699 t4_sysctls(sc);
658
659 return (rc);
660}
661
662/*
663 * Idempotent
664 */
665static int
666t4_detach(device_t dev)
667{
668 struct adapter *sc;
669 struct port_info *pi;
700
701 return (rc);
702}
703
704/*
705 * Idempotent
706 */
707static int
708t4_detach(device_t dev)
709{
710 struct adapter *sc;
711 struct port_info *pi;
670 int i;
712 int i, rc;
671
672 sc = device_get_softc(dev);
673
713
714 sc = device_get_softc(dev);
715
674 if (sc->cdev)
716 if (sc->flags & FULL_INIT_DONE)
717 t4_intr_disable(sc);
718
719 if (sc->cdev) {
675 destroy_dev(sc->cdev);
720 destroy_dev(sc->cdev);
721 sc->cdev = NULL;
722 }
676
723
677 bus_generic_detach(dev);
724 rc = bus_generic_detach(dev);
725 if (rc) {
726 device_printf(dev,
727 "failed to detach child devices: %d\n", rc);
728 return (rc);
729 }
730
678 for (i = 0; i < MAX_NPORTS; i++) {
679 pi = sc->port[i];
680 if (pi) {
681 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
682 if (pi->dev)
683 device_delete_child(dev, pi->dev);
684
685 mtx_destroy(&pi->pi_lock);
686 free(pi, M_CXGBE);
687 }
688 }
689
731 for (i = 0; i < MAX_NPORTS; i++) {
732 pi = sc->port[i];
733 if (pi) {
734 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
735 if (pi->dev)
736 device_delete_child(dev, pi->dev);
737
738 mtx_destroy(&pi->pi_lock);
739 free(pi, M_CXGBE);
740 }
741 }
742
743 if (sc->flags & FULL_INIT_DONE)
744 adapter_full_uninit(sc);
745
690 if (sc->flags & FW_OK)
691 t4_fw_bye(sc, sc->mbox);
692
693 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
694 pci_release_msi(dev);
695
696 if (sc->regs_res)
697 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
698 sc->regs_res);
699
700 if (sc->msix_res)
701 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
702 sc->msix_res);
703
704 if (sc->l2t)
705 t4_free_l2t(sc->l2t);
706
746 if (sc->flags & FW_OK)
747 t4_fw_bye(sc, sc->mbox);
748
749 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
750 pci_release_msi(dev);
751
752 if (sc->regs_res)
753 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
754 sc->regs_res);
755
756 if (sc->msix_res)
757 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
758 sc->msix_res);
759
760 if (sc->l2t)
761 t4_free_l2t(sc->l2t);
762
763#ifndef TCP_OFFLOAD_DISABLE
764 free(sc->sge.ofld_rxq, M_CXGBE);
765 free(sc->sge.ofld_txq, M_CXGBE);
766#endif
707 free(sc->irq, M_CXGBE);
708 free(sc->sge.rxq, M_CXGBE);
709 free(sc->sge.txq, M_CXGBE);
710 free(sc->sge.ctrlq, M_CXGBE);
767 free(sc->irq, M_CXGBE);
768 free(sc->sge.rxq, M_CXGBE);
769 free(sc->sge.txq, M_CXGBE);
770 free(sc->sge.ctrlq, M_CXGBE);
711 free(sc->sge.intrq, M_CXGBE);
712 free(sc->sge.iqmap, M_CXGBE);
713 free(sc->sge.eqmap, M_CXGBE);
714 free(sc->tids.ftid_tab, M_CXGBE);
715 t4_destroy_dma_tag(sc);
771 free(sc->sge.iqmap, M_CXGBE);
772 free(sc->sge.eqmap, M_CXGBE);
773 free(sc->tids.ftid_tab, M_CXGBE);
774 t4_destroy_dma_tag(sc);
716 mtx_destroy(&sc->sc_lock);
775 if (mtx_initialized(&sc->sc_lock)) {
776 mtx_lock(&t4_list_lock);
777 SLIST_REMOVE(&t4_list, sc, adapter, link);
778 mtx_unlock(&t4_list_lock);
779 mtx_destroy(&sc->sc_lock);
780 }
717
781
782 if (mtx_initialized(&sc->sfl_lock))
783 mtx_destroy(&sc->sfl_lock);
784
718 bzero(sc, sizeof(*sc));
719
720 return (0);
721}
722
723
724static int
725cxgbe_probe(device_t dev)
726{
727 char buf[128];
728 struct port_info *pi = device_get_softc(dev);
729
785 bzero(sc, sizeof(*sc));
786
787 return (0);
788}
789
790
791static int
792cxgbe_probe(device_t dev)
793{
794 char buf[128];
795 struct port_info *pi = device_get_softc(dev);
796
730 snprintf(buf, sizeof(buf), "Port %d", pi->port_id);
797 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
731 device_set_desc_copy(dev, buf);
732
733 return (BUS_PROBE_DEFAULT);
734}
735
736#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
737 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
738 IFCAP_VLAN_HWTSO)

--- 10 unchanged lines hidden ---

749 if (ifp == NULL) {
750 device_printf(dev, "Cannot allocate ifnet\n");
751 return (ENOMEM);
752 }
753 pi->ifp = ifp;
754 ifp->if_softc = pi;
755
756 callout_init(&pi->tick, CALLOUT_MPSAFE);
798 device_set_desc_copy(dev, buf);
799
800 return (BUS_PROBE_DEFAULT);
801}
802
803#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
804 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
805 IFCAP_VLAN_HWTSO)

--- 10 unchanged lines hidden ---

816 if (ifp == NULL) {
817 device_printf(dev, "Cannot allocate ifnet\n");
818 return (ENOMEM);
819 }
820 pi->ifp = ifp;
821 ifp->if_softc = pi;
822
823 callout_init(&pi->tick, CALLOUT_MPSAFE);
757 pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT,
758 taskqueue_thread_enqueue, &pi->tq);
759 if (pi->tq == NULL) {
760 device_printf(dev, "failed to allocate port task queue\n");
761 if_free(pi->ifp);
762 return (ENOMEM);
763 }
764 taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq",
765 device_get_nameunit(dev));
766
767 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
768 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
769
770 ifp->if_init = cxgbe_init;
771 ifp->if_ioctl = cxgbe_ioctl;
772 ifp->if_start = cxgbe_start;
773 ifp->if_transmit = cxgbe_transmit;
774 ifp->if_qflush = cxgbe_qflush;
775
776 ifp->if_snd.ifq_drv_maxlen = 1024;
777 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
778 IFQ_SET_READY(&ifp->if_snd);
779
780 ifp->if_capabilities = T4_CAP;
824
825 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
826 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
827
828 ifp->if_init = cxgbe_init;
829 ifp->if_ioctl = cxgbe_ioctl;
830 ifp->if_start = cxgbe_start;
831 ifp->if_transmit = cxgbe_transmit;
832 ifp->if_qflush = cxgbe_qflush;
833
834 ifp->if_snd.ifq_drv_maxlen = 1024;
835 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
836 IFQ_SET_READY(&ifp->if_snd);
837
838 ifp->if_capabilities = T4_CAP;
839#ifndef TCP_OFFLOAD_DISABLE
840 if (is_offload(pi->adapter))
841 ifp->if_capabilities |= IFCAP_TOE4;
842#endif
781 ifp->if_capenable = T4_CAP_ENABLE;
782 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
783
784 /* Initialize ifmedia for this port */
785 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
786 cxgbe_media_status);
787 build_medialist(pi);
788
789 ether_ifattach(ifp, pi->hw_addr);
790
843 ifp->if_capenable = T4_CAP_ENABLE;
844 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
845
846 /* Initialize ifmedia for this port */
847 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
848 cxgbe_media_status);
849 build_medialist(pi);
850
851 ether_ifattach(ifp, pi->hw_addr);
852
791#ifdef INVARIANTS
792 device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq);
853#ifndef TCP_OFFLOAD_DISABLE
854 if (is_offload(pi->adapter)) {
855 device_printf(dev,
856 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
857 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
858 } else
793#endif
859#endif
860 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
794
795 cxgbe_sysctls(pi);
796
797 return (0);
798}
799
800static int
801cxgbe_detach(device_t dev)
802{
803 struct port_info *pi = device_get_softc(dev);
804 struct adapter *sc = pi->adapter;
861
862 cxgbe_sysctls(pi);
863
864 return (0);
865}
866
867static int
868cxgbe_detach(device_t dev)
869{
870 struct port_info *pi = device_get_softc(dev);
871 struct adapter *sc = pi->adapter;
805 int rc;
872 struct ifnet *ifp = pi->ifp;
806
807 /* Tell if_ioctl and if_init that the port is going away */
808 ADAPTER_LOCK(sc);
809 SET_DOOMED(pi);
810 wakeup(&sc->flags);
811 while (IS_BUSY(sc))
812 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
813 SET_BUSY(sc);
814 ADAPTER_UNLOCK(sc);
815
873
874 /* Tell if_ioctl and if_init that the port is going away */
875 ADAPTER_LOCK(sc);
876 SET_DOOMED(pi);
877 wakeup(&sc->flags);
878 while (IS_BUSY(sc))
879 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
880 SET_BUSY(sc);
881 ADAPTER_UNLOCK(sc);
882
816 rc = cxgbe_uninit_synchronized(pi);
817 if (rc != 0)
818 device_printf(dev, "port uninit failed: %d.\n", rc);
883 PORT_LOCK(pi);
884 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
885 callout_stop(&pi->tick);
886 PORT_UNLOCK(pi);
887 callout_drain(&pi->tick);
819
888
820 taskqueue_free(pi->tq);
889 /* Let detach proceed even if these fail. */
890 cxgbe_uninit_synchronized(pi);
891 port_full_uninit(pi);
821
822 ifmedia_removeall(&pi->media);
823 ether_ifdetach(pi->ifp);
824 if_free(pi->ifp);
825
826 ADAPTER_LOCK(sc);
827 CLR_BUSY(sc);
828 wakeup_one(&sc->flags);

--- 122 unchanged lines hidden ---

951 if (IFCAP_TXCSUM & ifp->if_capenable)
952 ifp->if_hwassist |= CSUM_TSO;
953 else {
954 ifp->if_capenable &= ~IFCAP_TSO;
955 ifp->if_hwassist &= ~CSUM_TSO;
956 if_printf(ifp,
957 "enable txcsum first.\n");
958 rc = EAGAIN;
892
893 ifmedia_removeall(&pi->media);
894 ether_ifdetach(pi->ifp);
895 if_free(pi->ifp);
896
897 ADAPTER_LOCK(sc);
898 CLR_BUSY(sc);
899 wakeup_one(&sc->flags);

--- 122 unchanged lines hidden ---

1022 if (IFCAP_TXCSUM & ifp->if_capenable)
1023 ifp->if_hwassist |= CSUM_TSO;
1024 else {
1025 ifp->if_capenable &= ~IFCAP_TSO;
1026 ifp->if_hwassist &= ~CSUM_TSO;
1027 if_printf(ifp,
1028 "enable txcsum first.\n");
1029 rc = EAGAIN;
1030 goto fail;
959 }
960 } else
961 ifp->if_hwassist &= ~CSUM_TSO;
962 }
963 if (mask & IFCAP_LRO) {
964#ifdef INET
965 int i;
966 struct sge_rxq *rxq;
967
968 ifp->if_capenable ^= IFCAP_LRO;
969 for_each_rxq(pi, i, rxq) {
970 if (ifp->if_capenable & IFCAP_LRO)
1031 }
1032 } else
1033 ifp->if_hwassist &= ~CSUM_TSO;
1034 }
1035 if (mask & IFCAP_LRO) {
1036#ifdef INET
1037 int i;
1038 struct sge_rxq *rxq;
1039
1040 ifp->if_capenable ^= IFCAP_LRO;
1041 for_each_rxq(pi, i, rxq) {
1042 if (ifp->if_capenable & IFCAP_LRO)
971 rxq->flags |= RXQ_LRO_ENABLED;
1043 rxq->iq.flags |= IQ_LRO_ENABLED;
972 else
1044 else
973 rxq->flags &= ~RXQ_LRO_ENABLED;
1045 rxq->iq.flags &= ~IQ_LRO_ENABLED;
974 }
975#endif
976 }
977#ifndef TCP_OFFLOAD_DISABLE
1046 }
1047#endif
1048 }
1049#ifndef TCP_OFFLOAD_DISABLE
978 if (mask & IFCAP_TOE4) {
979 rc = EOPNOTSUPP;
1050 if (mask & IFCAP_TOE) {
1051 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1052
1053 rc = toe_capability(pi, enable);
1054 if (rc != 0)
1055 goto fail;
1056
1057 ifp->if_capenable ^= mask;
980 }
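/*
 * Editorial note: "enable" above is the post-toggle state of the TOE bit
 * (current if_capenable XOR the requested mask), so toe_capability() is
 * handed the target state and if_capenable is only flipped once the
 * hardware change has succeeded.
 */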
981#endif
982 if (mask & IFCAP_VLAN_HWTAGGING) {
983 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
984 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
985 PORT_LOCK(pi);
986 rc = update_mac_settings(pi, XGMAC_VLANEX);
987 PORT_UNLOCK(pi);

--- 48 unchanged lines hidden ---

1036 struct port_info *pi = ifp->if_softc;
1037 struct adapter *sc = pi->adapter;
1038 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1039 struct buf_ring *br;
1040 int rc;
1041
1042 M_ASSERTPKTHDR(m);
1043
1058 }
1059#endif
1060 if (mask & IFCAP_VLAN_HWTAGGING) {
1061 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1062 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1063 PORT_LOCK(pi);
1064 rc = update_mac_settings(pi, XGMAC_VLANEX);
1065 PORT_UNLOCK(pi);

--- 48 unchanged lines hidden ---

1114 struct port_info *pi = ifp->if_softc;
1115 struct adapter *sc = pi->adapter;
1116 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1117 struct buf_ring *br;
1118 int rc;
1119
1120 M_ASSERTPKTHDR(m);
1121
1044 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1122 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1045 m_freem(m);
1123 m_freem(m);
1046 return (0);
1124 return (ENETDOWN);
1047 }
1048
1049 if (m->m_flags & M_FLOWID)
1050 txq += (m->m_pkthdr.flowid % pi->ntxq);
1051 br = txq->br;
1052
1053 if (TXQ_TRYLOCK(txq) == 0) {
1125 }
1126
1127 if (m->m_flags & M_FLOWID)
1128 txq += (m->m_pkthdr.flowid % pi->ntxq);
1129 br = txq->br;
1130
1131 if (TXQ_TRYLOCK(txq) == 0) {
1132 struct sge_eq *eq = &txq->eq;
1133
1054 /*
1134 /*
1055 * XXX: make sure that this packet really is sent out. There is
1056 * a small race where t4_eth_tx may stop draining the drbr and
1057 * goes away, just before we enqueued this mbuf.
1135 * It is possible that t4_eth_tx finishes up and releases the
1136 * lock between the TRYLOCK above and the drbr_enqueue here. We
1137 * need to make sure that this mbuf doesn't just sit there in
1138 * the drbr.
1058 */
1059
1139 */
1140
1060 return (drbr_enqueue(ifp, br, m));
1141 rc = drbr_enqueue(ifp, br, m);
1142 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1143 !(eq->flags & EQ_DOOMED))
1144 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1145 return (rc);
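/*
 * Editorial sketch (hypothetical names) of the try-lock-or-defer pattern
 * the new code uses to close the race described in the old XXX comment:
 *
 *	if (TRYLOCK(q) == 0) {
 *		rc = enqueue(q, m);		stash the packet
 *		if (rc == 0 && !callout_pending(&q->co))
 *			callout_reset(&q->co, 1, drain_fn, q);
 *		return (rc);			a drain is now guaranteed
 *	}
 *	lock held: drain the backlog, then transmit directly
 */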
1061 }
1062
1063 /*
1064 * txq->m is the mbuf that is held up due to a temporary shortage of
1065 * resources and it should be put on the wire first. Then what's in
1066 * drbr and finally the mbuf that was just passed in to us.
1067 *
1068 * Return code should indicate the fate of the mbuf that was passed in

--- 24 unchanged lines hidden ---

1093static void
1094cxgbe_qflush(struct ifnet *ifp)
1095{
1096 struct port_info *pi = ifp->if_softc;
1097 struct sge_txq *txq;
1098 int i;
1099 struct mbuf *m;
1100
1146 }
1147
1148 /*
1149 * txq->m is the mbuf that is held up due to a temporary shortage of
1150 * resources and it should be put on the wire first. Then what's in
1151 * drbr and finally the mbuf that was just passed in to us.
1152 *
1153 * Return code should indicate the fate of the mbuf that was passed in

--- 24 unchanged lines hidden ---

1178static void
1179cxgbe_qflush(struct ifnet *ifp)
1180{
1181 struct port_info *pi = ifp->if_softc;
1182 struct sge_txq *txq;
1183 int i;
1184 struct mbuf *m;
1185
1101 /* queues do not exist if !IFF_DRV_RUNNING. */
1102 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1186 /* queues do not exist if !PORT_INIT_DONE. */
1187 if (pi->flags & PORT_INIT_DONE) {
1103 for_each_txq(pi, i, txq) {
1104 TXQ_LOCK(txq);
1105 m_freem(txq->m);
1188 for_each_txq(pi, i, txq) {
1189 TXQ_LOCK(txq);
1190 m_freem(txq->m);
1191 txq->m = NULL;
1106 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1107 m_freem(m);
1108 TXQ_UNLOCK(txq);
1109 }
1110 }
1111 if_qflush(ifp);
1112}
1113

--- 97 unchanged lines hidden ---

1211 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1212 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1213}
1214
1215static int
1216cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1217 struct intrs_and_queues *iaq)
1218{
1192 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1193 m_freem(m);
1194 TXQ_UNLOCK(txq);
1195 }
1196 }
1197 if_qflush(ifp);
1198}
1199

--- 97 unchanged lines hidden ---

1297 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1298 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1299}
1300
1301static int
1302cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1303 struct intrs_and_queues *iaq)
1304{
1219 int rc, itype, navail, nc, nrxq10g, nrxq1g;
1305 int rc, itype, navail, nrxq10g, nrxq1g, n;
1306 int nofldrxq10g = 0, nofldrxq1g = 0;
1220
1221 bzero(iaq, sizeof(*iaq));
1307
1308 bzero(iaq, sizeof(*iaq));
1222 nc = mp_ncpus; /* our snapshot of the number of CPUs */
1223
1309
1310 iaq->ntxq10g = t4_ntxq10g;
1311 iaq->ntxq1g = t4_ntxq1g;
1312 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1313 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1314#ifndef TCP_OFFLOAD_DISABLE
1315 iaq->nofldtxq10g = t4_nofldtxq10g;
1316 iaq->nofldtxq1g = t4_nofldtxq1g;
1317 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1318 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1319#endif
1320
1224 for (itype = INTR_MSIX; itype; itype >>= 1) {
1225
1321 for (itype = INTR_MSIX; itype; itype >>= 1) {
1322
1226 if ((itype & intr_types) == 0)
1323 if ((itype & t4_intr_types) == 0)
1227 continue; /* not allowed */
1228
1229 if (itype == INTR_MSIX)
1230 navail = pci_msix_count(sc->dev);
1231 else if (itype == INTR_MSI)
1232 navail = pci_msi_count(sc->dev);
1233 else
1234 navail = 1;
1324 continue; /* not allowed */
1325
1326 if (itype == INTR_MSIX)
1327 navail = pci_msix_count(sc->dev);
1328 else if (itype == INTR_MSI)
1329 navail = pci_msi_count(sc->dev);
1330 else
1331 navail = 1;
1235
1332restart:
1236 if (navail == 0)
1237 continue;
1238
1239 iaq->intr_type = itype;
1333 if (navail == 0)
1334 continue;
1335
1336 iaq->intr_type = itype;
1337 iaq->intr_flags = 0;
1240
1338
1241 iaq->ntxq10g = min(nc, max_ntxq_10g);
1242 iaq->ntxq1g = min(nc, max_ntxq_1g);
1339 /*
1340 * Best option: an interrupt vector for errors, one for the
1341 * firmware event queue, and one each for each rxq (NIC as well
1342 * as offload).
1343 */
1344 iaq->nirq = T4_EXTRA_INTR;
1345 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1346 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1347 if (iaq->nirq <= navail &&
1348 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1349 iaq->intr_flags |= INTR_DIRECT;
1350 goto allocate;
1351 }
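/*
 * Editorial worked example (assuming T4_EXTRA_INTR == 2, per the comment:
 * one vector for errors, one for the firmware event queue): two 10G
 * ports with nrxq10g = 8 and nofldrxq10g = 2 request 2 + 2 * (8 + 2) =
 * 22 MSI-X vectors here; under MSI, 22 is not a power of 2, so the
 * search falls through to the cheaper plans below.
 */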
1243
1352
1244 nrxq10g = min(nc, max_nrxq_10g);
1245 nrxq1g = min(nc, max_nrxq_1g);
1353 /*
1354 * Second best option: an interrupt vector for errors, one for
1355 * the firmware event queue, and one each for either NIC or
1356 * offload rxq's.
1357 */
1358 iaq->nirq = T4_EXTRA_INTR;
1359 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1360 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1361 if (iaq->nirq <= navail &&
1362 (itype != INTR_MSI || powerof2(iaq->nirq)))
1363 goto allocate;
1246
1364
1247 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + T4_EXTRA_INTR;
1248 if (iaq->nirq <= navail && intr_shared == 0) {
1365 /*
1366 * Next best option: an interrupt vector for errors, one for the
1367 * firmware event queue, and at least one per port. At this
1368 * point we know we'll have to downsize nrxq or nofldrxq to fit
1369 * what's available to us.
1370 */
1371 iaq->nirq = T4_EXTRA_INTR;
1372 iaq->nirq += n10g + n1g;
1373 if (iaq->nirq <= navail) {
1374 int leftover = navail - iaq->nirq;
1249
1375
1250 if (itype == INTR_MSI && !powerof2(iaq->nirq))
1251 goto share;
1376 if (n10g > 0) {
1377 int target = max(nrxq10g, nofldrxq10g);
1252
1378
1253 /* One for err, one for fwq, and one for each rxq */
1379 n = 1;
1380 while (n < target && leftover >= n10g) {
1381 leftover -= n10g;
1382 iaq->nirq += n10g;
1383 n++;
1384 }
1385 iaq->nrxq10g = min(n, nrxq10g);
1386#ifndef TCP_OFFLOAD_DISABLE
1387 iaq->nofldrxq10g = min(n, nofldrxq10g);
1388#endif
1389 }
1254
1390
1255 iaq->intr_shared = 0;
1256 iaq->nrxq10g = nrxq10g;
1257 iaq->nrxq1g = nrxq1g;
1391 if (n1g > 0) {
1392 int target = max(nrxq1g, nofldrxq1g);
1258
1393
1259 } else {
1260share:
1261 iaq->intr_shared = 1;
1262
1263 if (navail >= nc + T4_EXTRA_INTR) {
1264 if (itype == INTR_MSIX)
1265 navail = nc + T4_EXTRA_INTR;
1266
1267 /* navail is and must remain a pow2 for MSI */
1268 if (itype == INTR_MSI) {
1269 KASSERT(powerof2(navail),
1270 ("%d not power of 2", navail));
1271
1272 while (navail / 2 >= nc + T4_EXTRA_INTR)
1273 navail /= 2;
1394 n = 1;
1395 while (n < target && leftover >= n1g) {
1396 leftover -= n1g;
1397 iaq->nirq += n1g;
1398 n++;
1274 }
1399 }
1400 iaq->nrxq1g = min(n, nrxq1g);
1401#ifndef TCP_OFFLOAD_DISABLE
1402 iaq->nofldrxq1g = min(n, nofldrxq1g);
1403#endif
1275 }
1404 }
1276 iaq->nirq = navail; /* total # of interrupts */
1277
1405
1278 /*
1279 * If we have multiple vectors available reserve one
1280 * exclusively for errors. The rest will be shared by
1281 * the fwq and data.
1282 */
1283 if (navail > 1)
1284 navail--;
1285 iaq->nrxq10g = min(nrxq10g, navail);
1286 iaq->nrxq1g = min(nrxq1g, navail);
1406 if (itype != INTR_MSI || powerof2(iaq->nirq))
1407 goto allocate;
1287 }
1288
1408 }
1409
1410 /*
1411 * Least desirable option: one interrupt vector for everything.
1412 */
1413 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1414#ifndef TCP_OFFLOAD_DISABLE
1415 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1416#endif
1417
1418allocate:
1289 navail = iaq->nirq;
1290 rc = 0;
1291 if (itype == INTR_MSIX)
1292 rc = pci_alloc_msix(sc->dev, &navail);
1293 else if (itype == INTR_MSI)
1294 rc = pci_alloc_msi(sc->dev, &navail);
1295
1296 if (rc == 0) {
1297 if (navail == iaq->nirq)
1298 return (0);
1299
1300 /*
1301 * Didn't get the number requested. Use whatever number
1302 * the kernel is willing to allocate (it's in navail).
1303 */
1419 navail = iaq->nirq;
1420 rc = 0;
1421 if (itype == INTR_MSIX)
1422 rc = pci_alloc_msix(sc->dev, &navail);
1423 else if (itype == INTR_MSI)
1424 rc = pci_alloc_msi(sc->dev, &navail);
1425
1426 if (rc == 0) {
1427 if (navail == iaq->nirq)
1428 return (0);
1429
1430 /*
1431 * Didn't get the number requested. Use whatever number
1432 * the kernel is willing to allocate (it's in navail).
1433 */
1434 device_printf(sc->dev, "fewer vectors than requested, "
1435 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1436 itype, iaq->nirq, navail);
1304 pci_release_msi(sc->dev);
1437 pci_release_msi(sc->dev);
1305 goto share;
1438 goto restart;
1306 }
1307
1308 device_printf(sc->dev,
1309 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1310 itype, rc, iaq->nirq, navail);
1311 }
1312
1313 device_printf(sc->dev,
1314 "failed to find a usable interrupt type. "
1439 }
1440
1441 device_printf(sc->dev,
1442 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1443 itype, rc, iaq->nirq, navail);
1444 }
1445
1446 device_printf(sc->dev,
1447 "failed to find a usable interrupt type. "
1315 "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types,
1448 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1316 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1317
1318 return (ENXIO);
1319}
1320
1321/*
1449 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1450
1451 return (ENXIO);
1452}
1453
1454/*
1322 * Install a compatible firmware (if required), establish contact with it,
1323 * become the master, and reset the device.
1455 * Install a compatible firmware (if required), establish contact with it (by
1456 * saying hello), and reset the device. If we end up as the master driver,
1457 * partition adapter resources by providing a configuration file to the
1458 * firmware.
1324 */
1325static int
1326prep_firmware(struct adapter *sc)
1327{
1459 */
1460static int
1461prep_firmware(struct adapter *sc)
1462{
1328 const struct firmware *fw;
1463 const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
1329 int rc;
1330 enum dev_state state;
1331
1464 int rc;
1465 enum dev_state state;
1466
1467 default_cfg = firmware_get(T4_CFGNAME);
1468
1332 /* Check firmware version and install a different one if necessary */
1333 rc = t4_check_fw_version(sc);
1469 /* Check firmware version and install a different one if necessary */
1470 rc = t4_check_fw_version(sc);
1334 if (rc != 0 || force_firmware_install) {
1471 if (rc != 0) {
1335 uint32_t v = 0;
1336
1337 fw = firmware_get(T4_FWNAME);
1338 if (fw != NULL) {
1339 const struct fw_hdr *hdr = (const void *)fw->data;
1340
1341 v = ntohl(hdr->fw_ver);
1342
1343 /*
1344 * The firmware module will not be used if it isn't the
1345 * same major version as what the driver was compiled
1472 uint32_t v = 0;
1473
1474 fw = firmware_get(T4_FWNAME);
1475 if (fw != NULL) {
1476 const struct fw_hdr *hdr = (const void *)fw->data;
1477
1478 v = ntohl(hdr->fw_ver);
1479
1480 /*
1481 * The firmware module will not be used if it isn't the
1482 * same major version as what the driver was compiled
1346 * with. This check trumps force_firmware_install.
1483 * with.
1347 */
1348 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1349 device_printf(sc->dev,
1350 "Found firmware image but version %d "
1351 "can not be used with this driver (%d)\n",
1352 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1353
1354 firmware_put(fw, FIRMWARE_UNLOAD);
1355 fw = NULL;
1356 }
1357 }
1358
1484 */
1485 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1486 device_printf(sc->dev,
1487 "Found firmware image but version %d "
1488 "can not be used with this driver (%d)\n",
1489 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1490
1491 firmware_put(fw, FIRMWARE_UNLOAD);
1492 fw = NULL;
1493 }
1494 }
1495
1359 if (fw == NULL && (rc < 0 || force_firmware_install)) {
1496 if (fw == NULL && rc < 0) {
1360 device_printf(sc->dev, "No usable firmware. "
1497 device_printf(sc->dev, "No usable firmware. "
1361 "card has %d.%d.%d, driver compiled with %d.%d.%d, "
1362 "force_firmware_install%s set",
1498 "card has %d.%d.%d, driver compiled with %d.%d.%d",
1363 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1364 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1365 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1366 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1499 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1500 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1501 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1502 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1367 FW_VERSION_MICRO,
1368 force_firmware_install ? "" : " not");
1369 return (EAGAIN);
1503 FW_VERSION_MICRO);
1504 rc = EAGAIN;
1505 goto done;
1370 }
1371
1372 /*
1373 * Always upgrade, even for minor/micro/build mismatches.
1374 * Downgrade only for a major version mismatch or if
1375 * force_firmware_install was specified.
1376 */
1506 }
1507
1508 /*
1509 * Always upgrade, even for minor/micro/build mismatches.
1510 * Downgrade only for a major version mismatch or if
1511 * force_firmware_install was specified.
1512 */
1377 if (fw != NULL && (rc < 0 || force_firmware_install ||
1378 v > sc->params.fw_vers)) {
1513 if (fw != NULL && (rc < 0 || v > sc->params.fw_vers)) {
1379 device_printf(sc->dev,
1380 "installing firmware %d.%d.%d.%d on card.\n",
1381 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1382 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1383
1384 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1385 if (rc != 0) {
1386 device_printf(sc->dev,
1387 "failed to install firmware: %d\n", rc);
1514 device_printf(sc->dev,
1515 "installing firmware %d.%d.%d.%d on card.\n",
1516 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1517 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1518
1519 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1520 if (rc != 0) {
1521 device_printf(sc->dev,
1522 "failed to install firmware: %d\n", rc);
1388 firmware_put(fw, FIRMWARE_UNLOAD);
1389 return (rc);
1523 goto done;
1390 } else {
1391 /* refresh */
1392 (void) t4_check_fw_version(sc);
1393 }
1394 }
1524 } else {
1525 /* refresh */
1526 (void) t4_check_fw_version(sc);
1527 }
1528 }
1395
1396 if (fw != NULL)
1397 firmware_put(fw, FIRMWARE_UNLOAD);
1398 }
1399
1529 }
1530
1400 /* Contact firmware, request master */
1401 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1531 /* Contact firmware. */
1532 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1402 if (rc < 0) {
1403 rc = -rc;
1404 device_printf(sc->dev,
1405 "failed to connect to the firmware: %d.\n", rc);
1533 if (rc < 0) {
1534 rc = -rc;
1535 device_printf(sc->dev,
1536 "failed to connect to the firmware: %d.\n", rc);
1406 return (rc);
1537 goto done;
1407 }
1538 }
1539 if (rc == sc->mbox)
1540 sc->flags |= MASTER_PF;
1408
1409 /* Reset device */
1410 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1411 if (rc != 0) {
1412 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1413 if (rc != ETIMEDOUT && rc != EIO)
1414 t4_fw_bye(sc, sc->mbox);
1541
1542 /* Reset device */
1543 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1544 if (rc != 0) {
1545 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1546 if (rc != ETIMEDOUT && rc != EIO)
1547 t4_fw_bye(sc, sc->mbox);
1415 return (rc);
1548 goto done;
1416 }
1417
1549 }
1550
1551 /* Partition adapter resources as specified in the config file. */
1552 if (sc->flags & MASTER_PF) {
1553 if (strncmp(t4_cfg_file, "default", sizeof(t4_cfg_file))) {
1554 char s[32];
1555
1556 snprintf(s, sizeof(s), "t4fw_cfg_%s", t4_cfg_file);
1557 cfg = firmware_get(s);
1558 if (cfg == NULL) {
1559 device_printf(sc->dev,
1560 "unable to locate %s module, "
1561 "will use default config file.\n", s);
1562 }
1563 }
1564
1565 rc = partition_resources(sc, cfg ? cfg : default_cfg);
1566 if (rc != 0)
1567 goto done; /* error message displayed already */
1568 }
1569
1418 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1419 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1420 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1421 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1422 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1423 sc->flags |= FW_OK;
1424
1570 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1571 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1572 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1573 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1574 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1575 sc->flags |= FW_OK;
1576
1425 return (0);
1577done:
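	/*
	 * All firmware(9) module references taken above (fw, cfg, and
	 * default_cfg) are dropped here, on both the success and the
	 * error paths.
	 */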
1578 if (fw != NULL)
1579 firmware_put(fw, FIRMWARE_UNLOAD);
1580 if (cfg != NULL)
1581 firmware_put(cfg, FIRMWARE_UNLOAD);
1582 if (default_cfg != NULL)
1583 firmware_put(default_cfg, FIRMWARE_UNLOAD);
1584
1585 return (rc);
1426}
1427
1586}
1587
1588#define FW_PARAM_DEV(param) \
1589 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1590 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1591#define FW_PARAM_PFVF(param) \
1592 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1593 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1594
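/*
 * Mechanical expansion of the macros above, e.g. FW_PARAM_DEV(PORTVEC)
 * becomes
 *
 *	V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC)
 *
 * i.e. a 32-bit parameter id that t4_query_params() hands to the
 * firmware; the value for each id comes back in the matching val[] slot.
 */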
1595/*
1596 * Upload configuration file to card's memory.
1597 */
1428static int
1598static int
1429get_devlog_params(struct adapter *sc, struct devlog_params *dlog)
1599upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
1600 uint32_t *ma)
1430{
1601{
1431 struct fw_devlog_cmd devlog_cmd;
1432 uint32_t meminfo;
1433 int rc;
1602 int rc, i;
1603 uint32_t param, val, mtype, maddr, bar, off, win, remaining;
1604 const uint32_t *b;
1434
1605
1435 bzero(&devlog_cmd, sizeof(devlog_cmd));
1436 devlog_cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1437 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1438 devlog_cmd.retval_len16 = htobe32(FW_LEN16(devlog_cmd));
1439 rc = -t4_wr_mbox(sc, sc->mbox, &devlog_cmd, sizeof(devlog_cmd),
1440 &devlog_cmd);
1606 /* Figure out where the firmware wants us to upload it. */
1607 param = FW_PARAM_DEV(CF);
1608 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1441 if (rc != 0) {
1609 if (rc != 0) {
1610 /* Firmwares without config file support will fail this way */
1442 device_printf(sc->dev,
1611 device_printf(sc->dev,
1443 "failed to get devlog parameters: %d.\n", rc);
1444 bzero(dlog, sizeof (*dlog));
1612 "failed to query config file location: %d.\n", rc);
1445 return (rc);
1446 }
1613 return (rc);
1614 }
1615 *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1616 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1447
1617
1448 meminfo = be32toh(devlog_cmd.memtype_devlog_memaddr16_devlog);
1449 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(meminfo);
1450 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(meminfo) << 4;
1451 dlog->size = be32toh(devlog_cmd.memsize_devlog);
1618 if (maddr & 3) {
1619 device_printf(sc->dev,
1620 "cannot upload config file (type %u, addr %x).\n",
1621 mtype, maddr);
1622 return (EFAULT);
1623 }
1452
1624
1453 return (0);
1625 /* Translate mtype/maddr to an address suitable for the PCIe window */
1626 val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1627 val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
1628 switch (mtype) {
1629 case FW_MEMTYPE_CF_EDC0:
1630 if (!(val & F_EDRAM0_ENABLE))
1631 goto err;
1632 bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1633 maddr += G_EDRAM0_BASE(bar) << 20;
1634 break;
1635
1636 case FW_MEMTYPE_CF_EDC1:
1637 if (!(val & F_EDRAM1_ENABLE))
1638 goto err;
1639 bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1640 maddr += G_EDRAM1_BASE(bar) << 20;
1641 break;
1642
1643 case FW_MEMTYPE_CF_EXTMEM:
1644 if (!(val & F_EXT_MEM_ENABLE))
1645 goto err;
1646 bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1647 maddr += G_EXT_MEM_BASE(bar) << 20;
1648 break;
1649
1650 default:
1651err:
1652 device_printf(sc->dev,
1653 "cannot upload config file (type %u, enabled %u).\n",
1654 mtype, val);
1655 return (EFAULT);
1656 }
1657
1658 /*
1659 * Position the PCIe window (we use memwin2) to the 16B aligned area
1660 * just at/before the upload location.
1661 */
1662 win = maddr & ~0xf;
1663 off = maddr - win; /* offset from the start of the window. */
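	/*
	 * Example with a hypothetical address: maddr = 0x1f4534 gives
	 * win = 0x1f4530 and off = 4, so writes through the window
	 * start 4 bytes in and still land exactly at maddr.
	 */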
1664 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
1665 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
1666
1667 remaining = fw->datasize;
1668 if (remaining > FLASH_CFG_MAX_SIZE ||
1669 remaining > MEMWIN2_APERTURE - off) {
1670 device_printf(sc->dev, "cannot upload config file all at once "
1671 "(size %u, max %u, room %u).\n",
1672 remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
1673 return (EFBIG);
1674 }
1675
1676 /*
1677 * XXX: sheer laziness. We deliberately added 4 bytes of useless
1678 * stuffing/comments at the end of the config file so it's ok to simply
1679 * throw away the last remaining bytes when the config file is not an
1680 * exact multiple of 4.
1681 */
1682 b = fw->data;
1683 for (i = 0; remaining >= 4; i += 4, remaining -= 4)
1684 t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);
1685
1686 return (rc);
1454}
1455
1687}
1688
1689/*
1690 * Partition chip resources for use between various PFs, VFs, etc. This is done
1691 * by uploading the firmware configuration file to the adapter and instructing
1692 * the firmware to process it.
1693 */
1456static int
1694static int
1457get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
1695partition_resources(struct adapter *sc, const struct firmware *cfg)
1458{
1459 int rc;
1696{
1697 int rc;
1698 struct fw_caps_config_cmd caps;
1699 uint32_t mtype, maddr, finicsum, cfcsum;
1460
1700
1461 bzero(caps, sizeof(*caps));
1462 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1463 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1464 caps->retval_len16 = htobe32(FW_LEN16(*caps));
1701 rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
1702 if (rc != 0) {
1703 mtype = FW_MEMTYPE_CF_FLASH;
1704 maddr = t4_flash_cfg_addr(sc);
1705 }
1465
1706
1466 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps);
1467 if (rc != 0)
1707 bzero(&caps, sizeof(caps));
1708 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1709 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1710 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1711 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1712 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1713 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1714 if (rc != 0) {
1715 device_printf(sc->dev,
1716 "failed to pre-process config file: %d.\n", rc);
1468 return (rc);
1717 return (rc);
1718 }
1469
1719
1470 if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM))
1471 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM);
1720 finicsum = be32toh(caps.finicsum);
1721 cfcsum = be32toh(caps.cfcsum);
1722 if (finicsum != cfcsum) {
1723 device_printf(sc->dev,
1724 "WARNING: config file checksum mismatch: %08x %08x\n",
1725 finicsum, cfcsum);
1726 }
1727 sc->cfcsum = cfcsum;
1472
1728
1473 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1729#define LIMIT_CAPS(x) do { \
1730 caps.x &= htobe16(t4_##x##_allowed); \
1731 sc->x = htobe16(caps.x); \
1732} while (0)
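/*
 * Mechanical expansion of the macro above, e.g. LIMIT_CAPS(niccaps):
 *
 *	caps.niccaps &= htobe16(t4_niccaps_allowed);
 *	sc->niccaps = htobe16(caps.niccaps);
 *
 * i.e. each big-endian capability word from the firmware is masked by
 * its driver tunable and the result is cached in the softc.
 */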
1733
1734 /*
1735 * Let the firmware know what features will (not) be used so it can tune
1736 * things accordingly.
1737 */
1738 LIMIT_CAPS(linkcaps);
1739 LIMIT_CAPS(niccaps);
1740 LIMIT_CAPS(toecaps);
1741 LIMIT_CAPS(rdmacaps);
1742 LIMIT_CAPS(iscsicaps);
1743 LIMIT_CAPS(fcoecaps);
1744#undef LIMIT_CAPS
1745
1746 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1474 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1747 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1475 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL);
1748 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1749 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
1750 if (rc != 0) {
1751 device_printf(sc->dev,
1752 "failed to process config file: %d.\n", rc);
1753 return (rc);
1754 }
1476
1755
1477 return (rc);
1756 return (0);
1478}
1479
1757}
1758
1759/*
1760 * Retrieve parameters that are needed (or nice to have) prior to calling
1761 * t4_sge_init and t4_fw_initialize.
1762 */
1480static int
1763static int
1481get_params(struct adapter *sc, struct fw_caps_config_cmd *caps)
1764get_params__pre_init(struct adapter *sc)
1482{
1483 int rc;
1765{
1766 int rc;
1484 uint32_t params[7], val[7];
1767 uint32_t param[2], val[2];
1768 struct fw_devlog_cmd cmd;
1769 struct devlog_params *dlog = &sc->params.devlog;
1485
1770
1486#define FW_PARAM_DEV(param) \
1487 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1488 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1489#define FW_PARAM_PFVF(param) \
1490 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1491 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1492
1493 params[0] = FW_PARAM_DEV(PORTVEC);
1494 params[1] = FW_PARAM_PFVF(IQFLINT_START);
1495 params[2] = FW_PARAM_PFVF(EQ_START);
1496 params[3] = FW_PARAM_PFVF(FILTER_START);
1497 params[4] = FW_PARAM_PFVF(FILTER_END);
1498 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val);
1771 param[0] = FW_PARAM_DEV(PORTVEC);
1772 param[1] = FW_PARAM_DEV(CCLK);
1773 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1499 if (rc != 0) {
1500 device_printf(sc->dev,
1774 if (rc != 0) {
1775 device_printf(sc->dev,
1501 "failed to query parameters: %d.\n", rc);
1502 goto done;
1776 "failed to query parameters (pre_init): %d.\n", rc);
1777 return (rc);
1503 }
1504
1505 sc->params.portvec = val[0];
1506 sc->params.nports = 0;
1507 while (val[0]) {
1508 sc->params.nports++;
1509 val[0] &= val[0] - 1;
1510 }
1511
1778 }
1779
1780 sc->params.portvec = val[0];
1781 sc->params.nports = 0;
1782 while (val[0]) {
1783 sc->params.nports++;
1784 val[0] &= val[0] - 1;
1785 }
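	/*
	 * The loop above is Kernighan's population count: each
	 * iteration of val[0] &= val[0] - 1 clears the lowest set bit.
	 * E.g. a portvec of 0xb (ports 0, 1, and 3) yields nports = 3.
	 */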
1786
1512 sc->sge.iq_start = val[1];
1513 sc->sge.eq_start = val[2];
1514 sc->tids.ftid_base = val[3];
1515 sc->tids.nftids = val[4] - val[3] + 1;
1787 sc->params.vpd.cclk = val[1];
1516
1788
1517 if (caps->toecaps) {
1789 /* Read device log parameters. */
1790 bzero(&cmd, sizeof(cmd));
1791 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1792 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1793 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
1794 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
1795 if (rc != 0) {
1796 device_printf(sc->dev,
1797 "failed to get devlog parameters: %d.\n", rc);
1798 bzero(dlog, sizeof (*dlog));
1799 rc = 0; /* devlog isn't critical for device operation */
1800 } else {
1801 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
1802 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1803 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1804 dlog->size = be32toh(cmd.memsize_devlog);
1805 }
1806
1807 return (rc);
1808}
1809
1810/*
1811 * Retrieve various parameters that are of interest to the driver. The device
1812 * has been initialized by the firmware at this point.
1813 */
1814static int
1815get_params__post_init(struct adapter *sc)
1816{
1817 int rc;
1818 uint32_t param[7], val[7];
1819 struct fw_caps_config_cmd caps;
1820
1821 param[0] = FW_PARAM_PFVF(IQFLINT_START);
1822 param[1] = FW_PARAM_PFVF(EQ_START);
1823 param[2] = FW_PARAM_PFVF(FILTER_START);
1824 param[3] = FW_PARAM_PFVF(FILTER_END);
1825 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
1826 if (rc != 0) {
1827 device_printf(sc->dev,
1828 "failed to query parameters (post_init): %d.\n", rc);
1829 return (rc);
1830 }
1831
1832 sc->sge.iq_start = val[0];
1833 sc->sge.eq_start = val[1];
1834 sc->tids.ftid_base = val[2];
1835 sc->tids.nftids = val[3] - val[2] + 1;
1836
1837 /* get capabilities */
1838 bzero(&caps, sizeof(caps));
1839 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1840 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1841 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1842 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1843 if (rc != 0) {
1844 device_printf(sc->dev,
1845 "failed to get card capabilities: %d.\n", rc);
1846 return (rc);
1847 }
1848
1849 if (caps.toecaps) {
1518 /* query offload-related parameters */
1850 /* query offload-related parameters */
1519 params[0] = FW_PARAM_DEV(NTID);
1520 params[1] = FW_PARAM_PFVF(SERVER_START);
1521 params[2] = FW_PARAM_PFVF(SERVER_END);
1522 params[3] = FW_PARAM_PFVF(TDDP_START);
1523 params[4] = FW_PARAM_PFVF(TDDP_END);
1524 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1525 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
1851 param[0] = FW_PARAM_DEV(NTID);
1852 param[1] = FW_PARAM_PFVF(SERVER_START);
1853 param[2] = FW_PARAM_PFVF(SERVER_END);
1854 param[3] = FW_PARAM_PFVF(TDDP_START);
1855 param[4] = FW_PARAM_PFVF(TDDP_END);
1856 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1857 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1526 if (rc != 0) {
1527 device_printf(sc->dev,
1528 "failed to query TOE parameters: %d.\n", rc);
1858 if (rc != 0) {
1859 device_printf(sc->dev,
1860 "failed to query TOE parameters: %d.\n", rc);
1529 goto done;
1861 return (rc);
1530 }
1531 sc->tids.ntids = val[0];
1532 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1533 sc->tids.stid_base = val[1];
1534 sc->tids.nstids = val[2] - val[1] + 1;
1535 sc->vres.ddp.start = val[3];
1536 sc->vres.ddp.size = val[4] - val[3] + 1;
1537 sc->params.ofldq_wr_cred = val[5];
1538 sc->params.offload = 1;
1539 }
1862 }
1863 sc->tids.ntids = val[0];
1864 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1865 sc->tids.stid_base = val[1];
1866 sc->tids.nstids = val[2] - val[1] + 1;
1867 sc->vres.ddp.start = val[3];
1868 sc->vres.ddp.size = val[4] - val[3] + 1;
1869 sc->params.ofldq_wr_cred = val[5];
1870 sc->params.offload = 1;
1871 }
1540 if (caps->rdmacaps) {
1541 params[0] = FW_PARAM_PFVF(STAG_START);
1542 params[1] = FW_PARAM_PFVF(STAG_END);
1543 params[2] = FW_PARAM_PFVF(RQ_START);
1544 params[3] = FW_PARAM_PFVF(RQ_END);
1545 params[4] = FW_PARAM_PFVF(PBL_START);
1546 params[5] = FW_PARAM_PFVF(PBL_END);
1547 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
1872 if (caps.rdmacaps) {
1873 param[0] = FW_PARAM_PFVF(STAG_START);
1874 param[1] = FW_PARAM_PFVF(STAG_END);
1875 param[2] = FW_PARAM_PFVF(RQ_START);
1876 param[3] = FW_PARAM_PFVF(RQ_END);
1877 param[4] = FW_PARAM_PFVF(PBL_START);
1878 param[5] = FW_PARAM_PFVF(PBL_END);
1879 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1548 if (rc != 0) {
1549 device_printf(sc->dev,
1880 if (rc != 0) {
1881 device_printf(sc->dev,
1550 "failed to query RDMA parameters: %d.\n", rc);
1551 goto done;
1882 "failed to query RDMA parameters(1): %d.\n", rc);
1883 return (rc);
1552 }
1553 sc->vres.stag.start = val[0];
1554 sc->vres.stag.size = val[1] - val[0] + 1;
1555 sc->vres.rq.start = val[2];
1556 sc->vres.rq.size = val[3] - val[2] + 1;
1557 sc->vres.pbl.start = val[4];
1558 sc->vres.pbl.size = val[5] - val[4] + 1;
1884 }
1885 sc->vres.stag.start = val[0];
1886 sc->vres.stag.size = val[1] - val[0] + 1;
1887 sc->vres.rq.start = val[2];
1888 sc->vres.rq.size = val[3] - val[2] + 1;
1889 sc->vres.pbl.start = val[4];
1890 sc->vres.pbl.size = val[5] - val[4] + 1;
1891
1892 param[0] = FW_PARAM_PFVF(SQRQ_START);
1893 param[1] = FW_PARAM_PFVF(SQRQ_END);
1894 param[2] = FW_PARAM_PFVF(CQ_START);
1895 param[3] = FW_PARAM_PFVF(CQ_END);
1896 param[4] = FW_PARAM_PFVF(OCQ_START);
1897 param[5] = FW_PARAM_PFVF(OCQ_END);
1898 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1899 if (rc != 0) {
1900 device_printf(sc->dev,
1901 "failed to query RDMA parameters(2): %d.\n", rc);
1902 return (rc);
1903 }
1904 sc->vres.qp.start = val[0];
1905 sc->vres.qp.size = val[1] - val[0] + 1;
1906 sc->vres.cq.start = val[2];
1907 sc->vres.cq.size = val[3] - val[2] + 1;
1908 sc->vres.ocq.start = val[4];
1909 sc->vres.ocq.size = val[5] - val[4] + 1;
1559 }
1910 }
1560 if (caps->iscsicaps) {
1561 params[0] = FW_PARAM_PFVF(ISCSI_START);
1562 params[1] = FW_PARAM_PFVF(ISCSI_END);
1563 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val);
1911 if (caps.iscsicaps) {
1912 param[0] = FW_PARAM_PFVF(ISCSI_START);
1913 param[1] = FW_PARAM_PFVF(ISCSI_END);
1914 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1564 if (rc != 0) {
1565 device_printf(sc->dev,
1566 "failed to query iSCSI parameters: %d.\n", rc);
1915 if (rc != 0) {
1916 device_printf(sc->dev,
1917 "failed to query iSCSI parameters: %d.\n", rc);
1567 goto done;
1918 return (rc);
1568 }
1569 sc->vres.iscsi.start = val[0];
1570 sc->vres.iscsi.size = val[1] - val[0] + 1;
1571 }
1919 }
1920 sc->vres.iscsi.start = val[0];
1921 sc->vres.iscsi.size = val[1] - val[0] + 1;
1922 }
1572#undef FW_PARAM_PFVF
1573#undef FW_PARAM_DEV
1574
1923
1575done:
1924 /* These are finalized by FW initialization, load their values now */
1925 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1926 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1927 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1928 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1929
1576 return (rc);
1577}
1578
1930 return (rc);
1931}
1932
1933#undef FW_PARAM_PFVF
1934#undef FW_PARAM_DEV
1935
1579static void
1580t4_set_desc(struct adapter *sc)
1581{
1582 char buf[128];
1583 struct adapter_params *p = &sc->params;
1584
1936static void
1937t4_set_desc(struct adapter *sc)
1938{
1939 char buf[128];
1940 struct adapter_params *p = &sc->params;
1941
1585 snprintf(buf, sizeof(buf),
1586 "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s",
1587 p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "",
1588 p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1589 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec);
1942 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
1943 p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec);
1590
1591 device_set_desc_copy(sc->dev, buf);
1592}
1593
1594static void
1595build_medialist(struct port_info *pi)
1596{
1597 struct ifmedia *media = &pi->media;

--- 200 unchanged lines hidden ---

1798 return (rc);
1799}
1800
1801static int
1802cxgbe_init_synchronized(struct port_info *pi)
1803{
1804 struct adapter *sc = pi->adapter;
1805 struct ifnet *ifp = pi->ifp;
1944
1945 device_set_desc_copy(sc->dev, buf);
1946}
1947
1948static void
1949build_medialist(struct port_info *pi)
1950{
1951 struct ifmedia *media = &pi->media;

--- 200 unchanged lines hidden ---

2152 return (rc);
2153}
2154
2155static int
2156cxgbe_init_synchronized(struct port_info *pi)
2157{
2158 struct adapter *sc = pi->adapter;
2159 struct ifnet *ifp = pi->ifp;
1806 int rc = 0, i;
1807 uint16_t *rss;
1808 struct sge_rxq *rxq;
2160 int rc = 0;
1809
1810 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1811
1812 if (isset(&sc->open_device_map, pi->port_id)) {
1813 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
1814 ("mismatch between open_device_map and if_drv_flags"));
1815 return (0); /* already running */
1816 }
1817
2161
2162 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2163
2164 if (isset(&sc->open_device_map, pi->port_id)) {
2165 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2166 ("mismatch between open_device_map and if_drv_flags"));
2167 return (0); /* already running */
2168 }
2169
1818 if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0))
2170 if (!(sc->flags & FULL_INIT_DONE) &&
2171 ((rc = adapter_full_init(sc)) != 0))
1819 return (rc); /* error message displayed already */
1820
2172 return (rc); /* error message displayed already */
2173
1821 /*
1822 * Allocate tx/rx/fl queues for this port.
1823 */
1824 rc = t4_setup_eth_queues(pi);
1825 if (rc != 0)
1826 goto done; /* error message displayed already */
2174 if (!(pi->flags & PORT_INIT_DONE) &&
2175 ((rc = port_full_init(pi)) != 0))
2176 return (rc); /* error message displayed already */
1827
2177
1828 /*
1829 * Setup RSS for this port.
1830 */
1831 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
1832 for_each_rxq(pi, i, rxq) {
1833 rss[i] = rxq->iq.abs_id;
1834 }
1835 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
1836 pi->nrxq);
1837 free(rss, M_CXGBE);
1838 if (rc != 0) {
1839 if_printf(ifp, "rss_config failed: %d\n", rc);
1840 goto done;
1841 }
1842
1843 PORT_LOCK(pi);
1844 rc = update_mac_settings(pi, XGMAC_ALL);
1845 PORT_UNLOCK(pi);
1846 if (rc)
1847 goto done; /* error message displayed already */
1848
1849 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
1850 if (rc != 0) {
1851 if_printf(ifp, "start_link failed: %d\n", rc);
1852 goto done;
1853 }
1854
1855 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
1856 if (rc != 0) {
1857 if_printf(ifp, "enable_vi failed: %d\n", rc);
1858 goto done;
1859 }
2178 PORT_LOCK(pi);
2179 rc = update_mac_settings(pi, XGMAC_ALL);
2180 PORT_UNLOCK(pi);
2181 if (rc)
2182 goto done; /* error message displayed already */
2183
2184 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2185 if (rc != 0) {
2186 if_printf(ifp, "start_link failed: %d\n", rc);
2187 goto done;
2188 }
2189
2190 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2191 if (rc != 0) {
2192 if_printf(ifp, "enable_vi failed: %d\n", rc);
2193 goto done;
2194 }
1860 pi->flags |= VI_ENABLED;
1861
1862 /* all ok */
1863 setbit(&sc->open_device_map, pi->port_id);
1864 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2195
2196 /* all ok */
2197 setbit(&sc->open_device_map, pi->port_id);
2198 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1865 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1866
1867 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
1868done:
1869 if (rc != 0)
1870 cxgbe_uninit_synchronized(pi);
1871
1872 return (rc);
1873}

--- 36 unchanged lines hidden ---

1910 */
1911static int
1912cxgbe_uninit_synchronized(struct port_info *pi)
1913{
1914 struct adapter *sc = pi->adapter;
1915 struct ifnet *ifp = pi->ifp;
1916 int rc;
1917
2199
2200 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2201done:
2202 if (rc != 0)
2203 cxgbe_uninit_synchronized(pi);
2204
2205 return (rc);
2206}

--- 36 unchanged lines hidden ---

2243 */
2244static int
2245cxgbe_uninit_synchronized(struct port_info *pi)
2246{
2247 struct adapter *sc = pi->adapter;
2248 struct ifnet *ifp = pi->ifp;
2249 int rc;
2250
1918 /*
1919 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1920 */
1921 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1922
1923 /*
2251 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2252
2253 /*
1924 * Clear this port's bit from the open device map, and then drain
1925 * tasks and callouts.
2254 * Disable the VI so that all its data in either direction is discarded
2255 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2256 * tick) intact as the TP can deliver negative advice or data that it's
2257 * holding in its RAM (for an offloaded connection) even after the VI is
2258 * disabled.
1926 */
2259 */
1927 clrbit(&sc->open_device_map, pi->port_id);
1928
1929 PORT_LOCK(pi);
1930 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1931 callout_stop(&pi->tick);
1932 PORT_UNLOCK(pi);
1933 callout_drain(&pi->tick);
1934
1935 /*
1936 * Stop and then free the queues' resources, including the queues
1937 * themselves.
1938 *
1939 * XXX: we could just stop the queues here (on ifconfig down) and free
1940 * them later (on port detach), but having up/down go through the entire
1941 * allocate/activate/deactivate/free sequence is a good way to find
1942 * leaks and bugs.
1943 */
1944 rc = t4_teardown_eth_queues(pi);
1945 if (rc != 0)
1946 if_printf(ifp, "teardown failed: %d\n", rc);
1947
1948 if (pi->flags & VI_ENABLED) {
1949 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
1950 if (rc)
1951 if_printf(ifp, "disable_vi failed: %d\n", rc);
1952 else
1953 pi->flags &= ~VI_ENABLED;
2260 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2261 if (rc) {
2262 if_printf(ifp, "disable_vi failed: %d\n", rc);
2263 return (rc);
1954 }
1955
2264 }
2265
2266 clrbit(&sc->open_device_map, pi->port_id);
2267 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2268
1956 pi->link_cfg.link_ok = 0;
1957 pi->link_cfg.speed = 0;
1958 t4_os_link_changed(sc, pi->port_id, 0);
1959
2269 pi->link_cfg.link_ok = 0;
2270 pi->link_cfg.speed = 0;
2271 t4_os_link_changed(sc, pi->port_id, 0);
2272
1960 if (sc->open_device_map == 0)
1961 last_port_down(sc);
1962
1963 return (0);
1964}
1965
1966#define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
1967 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
1968 if (rc != 0) \
1969 goto done; \
1970} while (0)
2273 return (0);
2274}
2275
2276#define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
2277 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
2278 if (rc != 0) \
2279 goto done; \
2280} while (0)
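/*
 * Note: the macro expands to a goto targeting the caller's local "done"
 * label, so it is only safe to use inside a function that defines one
 * (adapter_full_init below).
 */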
2281
1971static int
2282static int
1972first_port_up(struct adapter *sc)
2283adapter_full_init(struct adapter *sc)
1973{
1974 int rc, i, rid, p, q;
1975 char s[8];
1976 struct irq *irq;
2284{
2285 int rc, i, rid, p, q;
2286 char s[8];
2287 struct irq *irq;
1977 struct sge_iq *intrq;
2288 struct port_info *pi;
2289 struct sge_rxq *rxq;
2290#ifndef TCP_OFFLOAD_DISABLE
2291 struct sge_ofld_rxq *ofld_rxq;
2292#endif
1978
1979 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2293
2294 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2295 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
2296 ("%s: FULL_INIT_DONE already", __func__));
1980
1981 /*
1982 * queues that belong to the adapter (not any particular port).
1983 */
1984 rc = t4_setup_adapter_queues(sc);
1985 if (rc != 0)
1986 goto done;
1987
2297
2298 /*
2299 * queues that belong to the adapter (not any particular port).
2300 */
2301 rc = t4_setup_adapter_queues(sc);
2302 if (rc != 0)
2303 goto done;
2304
2305 for (i = 0; i < ARRAY_SIZE(sc->tq); i++) {
2306 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
2307 taskqueue_thread_enqueue, &sc->tq[i]);
2308 if (sc->tq[i] == NULL) {
2309 device_printf(sc->dev,
2310 "failed to allocate task queue %d\n", i);
2311 rc = ENOMEM;
2312 goto done;
2313 }
2314 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
2315 device_get_nameunit(sc->dev), i);
2316 }
2317
1988 /*
1989 * Setup interrupts.
1990 */
1991 irq = &sc->irq[0];
1992 rid = sc->intr_type == INTR_INTX ? 0 : 1;
1993 if (sc->intr_count == 1) {
2318 /*
2319 * Setup interrupts.
2320 */
2321 irq = &sc->irq[0];
2322 rid = sc->intr_type == INTR_INTX ? 0 : 1;
2323 if (sc->intr_count == 1) {
1994 KASSERT(sc->flags & INTR_SHARED,
1995 ("%s: single interrupt but not shared?", __func__));
2324 KASSERT(!(sc->flags & INTR_DIRECT),
2325 ("%s: single interrupt && INTR_DIRECT?", __func__));
1996
1997 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
1998 } else {
2326
2327 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
2328 } else {
1999 /* Multiple interrupts. The first one is always error intr */
2329 /* Multiple interrupts. */
2330 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2331 ("%s: too few intr.", __func__));
2332
2333 /* The first one is always error intr */
2000 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
2001 irq++;
2002 rid++;
2003
2334 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
2335 irq++;
2336 rid++;
2337
2004 /* Firmware event queue normally has an interrupt of its own */
2005 if (sc->intr_count > T4_EXTRA_INTR) {
2006 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
2007 "evt");
2008 irq++;
2009 rid++;
2010 }
2338 /* The second one is always the firmware event queue */
2339 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
2340 irq++;
2341 rid++;
2011
2342
2012 intrq = &sc->sge.intrq[0];
2013 if (sc->flags & INTR_SHARED) {
2343 /*
2344 * Note that if INTR_DIRECT is not set then exactly one of the
2345 * NIC rx queues or the TOE rx queues will be taking direct
2346 * interrupts.
2347 *
2348 * There is no need to check for is_offload(sc) as nofldrxq
2349 * will be 0 if offload is disabled.
2350 */
2351 for_each_port(sc, p) {
2352 pi = sc->port[p];
2014
2353
2015 /* All ports share these interrupt queues */
2016
2017 for (i = 0; i < NINTRQ(sc); i++) {
2018 snprintf(s, sizeof(s), "*.%d", i);
2019 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, intrq, s);
2354#ifndef TCP_OFFLOAD_DISABLE
2355 /*
2356 * Skip over the NIC queues if they aren't taking direct
2357 * interrupts.
2358 */
2359 if (!(sc->flags & INTR_DIRECT) &&
2360 pi->nofldrxq > pi->nrxq)
2361 goto ofld_queues;
2362#endif
2363 rxq = &sc->sge.rxq[pi->first_rxq];
2364 for (q = 0; q < pi->nrxq; q++, rxq++) {
2365 snprintf(s, sizeof(s), "%d.%d", p, q);
2366 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, rxq, s);
2020 irq++;
2021 rid++;
2367 irq++;
2368 rid++;
2022 intrq++;
2023 }
2369 }
2024 } else {
2025
2370
2026 /* Each port has its own set of interrupt queues */
2027
2028 for (p = 0; p < sc->params.nports; p++) {
2029 for (q = 0; q < sc->port[p]->nrxq; q++) {
2030 snprintf(s, sizeof(s), "%d.%d", p, q);
2031 T4_ALLOC_IRQ(sc, irq, rid, t4_intr,
2032 intrq, s);
2033 irq++;
2034 rid++;
2035 intrq++;
2036 }
2371#ifndef TCP_OFFLOAD_DISABLE
2372 /*
2373 * Skip over the offload queues if they aren't taking
2374 * direct interrupts.
2375 */
2376 if (!(sc->flags & INTR_DIRECT))
2377 continue;
2378ofld_queues:
2379 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
2380 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
2381 snprintf(s, sizeof(s), "%d,%d", p, q);
2382 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, ofld_rxq, s);
2383 irq++;
2384 rid++;
2037 }
2385 }
2386#endif
2038 }
2039 }
2040
2041 t4_intr_enable(sc);
2042 sc->flags |= FULL_INIT_DONE;
2387 }
2388 }
2389
2390 t4_intr_enable(sc);
2391 sc->flags |= FULL_INIT_DONE;
2043
2044done:
2045 if (rc != 0)
2392done:
2393 if (rc != 0)
2046 last_port_down(sc);
2394 adapter_full_uninit(sc);
2047
2048 return (rc);
2049}
2050#undef T4_ALLOC_IRQ
2051
2395
2396 return (rc);
2397}
2398#undef T4_ALLOC_IRQ
2399
2052/*
2053 * Idempotent.
2054 */
2055static int
2400static int
2056last_port_down(struct adapter *sc)
2401adapter_full_uninit(struct adapter *sc)
2057{
2058 int i;
2059
2060 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2061
2402{
2403 int i;
2404
2405 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2406
2062 t4_intr_disable(sc);
2063
2064 t4_teardown_adapter_queues(sc);
2065
2066 for (i = 0; i < sc->intr_count; i++)
2067 t4_free_irq(sc, &sc->irq[i]);
2068
2407 t4_teardown_adapter_queues(sc);
2408
2409 for (i = 0; i < sc->intr_count; i++)
2410 t4_free_irq(sc, &sc->irq[i]);
2411
2412 for (i = 0; i < ARRAY_SIZE(sc->tq) && sc->tq[i]; i++) {
2413 taskqueue_free(sc->tq[i]);
2414 sc->tq[i] = NULL;
2415 }
2416
2069 sc->flags &= ~FULL_INIT_DONE;
2070
2071 return (0);
2072}
2073
2074static int
2417 sc->flags &= ~FULL_INIT_DONE;
2418
2419 return (0);
2420}
2421
2422static int
2423port_full_init(struct port_info *pi)
2424{
2425 struct adapter *sc = pi->adapter;
2426 struct ifnet *ifp = pi->ifp;
2427 uint16_t *rss;
2428 struct sge_rxq *rxq;
2429 int rc, i;
2430
2431 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2432 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
2433 ("%s: PORT_INIT_DONE already", __func__));
2434
2435 sysctl_ctx_init(&pi->ctx);
2436 pi->flags |= PORT_SYSCTL_CTX;
2437
2438 /*
2439 * Allocate tx/rx/fl queues for this port.
2440 */
2441 rc = t4_setup_port_queues(pi);
2442 if (rc != 0)
2443 goto done; /* error message displayed already */
2444
2445 /*
2446 * Setup RSS for this port.
2447 */
2448 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
2449 M_ZERO | M_WAITOK);
2450 for_each_rxq(pi, i, rxq) {
2451 rss[i] = rxq->iq.abs_id;
2452 }
2453 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2454 pi->rss_size, rss, pi->nrxq);
2455 free(rss, M_CXGBE);
2456 if (rc != 0) {
2457 if_printf(ifp, "rss_config failed: %d\n", rc);
2458 goto done;
2459 }
2460
2461 pi->flags |= PORT_INIT_DONE;
2462done:
2463 if (rc != 0)
2464 port_full_uninit(pi);
2465
2466 return (rc);
2467}
2468
2469/*
2470 * Idempotent.
2471 */
2472static int
2473port_full_uninit(struct port_info *pi)
2474{
2475 struct adapter *sc = pi->adapter;
2476 int i;
2477 struct sge_rxq *rxq;
2478 struct sge_txq *txq;
2479#ifndef TCP_OFFLOAD_DISABLE
2480 struct sge_ofld_rxq *ofld_rxq;
2481 struct sge_wrq *ofld_txq;
2482#endif
2483
2484 if (pi->flags & PORT_INIT_DONE) {
2485
2486 /* Need to quiesce queues. XXX: ctrl queues? */
2487
2488 for_each_txq(pi, i, txq) {
2489 quiesce_eq(sc, &txq->eq);
2490 }
2491
2492#ifndef TCP_OFFLOAD_DISABLE
2493 for_each_ofld_txq(pi, i, ofld_txq) {
2494 quiesce_eq(sc, &ofld_txq->eq);
2495 }
2496#endif
2497
2498 for_each_rxq(pi, i, rxq) {
2499 quiesce_iq(sc, &rxq->iq);
2500 quiesce_fl(sc, &rxq->fl);
2501 }
2502
2503#ifndef TCP_OFFLOAD_DISABLE
2504 for_each_ofld_rxq(pi, i, ofld_rxq) {
2505 quiesce_iq(sc, &ofld_rxq->iq);
2506 quiesce_fl(sc, &ofld_rxq->fl);
2507 }
2508#endif
2509 }
2510
2511 t4_teardown_port_queues(pi);
2512 pi->flags &= ~PORT_INIT_DONE;
2513
2514 return (0);
2515}
2516
2517static void
2518quiesce_eq(struct adapter *sc, struct sge_eq *eq)
2519{
2520 EQ_LOCK(eq);
2521 eq->flags |= EQ_DOOMED;
2522
2523 /*
2524 * Wait for the response to a credit flush if one's
2525 * pending.
2526 */
2527 while (eq->flags & EQ_CRFLUSHED)
2528 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
2529 EQ_UNLOCK(eq);
2530
2531 callout_drain(&eq->tx_callout); /* XXX: iffy */
2532 pause("callout", 10); /* Still iffy */
2533
2534 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
2535}
2536
2537static void
2538quiesce_iq(struct adapter *sc, struct sge_iq *iq)
2539{
2540 (void) sc; /* unused */
2541
2542 /* Synchronize with the interrupt handler */
2543 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
2544 pause("iqfree", 1);
2545}
2546
2547static void
2548quiesce_fl(struct adapter *sc, struct sge_fl *fl)
2549{
2550 mtx_lock(&sc->sfl_lock);
2551 FL_LOCK(fl);
2552 fl->flags |= FL_DOOMED;
2553 FL_UNLOCK(fl);
2554 mtx_unlock(&sc->sfl_lock);
2555
2556 callout_drain(&sc->sfl_callout);
2557 KASSERT((fl->flags & FL_STARVING) == 0,
2558 ("%s: still starving", __func__));
2559}
2560
2561static int
2075t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
2562t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
2076 iq_intr_handler_t *handler, void *arg, char *name)
2563 driver_intr_t *handler, void *arg, char *name)
2077{
2078 int rc;
2079
2080 irq->rid = rid;
2081 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
2082 RF_SHAREABLE | RF_ACTIVE);
2083 if (irq->res == NULL) {
2084 device_printf(sc->dev,

--- 275 unchanged lines hidden ---

2360 PORT_LOCK(pi);
2361 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2362 PORT_UNLOCK(pi);
2363 return; /* without scheduling another callout */
2364 }
2365
2366 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
2367
2564{
2565 int rc;
2566
2567 irq->rid = rid;
2568 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
2569 RF_SHAREABLE | RF_ACTIVE);
2570 if (irq->res == NULL) {
2571 device_printf(sc->dev,

--- 275 unchanged lines hidden ---

2847 PORT_LOCK(pi);
2848 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2849 PORT_UNLOCK(pi);
2850 return; /* without scheduling another callout */
2851 }
2852
2853 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
2854
2368 ifp->if_opackets = s->tx_frames;
2369 ifp->if_ipackets = s->rx_frames;
2370 ifp->if_obytes = s->tx_octets;
2371 ifp->if_ibytes = s->rx_octets;
2372 ifp->if_omcasts = s->tx_mcast_frames;
2373 ifp->if_imcasts = s->rx_mcast_frames;
2855 ifp->if_opackets = s->tx_frames - s->tx_pause;
2856 ifp->if_ipackets = s->rx_frames - s->rx_pause;
2857 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
2858 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
2859 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
2860 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
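	/*
	 * The MAC statistics above include 802.3x PAUSE frames, which
	 * the host never sees; the code treats each as a minimum-size
	 * 64-octet frame, hence the "* 64" octet adjustment.
	 */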
2374 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2375 s->rx_ovflow3;
2376
2377 drops = s->tx_drop;
2378 for_each_txq(pi, i, txq)
2379 drops += txq->br->br_drops;
2380 ifp->if_snd.ifq_drops = drops;
2381
2382 ifp->if_oerrors = s->tx_error_frames;
2383 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
2384 s->rx_fcs_err + s->rx_len_err;
2385
2386 callout_schedule(&pi->tick, hz);
2387 PORT_UNLOCK(pi);
2388}
2389
2390static int
2861 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2862 s->rx_ovflow3;
2863
2864 drops = s->tx_drop;
2865 for_each_txq(pi, i, txq)
2866 drops += txq->br->br_drops;
2867 ifp->if_snd.ifq_drops = drops;
2868
2869 ifp->if_oerrors = s->tx_error_frames;
2870 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
2871 s->rx_fcs_err + s->rx_len_err;
2872
2873 callout_schedule(&pi->tick, hz);
2874 PORT_UNLOCK(pi);
2875}
2876
2877static int
2878cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2879{
2880#ifdef INVARIANTS
2881 panic("%s: opcode %02x on iq %p with payload %p",
2882 __func__, rss->opcode, iq, m);
2883#else
2884 log(LOG_ERR, "%s: opcode %02x on iq %p with payload %p\n",
2885 __func__, rss->opcode, iq, m);
2886 m_freem(m);
2887#endif
2888 return (EDOOFUS);
2889}
2890
2891int
2892t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2893{
2894 uintptr_t *loc, new;
2895
2896 if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2897 return (EINVAL);
2898
2899 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
2900 loc = (uintptr_t *) &sc->cpl_handler[opcode];
2901 atomic_store_rel_ptr(loc, new);
2902
2903 return (0);
2904}
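/*
 * Illustrative usage (the handler name is hypothetical, not part of this
 * file): a module that consumes CPL messages registers a handler for an
 * opcode and restores the default cpl_not_handled by passing NULL:
 *
 *	t4_register_cpl_handler(sc, CPL_ACT_OPEN_RPL, do_act_open_rpl);
 *	...
 *	t4_register_cpl_handler(sc, CPL_ACT_OPEN_RPL, NULL);
 */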
2905
2906static int
2391t4_sysctls(struct adapter *sc)
2392{
2393 struct sysctl_ctx_list *ctx;
2394 struct sysctl_oid *oid;
2907t4_sysctls(struct adapter *sc)
2908{
2909 struct sysctl_ctx_list *ctx;
2910 struct sysctl_oid *oid;
2395 struct sysctl_oid_list *children;
2911 struct sysctl_oid_list *children, *c0;
2912 static char *caps[] = {
2913 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
2914 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */
2915 "\20\1TOE", /* caps[2] toecaps */
2916 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
2917 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
2918 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
2919 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
2920 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
2921 };
2396
2397 ctx = device_get_sysctl_ctx(sc->dev);
2922
2923 ctx = device_get_sysctl_ctx(sc->dev);
2924
2925 /*
2926 * dev.t4nex.X.
2927 */
2398 oid = device_get_sysctl_tree(sc->dev);
2928 oid = device_get_sysctl_tree(sc->dev);
2399 children = SYSCTL_CHILDREN(oid);
2929 c0 = children = SYSCTL_CHILDREN(oid);
2400
2401 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
2402 &sc->params.nports, 0, "# of ports");
2403
2404 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
2405 &sc->params.rev, 0, "chip hardware revision");
2406
2407 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
2408 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
2409
2930
2931 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
2932 &sc->params.nports, 0, "# of ports");
2933
2934 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
2935 &sc->params.rev, 0, "chip hardware revision");
2936
2937 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
2938 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
2939
2410 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD,
2411 &sc->params.offload, 0, "hardware is capable of TCP offload");
2940 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
2941 CTLFLAG_RD, &t4_cfg_file, 0, "configuration file");
2412
2942
2943 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD,
2944 &sc->cfcsum, 0, "config file checksum");
2945
2946 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
2947 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
2948 sysctl_bitfield, "A", "available link capabilities");
2949
2950 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
2951 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
2952 sysctl_bitfield, "A", "available NIC capabilities");
2953
2954 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
2955 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
2956 sysctl_bitfield, "A", "available TCP offload capabilities");
2957
2958 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
2959 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
2960 sysctl_bitfield, "A", "available RDMA capabilities");
2961
2962 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
2963 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
2964 sysctl_bitfield, "A", "available iSCSI capabilities");
2965
2966 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
2967 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
2968 sysctl_bitfield, "A", "available FCoE capabilities");
2969
2413 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
2414 &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");
2415
2416 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
2970 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
2971 &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");
2972
2973 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
2417 CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer),
2418 sysctl_int_array, "A", "interrupt holdoff timer values (us)");
2974 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
2975 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
2976 "interrupt holdoff timer values (us)");
2419
2420 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
2977
2978 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
2421 CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount),
2422 sysctl_int_array, "A", "interrupt holdoff packet counter values");
2979 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
2980 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
2981 "interrupt holdoff packet counter values");
2423
2982
2983 /*
2984 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
2985 */
2986 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
2987 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
2988 "logs and miscellaneous information");
2989 children = SYSCTL_CHILDREN(oid);
2990
2991 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
2992 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2993 sysctl_cctrl, "A", "congestion control");
2994
2995 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
2996 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2997 sysctl_cpl_stats, "A", "CPL statistics");
2998
2999 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
3000 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3001 sysctl_ddp_stats, "A", "DDP statistics");
3002
2424 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
2425 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3003 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
3004 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2426 sysctl_devlog, "A", "device log");
3005 sysctl_devlog, "A", "firmware's device log");
2427
3006
3007 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
3008 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3009 sysctl_fcoe_stats, "A", "FCoE statistics");
3010
3011 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
3012 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3013 sysctl_hw_sched, "A", "hardware scheduler");
3014
3015 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
3016 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3017 sysctl_l2t, "A", "hardware L2 table");
3018
3019 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
3020 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3021 sysctl_lb_stats, "A", "loopback statistics");
3022
3023 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
3024 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3025 sysctl_meminfo, "A", "memory regions");
3026
3027 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
3028 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3029 sysctl_path_mtus, "A", "path MTUs");
3030
3031 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
3032 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3033 sysctl_pm_stats, "A", "PM statistics");
3034
3035 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
3036 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3037 sysctl_rdma_stats, "A", "RDMA statistics");
3038
3039 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
3040 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3041 sysctl_tcp_stats, "A", "TCP statistics");
3042
3043 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
3044 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3045 sysctl_tids, "A", "TID information");
3046
3047 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
3048 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3049 sysctl_tp_err_stats, "A", "TP error statistics");
3050
3051 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
3052 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3053 sysctl_tx_rate, "A", "Tx rate");
3054
3055#ifndef TCP_OFFLOAD_DISABLE
3056 if (is_offload(sc)) {
3057 /*
3058 * dev.t4nex.X.toe.
3059 */
3060 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
3061 NULL, "TOE parameters");
3062 children = SYSCTL_CHILDREN(oid);
3063
3064 sc->tt.sndbuf = 256 * 1024;
3065 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
3066 &sc->tt.sndbuf, 0, "max hardware send buffer size");
3067
3068 sc->tt.ddp = 0;
3069 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
3070 &sc->tt.ddp, 0, "DDP allowed");
3071 sc->tt.indsz = M_INDICATESIZE;
3072 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
3073 &sc->tt.indsz, 0, "DDP max indicate size allowed");
3074 sc->tt.ddp_thres = 3*4096;
3075 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
3076 &sc->tt.ddp_thres, 0, "DDP threshold");
3077 }
3078#endif
3079
3080
2428 return (0);
2429}
2430
2431static int
2432cxgbe_sysctls(struct port_info *pi)
2433{
2434 struct sysctl_ctx_list *ctx;
2435 struct sysctl_oid *oid;

--- 11 unchanged lines hidden (view full) ---

2447 &pi->nrxq, 0, "# of rx queues");
2448 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
2449 &pi->ntxq, 0, "# of tx queues");
2450 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
2451 &pi->first_rxq, 0, "index of first rx queue");
2452 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
2453 &pi->first_txq, 0, "index of first tx queue");
2454
3081 return (0);
3082}
3083
3084static int
3085cxgbe_sysctls(struct port_info *pi)
3086{
3087 struct sysctl_ctx_list *ctx;
3088 struct sysctl_oid *oid;

--- 11 unchanged lines hidden (view full) ---

3100 &pi->nrxq, 0, "# of rx queues");
3101 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
3102 &pi->ntxq, 0, "# of tx queues");
3103 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
3104 &pi->first_rxq, 0, "index of first rx queue");
3105 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
3106 &pi->first_txq, 0, "index of first tx queue");
3107
3108#ifndef TCP_OFFLOAD_DISABLE
3109 if (is_offload(pi->adapter)) {
3110 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
3111 &pi->nofldrxq, 0,
3112 "# of rx queues for offloaded TCP connections");
3113 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
3114 &pi->nofldtxq, 0,
3115 "# of tx queues for offloaded TCP connections");
3116 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
3117 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
3118 "index of first TOE rx queue");
3119 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
3120 CTLFLAG_RD, &pi->first_ofld_txq, 0,
3121 "index of first TOE tx queue");
3122 }
3123#endif
3124
2455 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
2456 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
2457 "holdoff timer index");
2458 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
2459 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
2460 "holdoff packet counter index");
2461
2462 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",

--- 174 unchanged lines hidden ---

2637 sbuf_trim(&sb);
2638 sbuf_finish(&sb);
2639 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2640 sbuf_delete(&sb);
2641 return (rc);
2642}
2643
2644static int
3125 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
3126 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
3127 "holdoff timer index");
3128 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
3129 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
3130 "holdoff packet counter index");
3131
3132 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",

--- 174 unchanged lines hidden ---

3307 sbuf_trim(&sb);
3308 sbuf_finish(&sb);
3309 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
3310 sbuf_delete(&sb);
3311 return (rc);
3312}
3313
3314static int
3315sysctl_bitfield(SYSCTL_HANDLER_ARGS)
3316{
3317 int rc;
3318 struct sbuf *sb;
3319
3320 rc = sysctl_wire_old_buffer(req, 0);
3321 if (rc != 0)
3322 return (rc);
3323
3324 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3325 if (sb == NULL)
3326 return (ENOMEM);
3327
3328 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
3329 rc = sbuf_finish(sb);
3330 sbuf_delete(sb);
3331
3332 return (rc);
3333}
3334
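[A quick aside on the handler just above: sysctl_bitfield() leans on the kernel printf(9) "%b" conversion, where arg2 is the raw integer and arg1 a bit-description string whose first byte gives the output base (\20 for hex) followed by <bit-number, name> pairs. A hypothetical registration — the node name and flag names below are illustrative, not taken from this file — would look like:

	static char *bits = "\20\1FULL_INIT\2FW_OK";	/* illustrative bit names */

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "flags",
	    CTLTYPE_STRING | CTLFLAG_RD, bits, sc->flags,
	    sysctl_bitfield, "A", "adapter flags");

With sc->flags == 3 this would render as "3<FULL_INIT,FW_OK>".]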
3335static int
2645sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
2646{
2647 struct port_info *pi = arg1;
2648 struct adapter *sc = pi->adapter;
3336sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
3337{
3338 struct port_info *pi = arg1;
3339 struct adapter *sc = pi->adapter;
2649 struct sge_rxq *rxq;
2650 int idx, rc, i;
2651
2652 idx = pi->tmr_idx;
2653
2654 rc = sysctl_handle_int(oidp, &idx, 0, req);
2655 if (rc != 0 || req->newptr == NULL)
2656 return (rc);
2657
2658 if (idx < 0 || idx >= SGE_NTIMERS)
2659 return (EINVAL);
2660
2661 ADAPTER_LOCK(sc);
2662 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2663 if (rc == 0) {
3340 int idx, rc, i;
3341
3342 idx = pi->tmr_idx;
3343
3344 rc = sysctl_handle_int(oidp, &idx, 0, req);
3345 if (rc != 0 || req->newptr == NULL)
3346 return (rc);
3347
3348 if (idx < 0 || idx >= SGE_NTIMERS)
3349 return (EINVAL);
3350
3351 ADAPTER_LOCK(sc);
3352 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3353 if (rc == 0) {
3354 struct sge_rxq *rxq;
3355 uint8_t v;
3356
3357 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
2664 for_each_rxq(pi, i, rxq) {
3358 for_each_rxq(pi, i, rxq) {
2665 rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) |
2666 V_QINTR_CNT_EN(pi->pktc_idx != -1);
3359#ifdef atomic_store_rel_8
3360 atomic_store_rel_8(&rxq->iq.intr_params, v);
3361#else
3362 rxq->iq.intr_params = v;
3363#endif
2667 }
2668 pi->tmr_idx = idx;
2669 }
2670
2671 ADAPTER_UNLOCK(sc);
2672 return (rc);
2673}
2674

--- 10 unchanged lines hidden ---

2685 if (rc != 0 || req->newptr == NULL)
2686 return (rc);
2687
2688 if (idx < -1 || idx >= SGE_NCOUNTERS)
2689 return (EINVAL);
2690
2691 ADAPTER_LOCK(sc);
2692 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3364 }
3365 pi->tmr_idx = idx;
3366 }
3367
3368 ADAPTER_UNLOCK(sc);
3369 return (rc);
3370}
3371
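[Two things changed in the loop above: the holdoff byte is now computed once outside the loop, and the store uses atomic_store_rel_8() on platforms that define it (hence the #ifdef), so an interrupt path re-reading iq.intr_params picks up a fully formed value without taking the queue lock. Assuming the usual SGE encoding — counter-enable in bit 0, timer index in bits 3:1; that layout is inferred, not shown in this diff — idx = 3 with packet counting enabled packs as:

	v = V_QINTR_TIMER_IDX(3) | V_QINTR_CNT_EN(1);	/* (3 << 1) | 1 == 0x7 */]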

--- 10 unchanged lines hidden ---

3382 if (rc != 0 || req->newptr == NULL)
3383 return (rc);
3384
3385 if (idx < -1 || idx >= SGE_NCOUNTERS)
3386 return (EINVAL);
3387
3388 ADAPTER_LOCK(sc);
3389 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2693 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2694 rc = EBUSY; /* can be changed only when port is down */
3390 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3391 rc = EBUSY; /* cannot be changed once the queues are created */
2695
2696 if (rc == 0)
2697 pi->pktc_idx = idx;
2698
2699 ADAPTER_UNLOCK(sc);
2700 return (rc);
2701}
2702

--- 10 unchanged lines hidden ---

2713 if (rc != 0 || req->newptr == NULL)
2714 return (rc);
2715
2716 if (qsize < 128 || (qsize & 7))
2717 return (EINVAL);
2718
2719 ADAPTER_LOCK(sc);
2720 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3392
3393 if (rc == 0)
3394 pi->pktc_idx = idx;
3395
3396 ADAPTER_UNLOCK(sc);
3397 return (rc);
3398}
3399

--- 10 unchanged lines hidden ---

3410 if (rc != 0 || req->newptr == NULL)
3411 return (rc);
3412
3413 if (qsize < 128 || (qsize & 7))
3414 return (EINVAL);
3415
3416 ADAPTER_LOCK(sc);
3417 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2721 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2722 rc = EBUSY; /* can be changed only when port is down */
3418 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3419 rc = EBUSY; /* cannot be changed once the queues are created */
2723
2724 if (rc == 0)
2725 pi->qsize_rxq = qsize;
2726
2727 ADAPTER_UNLOCK(sc);
2728 return (rc);
2729}
2730

--- 10 unchanged lines hidden ---

2741 if (rc != 0 || req->newptr == NULL)
2742 return (rc);
2743
2744 if (qsize < 128)
2745 return (EINVAL);
2746
2747 ADAPTER_LOCK(sc);
2748 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3420
3421 if (rc == 0)
3422 pi->qsize_rxq = qsize;
3423
3424 ADAPTER_UNLOCK(sc);
3425 return (rc);
3426}
3427

--- 10 unchanged lines hidden ---

3438 if (rc != 0 || req->newptr == NULL)
3439 return (rc);
3440
3441 if (qsize < 128)
3442 return (EINVAL);
3443
3444 ADAPTER_LOCK(sc);
3445 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2749 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2750 rc = EBUSY; /* can be changed only when port is down */
3446 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3447 rc = EBUSY; /* cannot be changed once the queues are created */
2751
2752 if (rc == 0)
2753 pi->qsize_txq = qsize;
2754
2755 ADAPTER_UNLOCK(sc);
2756 return (rc);
2757}
2758

--- 4 unchanged lines hidden ---

2763 int reg = arg2;
2764 uint64_t val;
2765
2766 val = t4_read_reg64(sc, reg);
2767
2768 return (sysctl_handle_64(oidp, &val, 0, req));
2769}
2770
3448
3449 if (rc == 0)
3450 pi->qsize_txq = qsize;
3451
3452 ADAPTER_UNLOCK(sc);
3453 return (rc);
3454}
3455

--- 4 unchanged lines hidden (view full) ---

3460 int reg = arg2;
3461 uint64_t val;
3462
3463 val = t4_read_reg64(sc, reg);
3464
3465 return (sysctl_handle_64(oidp, &val, 0, req));
3466}
3467
3468static int
3469sysctl_cctrl(SYSCTL_HANDLER_ARGS)
3470{
3471 struct adapter *sc = arg1;
3472 struct sbuf *sb;
3473 int rc, i;
3474 uint16_t incr[NMTUS][NCCTRL_WIN];
3475 static const char *dec_fac[] = {
3476 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
3477 "0.9375"
3478 };
3479
3480 rc = sysctl_wire_old_buffer(req, 0);
3481 if (rc != 0)
3482 return (rc);
3483
3484 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3485 if (sb == NULL)
3486 return (ENOMEM);
3487
3488 t4_read_cong_tbl(sc, incr);
3489
3490 for (i = 0; i < NCCTRL_WIN; ++i) {
3491 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
3492 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
3493 incr[5][i], incr[6][i], incr[7][i]);
3494 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
3495 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
3496 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
3497 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
3498 }
3499
3500 rc = sbuf_finish(sb);
3501 sbuf_delete(sb);
3502
3503 return (rc);
3504}
3505
3506static int
3507sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
3508{
3509 struct adapter *sc = arg1;
3510 struct sbuf *sb;
3511 int rc;
3512 struct tp_cpl_stats stats;
3513
3514 rc = sysctl_wire_old_buffer(req, 0);
3515 if (rc != 0)
3516 return (rc);
3517
3518 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3519 if (sb == NULL)
3520 return (ENOMEM);
3521
3522 t4_tp_get_cpl_stats(sc, &stats);
3523
3524 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
3525 "channel 3\n");
3526 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
3527 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
3528 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
3529 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
3530
3531 rc = sbuf_finish(sb);
3532 sbuf_delete(sb);
3533
3534 return (rc);
3535}
3536
3537static int
3538sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
3539{
3540 struct adapter *sc = arg1;
3541 struct sbuf *sb;
3542 int rc;
3543 struct tp_usm_stats stats;
3544
3545 rc = sysctl_wire_old_buffer(req, 0);
3546 if (rc != 0)
3547 return (rc);
3548
3549 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3550 if (sb == NULL)
3551 return (ENOMEM);
3552
3553 t4_get_usm_stats(sc, &stats);
3554
3555 sbuf_printf(sb, "Frames: %u\n", stats.frames);
3556 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
3557 sbuf_printf(sb, "Drops: %u", stats.drops);
3558
3559 rc = sbuf_finish(sb);
3560 sbuf_delete(sb);
3561
3562 return (rc);
3563}
3564
2771const char *devlog_level_strings[] = {
2772 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
2773 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
2774 [FW_DEVLOG_LEVEL_ERR] = "ERR",
2775 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
2776 [FW_DEVLOG_LEVEL_INFO] = "INFO",
2777 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
2778};

--- 68 unchanged lines hidden ---

2847 if (buf[first].timestamp == 0)
2848 goto done; /* nothing in the log */
2849
2850 rc = sysctl_wire_old_buffer(req, 0);
2851 if (rc != 0)
2852 goto done;
2853
2854 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3565const char *devlog_level_strings[] = {
3566 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
3567 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
3568 [FW_DEVLOG_LEVEL_ERR] = "ERR",
3569 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
3570 [FW_DEVLOG_LEVEL_INFO] = "INFO",
3571 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
3572};

--- 68 unchanged lines hidden ---

3641 if (buf[first].timestamp == 0)
3642 goto done; /* nothing in the log */
3643
3644 rc = sysctl_wire_old_buffer(req, 0);
3645 if (rc != 0)
3646 goto done;
3647
3648 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
2855 sbuf_printf(sb, "\n%10s %15s %8s %8s %s\n",
3649 if (sb == NULL) {
3650 rc = ENOMEM;
3651 goto done;
3652 }
3653 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
2856 "Seq#", "Tstamp", "Level", "Facility", "Message");
2857
2858 i = first;
2859 do {
2860 e = &buf[i];
2861 if (e->timestamp == 0)
2862 break; /* end */
2863

--- 13 unchanged lines hidden ---

2877
2878 rc = sbuf_finish(sb);
2879 sbuf_delete(sb);
2880done:
2881 free(buf, M_CXGBE);
2882 return (rc);
2883}
2884
3654 "Seq#", "Tstamp", "Level", "Facility", "Message");
3655
3656 i = first;
3657 do {
3658 e = &buf[i];
3659 if (e->timestamp == 0)
3660 break; /* end */
3661

--- 13 unchanged lines hidden ---

3675
3676 rc = sbuf_finish(sb);
3677 sbuf_delete(sb);
3678done:
3679 free(buf, M_CXGBE);
3680 return (rc);
3681}
3682
3683static int
3684sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
3685{
3686 struct adapter *sc = arg1;
3687 struct sbuf *sb;
3688 int rc;
3689 struct tp_fcoe_stats stats[4];
3690
3691 rc = sysctl_wire_old_buffer(req, 0);
3692 if (rc != 0)
3693 return (rc);
3694
3695 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3696 if (sb == NULL)
3697 return (ENOMEM);
3698
3699 t4_get_fcoe_stats(sc, 0, &stats[0]);
3700 t4_get_fcoe_stats(sc, 1, &stats[1]);
3701 t4_get_fcoe_stats(sc, 2, &stats[2]);
3702 t4_get_fcoe_stats(sc, 3, &stats[3]);
3703
3704 sbuf_printf(sb, " channel 0 channel 1 "
3705 "channel 2 channel 3\n");
3706 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
3707 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
3708 stats[3].octetsDDP);
3709 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
3710 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
3711 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
3712 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
3713 stats[3].framesDrop);
3714
3715 rc = sbuf_finish(sb);
3716 sbuf_delete(sb);
3717
3718 return (rc);
3719}
3720
3721static int
3722sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
3723{
3724 struct adapter *sc = arg1;
3725 struct sbuf *sb;
3726 int rc, i;
3727 unsigned int map, kbps, ipg, mode;
3728 unsigned int pace_tab[NTX_SCHED];
3729
3730 rc = sysctl_wire_old_buffer(req, 0);
3731 if (rc != 0)
3732 return (rc);
3733
3734 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3735 if (sb == NULL)
3736 return (ENOMEM);
3737
3738 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
3739 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
3740 t4_read_pace_tbl(sc, pace_tab);
3741
3742 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
3743 "Class IPG (0.1 ns) Flow IPG (us)");
3744
3745 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
3746 t4_get_tx_sched(sc, i, &kbps, &ipg);
3747 sbuf_printf(sb, "\n %u %-5s %u ", i,
3748 (mode & (1 << i)) ? "flow" : "class", map & 3);
3749 if (kbps)
3750 sbuf_printf(sb, "%9u ", kbps);
3751 else
3752 sbuf_printf(sb, " disabled ");
3753
3754 if (ipg)
3755 sbuf_printf(sb, "%13u ", ipg);
3756 else
3757 sbuf_printf(sb, " disabled ");
3758
3759 if (pace_tab[i])
3760 sbuf_printf(sb, "%10u", pace_tab[i]);
3761 else
3762 sbuf_printf(sb, " disabled");
3763 }
3764
3765 rc = sbuf_finish(sb);
3766 sbuf_delete(sb);
3767
3768 return (rc);
3769}
3770
3771static int
3772sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
3773{
3774 struct adapter *sc = arg1;
3775 struct sbuf *sb;
3776 int rc, i, j;
3777 uint64_t *p0, *p1;
3778 struct lb_port_stats s[2];
3779 static const char *stat_name[] = {
3780 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
3781 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
3782 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
3783 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
3784 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
3785 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
3786 "BG2FramesTrunc:", "BG3FramesTrunc:"
3787 };
3788
3789 rc = sysctl_wire_old_buffer(req, 0);
3790 if (rc != 0)
3791 return (rc);
3792
3793 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3794 if (sb == NULL)
3795 return (ENOMEM);
3796
3797 memset(s, 0, sizeof(s));
3798
3799 for (i = 0; i < 4; i += 2) {
3800 t4_get_lb_stats(sc, i, &s[0]);
3801 t4_get_lb_stats(sc, i + 1, &s[1]);
3802
3803 p0 = &s[0].octets;
3804 p1 = &s[1].octets;
3805 sbuf_printf(sb, "%s Loopback %u"
3806 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
3807
3808 for (j = 0; j < ARRAY_SIZE(stat_name); j++)
3809 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
3810 *p0++, *p1++);
3811 }
3812
3813 rc = sbuf_finish(sb);
3814 sbuf_delete(sb);
3815
3816 return (rc);
3817}
3818
3819struct mem_desc {
3820 unsigned int base;
3821 unsigned int limit;
3822 unsigned int idx;
3823};
3824
3825static int
3826mem_desc_cmp(const void *a, const void *b)
3827{
3828 return ((const struct mem_desc *)a)->base -
3829 ((const struct mem_desc *)b)->base;
3830}
3831
3832static void
3833mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
3834 unsigned int to)
3835{
3836 unsigned int size;
3837
3838 size = to - from + 1;
3839 if (size == 0)
3840 return;
3841
3842 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
3843 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
3844}
3845
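[A minimal sketch of what the XXX above asks for — rendering 'size' more readably — using plain shifts since libkern at this point lacks humanize_number(3). This is an illustration only, not proposed driver code:

	static void
	mem_region_show_human(struct sbuf *sb, const char *name,
	    unsigned int from, unsigned int to)
	{
		unsigned int size = to - from + 1;

		if (size == 0)
			return;
		if (size >= 1 << 20)
			sbuf_printf(sb, "%-15s %#x-%#x [%uM]\n", name, from,
			    to, size >> 20);
		else if (size >= 1 << 10)
			sbuf_printf(sb, "%-15s %#x-%#x [%uK]\n", name, from,
			    to, size >> 10);
		else
			sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from,
			    to, size);
	}]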
3846static int
3847sysctl_meminfo(SYSCTL_HANDLER_ARGS)
3848{
3849 struct adapter *sc = arg1;
3850 struct sbuf *sb;
3851 int rc, i, n;
3852 uint32_t lo, hi;
3853 static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
3854 static const char *region[] = {
3855 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
3856 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
3857 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
3858 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
3859 "RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
3860 "ULPTX state:", "On-chip queues:"
3861 };
3862 struct mem_desc avail[3];
3863 struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
3864 struct mem_desc *md = mem;
3865
3866 rc = sysctl_wire_old_buffer(req, 0);
3867 if (rc != 0)
3868 return (rc);
3869
3870 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3871 if (sb == NULL)
3872 return (ENOMEM);
3873
3874 for (i = 0; i < ARRAY_SIZE(mem); i++) {
3875 mem[i].limit = 0;
3876 mem[i].idx = i;
3877 }
3878
3879 /* Find and sort the populated memory ranges */
3880 i = 0;
3881 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3882 if (lo & F_EDRAM0_ENABLE) {
3883 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3884 avail[i].base = G_EDRAM0_BASE(hi) << 20;
3885 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
3886 avail[i].idx = 0;
3887 i++;
3888 }
3889 if (lo & F_EDRAM1_ENABLE) {
3890 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3891 avail[i].base = G_EDRAM1_BASE(hi) << 20;
3892 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
3893 avail[i].idx = 1;
3894 i++;
3895 }
3896 if (lo & F_EXT_MEM_ENABLE) {
3897 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3898 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
3899 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
3900 avail[i].idx = 2;
3901 i++;
3902 }
3903 if (!i) { /* no memory available */
3904 sbuf_delete(sb);
3904 return (0);
3904 }
3905 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
3906
3907 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
3908 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
3909 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
3910 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
3911 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
3912 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
3913 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
3914 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
3915 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
3916
3917 /* the next few have explicit upper bounds */
3918 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
3919 md->limit = md->base - 1 +
3920 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
3921 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
3922 md++;
3923
3924 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
3925 md->limit = md->base - 1 +
3926 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
3927 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
3928 md++;
3929
3930 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
3931 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
3932 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
3933 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
3934 } else {
3935 md->base = 0;
3936 md->idx = ARRAY_SIZE(region); /* hide it */
3937 }
3938 md++;
3939
3940#define ulp_region(reg) \
3941 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
3942 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
3943
3944 ulp_region(RX_ISCSI);
3945 ulp_region(RX_TDDP);
3946 ulp_region(TX_TPT);
3947 ulp_region(RX_STAG);
3948 ulp_region(RX_RQ);
3949 ulp_region(RX_RQUDP);
3950 ulp_region(RX_PBL);
3951 ulp_region(TX_PBL);
3952#undef ulp_region
3953
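[For reference, the token pasting in the ulp_region() macro means ulp_region(RX_ISCSI) above expands to exactly:

	md->base = t4_read_reg(sc, A_ULP_RX_ISCSI_LLIMIT);
	(md++)->limit = t4_read_reg(sc, A_ULP_RX_ISCSI_ULIMIT);

so each invocation fills one mem_desc slot from that region's hardware lower/upper limit registers and advances the cursor.]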
3954 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
3955 md->limit = md->base + sc->tids.ntids - 1;
3956 md++;
3957 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
3958 md->limit = md->base + sc->tids.ntids - 1;
3959 md++;
3960
3961 md->base = sc->vres.ocq.start;
3962 if (sc->vres.ocq.size)
3963 md->limit = md->base + sc->vres.ocq.size - 1;
3964 else
3965 md->idx = ARRAY_SIZE(region); /* hide it */
3966 md++;
3967
3968 /* add any address-space holes; there can be up to 3 */
3969 for (n = 0; n < i - 1; n++)
3970 if (avail[n].limit < avail[n + 1].base)
3971 (md++)->base = avail[n].limit;
3972 if (avail[n].limit)
3973 (md++)->base = avail[n].limit;
3974
3975 n = md - mem;
3976 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
3977
3978 for (lo = 0; lo < i; lo++)
3979 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
3980 avail[lo].limit - 1);
3981
3982 sbuf_printf(sb, "\n");
3983 for (i = 0; i < n; i++) {
3984 if (mem[i].idx >= ARRAY_SIZE(region))
3985 continue; /* skip holes */
3986 if (!mem[i].limit)
3987 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
3988 mem_region_show(sb, region[mem[i].idx], mem[i].base,
3989 mem[i].limit);
3990 }
3991
3992 sbuf_printf(sb, "\n");
3993 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
3994 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
3995 mem_region_show(sb, "uP RAM:", lo, hi);
3996
3997 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
3998 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
3999 mem_region_show(sb, "uP Extmem2:", lo, hi);
4000
4001 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
4002 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
4003 G_PMRXMAXPAGE(lo),
4004 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
4005 (lo & F_PMRXNUMCHN) ? 2 : 1);
4006
4007 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
4008 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
4009 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
4010 G_PMTXMAXPAGE(lo),
4011 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
4012 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
4013 sbuf_printf(sb, "%u p-structs\n",
4014 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
4015
4016 for (i = 0; i < 4; i++) {
4017 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
4018 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
4019 i, G_USED(lo), G_ALLOC(lo));
4020 }
4021 for (i = 0; i < 4; i++) {
4022 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
4023 sbuf_printf(sb,
4024 "\nLoopback %d using %u pages out of %u allocated",
4025 i, G_USED(lo), G_ALLOC(lo));
4026 }
4027
4028 rc = sbuf_finish(sb);
4029 sbuf_delete(sb);
4030
4031 return (rc);
4032}
4033
4034static int
4035sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
4036{
4037 struct adapter *sc = arg1;
4038 struct sbuf *sb;
4039 int rc;
4040 uint16_t mtus[NMTUS];
4041
4042 rc = sysctl_wire_old_buffer(req, 0);
4043 if (rc != 0)
4044 return (rc);
4045
4046 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4047 if (sb == NULL)
4048 return (ENOMEM);
4049
4050 t4_read_mtu_tbl(sc, mtus, NULL);
4051
4052 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
4053 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
4054 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
4055 mtus[14], mtus[15]);
4056
4057 rc = sbuf_finish(sb);
4058 sbuf_delete(sb);
4059
4060 return (rc);
4061}
4062
4063static int
4064sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
4065{
4066 struct adapter *sc = arg1;
4067 struct sbuf *sb;
4068 int rc, i;
4069 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
4070 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
4071 static const char *pm_stats[] = {
4072 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
4073 };
4074
4075 rc = sysctl_wire_old_buffer(req, 0);
4076 if (rc != 0)
4077 return (rc);
4078
4079 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4080 if (sb == NULL)
4081 return (ENOMEM);
4082
4083 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
4084 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
4085
4086 sbuf_printf(sb, " Tx count Tx cycles "
4087 "Rx count Rx cycles");
4088 for (i = 0; i < PM_NSTATS; i++)
4089 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
4090 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
4091
4092 rc = sbuf_finish(sb);
4093 sbuf_delete(sb);
4094
4095 return (rc);
4096}
4097
4098static int
4099sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
4100{
4101 struct adapter *sc = arg1;
4102 struct sbuf *sb;
4103 int rc;
4104 struct tp_rdma_stats stats;
4105
4106 rc = sysctl_wire_old_buffer(req, 0);
4107 if (rc != 0)
4108 return (rc);
4109
4110 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4111 if (sb == NULL)
4112 return (ENOMEM);
4113
4114 t4_tp_get_rdma_stats(sc, &stats);
4115 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
4116 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
4117
4118 rc = sbuf_finish(sb);
4119 sbuf_delete(sb);
4120
4121 return (rc);
4122}
4123
4124static int
4125sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
4126{
4127 struct adapter *sc = arg1;
4128 struct sbuf *sb;
4129 int rc;
4130 struct tp_tcp_stats v4, v6;
4131
4132 rc = sysctl_wire_old_buffer(req, 0);
4133 if (rc != 0)
4134 return (rc);
4135
4136 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4137 if (sb == NULL)
4138 return (ENOMEM);
4139
4140 t4_tp_get_tcp_stats(sc, &v4, &v6);
4141 sbuf_printf(sb,
4142 " IP IPv6\n");
4143 sbuf_printf(sb, "OutRsts: %20u %20u\n",
4144 v4.tcpOutRsts, v6.tcpOutRsts);
4145 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
4146 v4.tcpInSegs, v6.tcpInSegs);
4147 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
4148 v4.tcpOutSegs, v6.tcpOutSegs);
4149 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
4150 v4.tcpRetransSegs, v6.tcpRetransSegs);
4151
4152 rc = sbuf_finish(sb);
4153 sbuf_delete(sb);
4154
4155 return (rc);
4156}
4157
4158static int
4159sysctl_tids(SYSCTL_HANDLER_ARGS)
4160{
4161 struct adapter *sc = arg1;
4162 struct sbuf *sb;
4163 int rc;
4164 struct tid_info *t = &sc->tids;
4165
4166 rc = sysctl_wire_old_buffer(req, 0);
4167 if (rc != 0)
4168 return (rc);
4169
4170 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4171 if (sb == NULL)
4172 return (ENOMEM);
4173
4174 if (t->natids) {
4175 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
4176 t->atids_in_use);
4177 }
4178
4179 if (t->ntids) {
4180 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
4181 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
4182
4183 if (b) {
4184 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
4185 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4186 t->ntids - 1);
4187 } else {
4188 sbuf_printf(sb, "TID range: %u-%u",
4189 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4190 t->ntids - 1);
4191 }
4192 } else
4193 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
4194 sbuf_printf(sb, ", in use: %u\n",
4195 atomic_load_acq_int(&t->tids_in_use));
4196 }
4197
4198 if (t->nstids) {
4199 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
4200 t->stid_base + t->nstids - 1, t->stids_in_use);
4201 }
4202
4203 if (t->nftids) {
4204 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
4205 t->ftid_base + t->nftids - 1);
4206 }
4207
4208 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
4209 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
4210 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
4211
4212 rc = sbuf_finish(sb);
4213 sbuf_delete(sb);
4214
4215 return (rc);
4216}
4217
4218static int
4219sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
4220{
4221 struct adapter *sc = arg1;
4222 struct sbuf *sb;
4223 int rc;
4224 struct tp_err_stats stats;
4225
4226 rc = sysctl_wire_old_buffer(req, 0);
4227 if (rc != 0)
4228 return (rc);
4229
4230 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4231 if (sb == NULL)
4232 return (ENOMEM);
4233
4234 t4_tp_get_err_stats(sc, &stats);
4235
4236 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4237 "channel 3\n");
4238 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
4239 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
4240 stats.macInErrs[3]);
4241 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
4242 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
4243 stats.hdrInErrs[3]);
4244 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
4245 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
4246 stats.tcpInErrs[3]);
4247 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
4248 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
4249 stats.tcp6InErrs[3]);
4250 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
4251 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
4252 stats.tnlCongDrops[3]);
4253 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
4254 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
4255 stats.tnlTxDrops[3]);
4256 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
4257 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
4258 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
4259 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
4260 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
4261 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
4262 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
4263 stats.ofldNoNeigh, stats.ofldCongDefer);
4264
4265 rc = sbuf_finish(sb);
4266 sbuf_delete(sb);
4267
4268 return (rc);
4269}
4270
4271static int
4272sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
4273{
4274 struct adapter *sc = arg1;
4275 struct sbuf *sb;
4276 int rc;
4277 u64 nrate[NCHAN], orate[NCHAN];
4278
4279 rc = sysctl_wire_old_buffer(req, 0);
4280 if (rc != 0)
4281 return (rc);
4282
4283 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4284 if (sb == NULL)
4285 return (ENOMEM);
4286
4287 t4_get_chan_txrate(sc, nrate, orate);
4288 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4289 "channel 3\n");
4290 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
4291 nrate[0], nrate[1], nrate[2], nrate[3]);
4292 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
4293 orate[0], orate[1], orate[2], orate[3]);
4294
4295 rc = sbuf_finish(sb);
4296 sbuf_delete(sb);
4297
4298 return (rc);
4299}
4300
2885static inline void
2886txq_start(struct ifnet *ifp, struct sge_txq *txq)
2887{
2888 struct buf_ring *br;
2889 struct mbuf *m;
2890
2891 TXQ_LOCK_ASSERT_OWNED(txq);
2892
2893 br = txq->br;
2894 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
2895 if (m)
2896 t4_eth_tx(ifp, txq, m);
2897}
2898
2899void
4301static inline void
4302txq_start(struct ifnet *ifp, struct sge_txq *txq)
4303{
4304 struct buf_ring *br;
4305 struct mbuf *m;
4306
4307 TXQ_LOCK_ASSERT_OWNED(txq);
4308
4309 br = txq->br;
4310 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
4311 if (m)
4312 t4_eth_tx(ifp, txq, m);
4313}
4314
4315void
2900cxgbe_txq_start(void *arg, int count)
4316t4_tx_callout(void *arg)
2901{
4317{
2902 struct sge_txq *txq = arg;
4318 struct sge_eq *eq = arg;
4319 struct adapter *sc;
2903
4320
2904 TXQ_LOCK(txq);
2905 if (txq->eq.flags & EQ_CRFLUSHED) {
2906 txq->eq.flags &= ~EQ_CRFLUSHED;
4321 if (EQ_TRYLOCK(eq) == 0)
4322 goto reschedule;
4323
4324 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
4325 EQ_UNLOCK(eq);
4326reschedule:
4327 if (__predict_true(!(eq->flags & EQ_DOOMED)))
4328 callout_schedule(&eq->tx_callout, 1);
4329 return;
4330 }
4331
4332 EQ_LOCK_ASSERT_OWNED(eq);
4333
4334 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
4335
4336 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4337 struct sge_txq *txq = arg;
4338 struct port_info *pi = txq->ifp->if_softc;
4339
4340 sc = pi->adapter;
4341 } else {
4342 struct sge_wrq *wrq = arg;
4343
4344 sc = wrq->adapter;
4345 }
4346
4347 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
4348 }
4349
4350 EQ_UNLOCK(eq);
4351}
4352
4353void
4354t4_tx_task(void *arg, int count)
4355{
4356 struct sge_eq *eq = arg;
4357
4358 EQ_LOCK(eq);
4359 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4360 struct sge_txq *txq = arg;
2907 txq_start(txq->ifp, txq);
4361 txq_start(txq->ifp, txq);
2908 } else
2909 wakeup_one(txq); /* txq is going away, wakeup free_txq */
2910 TXQ_UNLOCK(txq);
4362 } else {
4363 struct sge_wrq *wrq = arg;
4364 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
4365 }
4366 EQ_UNLOCK(eq);
2911}
2912
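[The task-only cxgbe_txq_start() on the left is replaced by a two-stage scheme: t4_tx_callout() runs from callout context, where sleeping on the EQ lock is unwelcome, so it uses EQ_TRYLOCK() and reschedules itself one tick out on contention or while the EQ is stalled; once progress is possible it defers the actual transmit to t4_tx_task() on the per-channel taskqueue. Stripped to its shape — eq_can_progress() is a stand-in for the EQ_STALLED/can_resume_tx() test, and the EQ_DOOMED checks are omitted:

	if (EQ_TRYLOCK(eq) == 0) {
		callout_schedule(&eq->tx_callout, 1);	/* contended: retry next tick */
		return;
	}
	if (!eq_can_progress(eq)) {
		EQ_UNLOCK(eq);
		callout_schedule(&eq->tx_callout, 1);	/* stalled: retry next tick */
		return;
	}
	taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);	/* real tx work */
	EQ_UNLOCK(eq);]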
2913static uint32_t
2914fconf_to_mode(uint32_t fconf)
2915{
2916 uint32_t mode;
2917
2918 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |

--- 13 unchanged lines hidden ---

2932
2933 if (fconf & F_PROTOCOL)
2934 mode |= T4_FILTER_IP_PROTO;
2935
2936 if (fconf & F_TOS)
2937 mode |= T4_FILTER_IP_TOS;
2938
2939 if (fconf & F_VLAN)
4367}
4368
4369static uint32_t
4370fconf_to_mode(uint32_t fconf)
4371{
4372 uint32_t mode;
4373
4374 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |

--- 13 unchanged lines hidden ---

4388
4389 if (fconf & F_PROTOCOL)
4390 mode |= T4_FILTER_IP_PROTO;
4391
4392 if (fconf & F_TOS)
4393 mode |= T4_FILTER_IP_TOS;
4394
4395 if (fconf & F_VLAN)
2940 mode |= T4_FILTER_IVLAN;
4396 mode |= T4_FILTER_VLAN;
2941
2942 if (fconf & F_VNIC_ID)
4397
4398 if (fconf & F_VNIC_ID)
2943 mode |= T4_FILTER_OVLAN;
4399 mode |= T4_FILTER_VNIC;
2944
2945 if (fconf & F_PORT)
2946 mode |= T4_FILTER_PORT;
2947
2948 if (fconf & F_FCOE)
2949 mode |= T4_FILTER_FCoE;
2950
2951 return (mode);

--- 17 unchanged lines hidden ---

2969 fconf |= F_ETHERTYPE;
2970
2971 if (mode & T4_FILTER_IP_PROTO)
2972 fconf |= F_PROTOCOL;
2973
2974 if (mode & T4_FILTER_IP_TOS)
2975 fconf |= F_TOS;
2976
4400
4401 if (fconf & F_PORT)
4402 mode |= T4_FILTER_PORT;
4403
4404 if (fconf & F_FCOE)
4405 mode |= T4_FILTER_FCoE;
4406
4407 return (mode);

--- 17 unchanged lines hidden ---

4425 fconf |= F_ETHERTYPE;
4426
4427 if (mode & T4_FILTER_IP_PROTO)
4428 fconf |= F_PROTOCOL;
4429
4430 if (mode & T4_FILTER_IP_TOS)
4431 fconf |= F_TOS;
4432
2977 if (mode & T4_FILTER_IVLAN)
4433 if (mode & T4_FILTER_VLAN)
2978 fconf |= F_VLAN;
2979
4434 fconf |= F_VLAN;
4435
2980 if (mode & T4_FILTER_OVLAN)
4436 if (mode & T4_FILTER_VNIC)
2981 fconf |= F_VNIC_ID;
2982
2983 if (mode & T4_FILTER_PORT)
2984 fconf |= F_PORT;
2985
2986 if (mode & T4_FILTER_FCoE)
2987 fconf |= F_FCOE;
2988

--- 18 unchanged lines hidden ---

3007 fconf |= F_ETHERTYPE;
3008
3009 if (fs->val.proto || fs->mask.proto)
3010 fconf |= F_PROTOCOL;
3011
3012 if (fs->val.tos || fs->mask.tos)
3013 fconf |= F_TOS;
3014
4437 fconf |= F_VNIC_ID;
4438
4439 if (mode & T4_FILTER_PORT)
4440 fconf |= F_PORT;
4441
4442 if (mode & T4_FILTER_FCoE)
4443 fconf |= F_FCOE;
4444

--- 18 unchanged lines hidden ---

4463 fconf |= F_ETHERTYPE;
4464
4465 if (fs->val.proto || fs->mask.proto)
4466 fconf |= F_PROTOCOL;
4467
4468 if (fs->val.tos || fs->mask.tos)
4469 fconf |= F_TOS;
4470
3015 if (fs->val.ivlan_vld || fs->mask.ivlan_vld)
4471 if (fs->val.vlan_vld || fs->mask.vlan_vld)
3016 fconf |= F_VLAN;
3017
4472 fconf |= F_VLAN;
4473
3018 if (fs->val.ovlan_vld || fs->mask.ovlan_vld)
4474 if (fs->val.vnic_vld || fs->mask.vnic_vld)
3019 fconf |= F_VNIC_ID;
3020
3021 if (fs->val.iport || fs->mask.iport)
3022 fconf |= F_PORT;
3023
3024 if (fs->val.fcoe || fs->mask.fcoe)
3025 fconf |= F_FCOE;
3026
3027 return (fconf);
3028}
3029
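[fconf_to_mode() and mode_to_fconf() are intended to be inverses over the compressed-filter-tuple bits they both handle, i.e. for any supported fconf:

	mode_to_fconf(fconf_to_mode(fconf)) == fconf

fspec_to_fconf() then computes the subset of those bits a particular filter specification actually consumes; set_filter() below rejects with E2BIG any filter whose required bits are not already present in the cached sc->filter_mode.]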
3030static int
3031get_filter_mode(struct adapter *sc, uint32_t *mode)
3032{
3033 uint32_t fconf;
3034
3035 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
3036 A_TP_VLAN_PRI_MAP);
3037
4475 fconf |= F_VNIC_ID;
4476
4477 if (fs->val.iport || fs->mask.iport)
4478 fconf |= F_PORT;
4479
4480 if (fs->val.fcoe || fs->mask.fcoe)
4481 fconf |= F_FCOE;
4482
4483 return (fconf);
4484}
4485
4486static int
4487get_filter_mode(struct adapter *sc, uint32_t *mode)
4488{
4489 uint32_t fconf;
4490
4491 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
4492 A_TP_VLAN_PRI_MAP);
4493
3038 *mode = fconf_to_mode(fconf);
4494 if (sc->filter_mode != fconf) {
4495 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
4496 device_get_nameunit(sc->dev), sc->filter_mode, fconf);
4497 sc->filter_mode = fconf;
4498 }
3039
4499
4500 *mode = fconf_to_mode(sc->filter_mode);
4501
3040 return (0);
3041}
3042
3043static int
3044set_filter_mode(struct adapter *sc, uint32_t mode)
3045{
3046 uint32_t fconf;
3047 int rc;

--- 6 unchanged lines hidden ---

3054 goto done;
3055 }
3056
3057 if (sc->tids.ftids_in_use > 0) {
3058 rc = EBUSY;
3059 goto done;
3060 }
3061
4502 return (0);
4503}
4504
4505static int
4506set_filter_mode(struct adapter *sc, uint32_t mode)
4507{
4508 uint32_t fconf;
4509 int rc;

--- 6 unchanged lines hidden ---

4516 goto done;
4517 }
4518
4519 if (sc->tids.ftids_in_use > 0) {
4520 rc = EBUSY;
4521 goto done;
4522 }
4523
4524#ifndef TCP_OFFLOAD_DISABLE
4525 if (sc->offload_map) {
4526 rc = EBUSY;
4527 goto done;
4528 }
4529#endif
4530
4531#ifdef notyet
3062 rc = -t4_set_filter_mode(sc, fconf);
4532 rc = -t4_set_filter_mode(sc, fconf);
4533 if (rc == 0)
4534 sc->filter_mode = fconf;
4535#else
4536 rc = ENOTSUP;
4537#endif
4538
3063done:
3064 ADAPTER_UNLOCK(sc);
3065 return (rc);
3066}
3067
3068static inline uint64_t
3069get_filter_hits(struct adapter *sc, uint32_t fid)
3070{

--- 43 unchanged lines hidden ---

3114
3115 t->idx = 0xffffffff;
3116 return (0);
3117}
3118
3119static int
3120set_filter(struct adapter *sc, struct t4_filter *t)
3121{
4539done:
4540 ADAPTER_UNLOCK(sc);
4541 return (rc);
4542}
4543
4544static inline uint64_t
4545get_filter_hits(struct adapter *sc, uint32_t fid)
4546{

--- 43 unchanged lines hidden ---

4590
4591 t->idx = 0xffffffff;
4592 return (0);
4593}
4594
4595static int
4596set_filter(struct adapter *sc, struct t4_filter *t)
4597{
3122 uint32_t fconf;
3123 unsigned int nfilters, nports;
3124 struct filter_entry *f;
3125 int i;
3126
3127 ADAPTER_LOCK_ASSERT_OWNED(sc);
3128
3129 nfilters = sc->tids.nftids;
3130 nports = sc->params.nports;
3131
3132 if (nfilters == 0)
3133 return (ENOTSUP);
3134
3135 if (!(sc->flags & FULL_INIT_DONE))
3136 return (EAGAIN);
3137
3138 if (t->idx >= nfilters)
3139 return (EINVAL);
3140
3141 /* Validate against the global filter mode */
4598 unsigned int nfilters, nports;
4599 struct filter_entry *f;
4600 int i;
4601
4602 ADAPTER_LOCK_ASSERT_OWNED(sc);
4603
4604 nfilters = sc->tids.nftids;
4605 nports = sc->params.nports;
4606
4607 if (nfilters == 0)
4608 return (ENOTSUP);
4609
4610 if (!(sc->flags & FULL_INIT_DONE))
4611 return (EAGAIN);
4612
4613 if (t->idx >= nfilters)
4614 return (EINVAL);
4615
4616 /* Validate against the global filter mode */
3142 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
3143 A_TP_VLAN_PRI_MAP);
3144 if ((fconf | fspec_to_fconf(&t->fs)) != fconf)
4617 if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode)
3145 return (E2BIG);
3146
3147 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
3148 return (EINVAL);
3149
3150 if (t->fs.val.iport >= nports)
3151 return (EINVAL);
3152

--- 80 unchanged lines hidden (view full) ---

3233 t4_l2t_release(f->l2t);
3234
3235 bzero(f, sizeof (*f));
3236}
3237
3238static int
3239set_filter_wr(struct adapter *sc, int fidx)
3240{
4618 return (E2BIG);
4619
4620 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
4621 return (EINVAL);
4622
4623 if (t->fs.val.iport >= nports)
4624 return (EINVAL);
4625

--- 80 unchanged lines hidden (view full) ---

4706 t4_l2t_release(f->l2t);
4707
4708 bzero(f, sizeof (*f));
4709}
4710
4711static int
4712set_filter_wr(struct adapter *sc, int fidx)
4713{
3241 int rc;
3242 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
3243 struct mbuf *m;
3244 struct fw_filter_wr *fwr;
3245 unsigned int ftid;
3246
3247 ADAPTER_LOCK_ASSERT_OWNED(sc);
3248
3249 if (f->fs.newdmac || f->fs.newvlan) {

--- 43 unchanged lines hidden (view full) ---

3293 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
3294 V_FW_FILTER_WR_PRIO(f->fs.prio) |
3295 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
3296 fwr->ethtype = htobe16(f->fs.val.ethtype);
3297 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
3298 fwr->frag_to_ovlan_vldm =
3299 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
3300 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
4714 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4715 struct mbuf *m;
4716 struct fw_filter_wr *fwr;
4717 unsigned int ftid;
4718
4719 ADAPTER_LOCK_ASSERT_OWNED(sc);
4720
4721 if (f->fs.newdmac || f->fs.newvlan) {

--- 43 unchanged lines hidden (view full) ---

4765 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
4766 V_FW_FILTER_WR_PRIO(f->fs.prio) |
4767 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
4768 fwr->ethtype = htobe16(f->fs.val.ethtype);
4769 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
4770 fwr->frag_to_ovlan_vldm =
4771 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
4772 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
3301 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
3302 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
3303 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
3304 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
4773 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
4774 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
4775 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
4776 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
3305 fwr->smac_sel = 0;
3306 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
4777 fwr->smac_sel = 0;
4778 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
3307 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.intrq[0].abs_id));
4779 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
3308 fwr->maci_to_matchtypem =
3309 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
3310 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
3311 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
3312 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
3313 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
3314 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
3315 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
3316 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
3317 fwr->ptcl = f->fs.val.proto;
3318 fwr->ptclm = f->fs.mask.proto;
3319 fwr->ttyp = f->fs.val.tos;
3320 fwr->ttypm = f->fs.mask.tos;
4780 fwr->maci_to_matchtypem =
4781 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
4782 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
4783 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
4784 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
4785 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
4786 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
4787 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
4788 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
4789 fwr->ptcl = f->fs.val.proto;
4790 fwr->ptclm = f->fs.mask.proto;
4791 fwr->ttyp = f->fs.val.tos;
4792 fwr->ttypm = f->fs.mask.tos;
3321 fwr->ivlan = htobe16(f->fs.val.ivlan);
3322 fwr->ivlanm = htobe16(f->fs.mask.ivlan);
3323 fwr->ovlan = htobe16(f->fs.val.ovlan);
3324 fwr->ovlanm = htobe16(f->fs.mask.ovlan);
4793 fwr->ivlan = htobe16(f->fs.val.vlan);
4794 fwr->ivlanm = htobe16(f->fs.mask.vlan);
4795 fwr->ovlan = htobe16(f->fs.val.vnic);
4796 fwr->ovlanm = htobe16(f->fs.mask.vnic);
3325 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
3326 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
3327 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
3328 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
3329 fwr->lp = htobe16(f->fs.val.dport);
3330 fwr->lpm = htobe16(f->fs.mask.dport);
3331 fwr->fp = htobe16(f->fs.val.sport);
3332 fwr->fpm = htobe16(f->fs.mask.sport);
3333 if (f->fs.newsmac)
3334 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
3335
3336 f->pending = 1;
3337 sc->tids.ftids_in_use++;
4797 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
4798 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
4799 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
4800 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
4801 fwr->lp = htobe16(f->fs.val.dport);
4802 fwr->lpm = htobe16(f->fs.mask.dport);
4803 fwr->fp = htobe16(f->fs.val.sport);
4804 fwr->fpm = htobe16(f->fs.mask.sport);
4805 if (f->fs.newsmac)
4806 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
4807
4808 f->pending = 1;
4809 sc->tids.ftids_in_use++;
3338 rc = t4_mgmt_tx(sc, m);
3339 if (rc != 0) {
3340 sc->tids.ftids_in_use--;
3341 m_freem(m);
3342 clear_filter(f);
3343 }
3344 return (rc);
4810
4811 t4_mgmt_tx(sc, m);
4812 return (0);
3345}
3346
3347static int
3348del_filter_wr(struct adapter *sc, int fidx)
3349{
3350 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
3351 struct mbuf *m;
3352 struct fw_filter_wr *fwr;
4813}
4814
4815static int
4816del_filter_wr(struct adapter *sc, int fidx)
4817{
4818 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4819 struct mbuf *m;
4820 struct fw_filter_wr *fwr;
3353 unsigned int rc, ftid;
4821 unsigned int ftid;
3354
3355 ADAPTER_LOCK_ASSERT_OWNED(sc);
3356
3357 ftid = sc->tids.ftid_base + fidx;
3358
3359 m = m_gethdr(M_NOWAIT, MT_DATA);
3360 if (m == NULL)
3361 return (ENOMEM);
3362
3363 fwr = mtod(m, struct fw_filter_wr *);
3364 m->m_len = m->m_pkthdr.len = sizeof(*fwr);
3365 bzero(fwr, sizeof (*fwr));
3366
4822
4823 ADAPTER_LOCK_ASSERT_OWNED(sc);
4824
4825 ftid = sc->tids.ftid_base + fidx;
4826
4827 m = m_gethdr(M_NOWAIT, MT_DATA);
4828 if (m == NULL)
4829 return (ENOMEM);
4830
4831 fwr = mtod(m, struct fw_filter_wr *);
4832 m->m_len = m->m_pkthdr.len = sizeof(*fwr);
4833 bzero(fwr, sizeof (*fwr));
4834
3367 t4_mk_filtdelwr(ftid, fwr, sc->sge.intrq[0].abs_id);
4835 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
3368
3369 f->pending = 1;
4836
4837 f->pending = 1;
3370 rc = t4_mgmt_tx(sc, m);
3371 if (rc != 0) {
3372 f->pending = 0;
3373 m_freem(m);
3374 }
3375 return (rc);
4838 t4_mgmt_tx(sc, m);
4839 return (0);
3376}
3377
4840}
4841
3378/* XXX move intr handlers to main.c and make this static */
3379void
3380filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl)
4842static int
4843filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
3381{
4844{
4845 struct adapter *sc = iq->adapter;
4846 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
3382 unsigned int idx = GET_TID(rpl);
3383
4847 unsigned int idx = GET_TID(rpl);
4848
4849 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4850 rss->opcode));
4851
3384 if (idx >= sc->tids.ftid_base &&
3385 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
3386 unsigned int rc = G_COOKIE(rpl->cookie);
3387 struct filter_entry *f = &sc->tids.ftid_tab[idx];
3388
4852 if (idx >= sc->tids.ftid_base &&
4853 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
4854 unsigned int rc = G_COOKIE(rpl->cookie);
4855 struct filter_entry *f = &sc->tids.ftid_tab[idx];
4856
3389 if (rc == FW_FILTER_WR_FLT_DELETED) {
3390 /*
3391 * Clear the filter when we get confirmation from the
3392 * hardware that the filter has been deleted.
3393 */
3394 clear_filter(f);
3395 sc->tids.ftids_in_use--;
3396 } else if (rc == FW_FILTER_WR_SMT_TBL_FULL) {
3397 device_printf(sc->dev,
3398 "filter %u setup failed due to full SMT\n", idx);
3399 clear_filter(f);
3400 sc->tids.ftids_in_use--;
3401 } else if (rc == FW_FILTER_WR_FLT_ADDED) {
4857 if (rc == FW_FILTER_WR_FLT_ADDED) {
3402 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
3403 f->pending = 0; /* asynchronous setup completed */
3404 f->valid = 1;
4858 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
4859 f->pending = 0; /* asynchronous setup completed */
4860 f->valid = 1;
3405 } else {
3406 /*
3407 * Something went wrong. Issue a warning about the
3408 * problem and clear everything out.
3409 */
4861 return (0);
4862 }
4863
4864 if (rc != FW_FILTER_WR_FLT_DELETED) {
4865 /* Add or delete failed, need to display an error */
3410 device_printf(sc->dev,
3411 "filter %u setup failed with error %u\n", idx, rc);
4866 device_printf(sc->dev,
4867 "filter %u setup failed with error %u\n", idx, rc);
3412 clear_filter(f);
3413 sc->tids.ftids_in_use--;
3414 }
4868 }
4869
4870 clear_filter(f);
4871 ADAPTER_LOCK(sc);
4872 sc->tids.ftids_in_use--;
4873 ADAPTER_UNLOCK(sc);
3415 }
4874 }
4875
4876 return (0);
3416}
3417
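[filter_rpl() changes calling convention in this revision: rather than being handed a pre-decoded CPL by the interrupt code, it is now an ingress-queue handler taking (iq, rss, mbuf) and locating the CPL just past the RSS header itself:

	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);

The KASSERT records that CPL_SET_TCB_RPL never carries a payload mbuf, and the ftids_in_use accounting now takes ADAPTER_LOCK explicitly, presumably because the reply no longer arrives with that lock held (an inference from the new locking, not something the diff states).]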
3418static int
3419get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
3420{
3421 int rc = EINVAL;
3422
3423 if (cntxt->cid > M_CTXTQID)

--- 15 unchanged lines hidden (view full) ---

3439
3440 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
3441 &cntxt->data[0]);
3442 }
3443
3444 return (rc);
3445}
3446
4877}
4878
4879static int
4880get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
4881{
4882 int rc = EINVAL;
4883
4884 if (cntxt->cid > M_CTXTQID)

--- 15 unchanged lines hidden ---

4900
4901 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
4902 &cntxt->data[0]);
4903 }
4904
4905 return (rc);
4906}
4907
4908static int
4909read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
4910{
4911 uint32_t base, size, lo, hi, win, off, remaining, i, n;
4912 uint32_t *buf, *b;
4913 int rc;
4914
4915 /* reads are in multiples of 32 bits */
4916 if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
4917 return (EINVAL);
4918
4919 /*
4920 * We don't want to deal with potential holes, so we mandate that the
4921 * requested region lie entirely within one of the 3 memories.
4922 */
4923 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4924 if (lo & F_EDRAM0_ENABLE) {
4925 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4926 base = G_EDRAM0_BASE(hi) << 20;
4927 size = G_EDRAM0_SIZE(hi) << 20;
4928 if (size > 0 &&
4929 mr->addr >= base && mr->addr < base + size &&
4930 mr->addr + mr->len <= base + size)
4931 goto proceed;
4932 }
4933 if (lo & F_EDRAM1_ENABLE) {
4934 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4935 base = G_EDRAM1_BASE(hi) << 20;
4936 size = G_EDRAM1_SIZE(hi) << 20;
4937 if (size > 0 &&
4938 mr->addr >= base && mr->addr < base + size &&
4939 mr->addr + mr->len <= base + size)
4940 goto proceed;
4941 }
4942 if (lo & F_EXT_MEM_ENABLE) {
4943 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4944 base = G_EXT_MEM_BASE(hi) << 20;
4945 size = G_EXT_MEM_SIZE(hi) << 20;
4946 if (size > 0 &&
4947 mr->addr >= base && mr->addr < base + size &&
4948 mr->addr + mr->len <= base + size)
4949 goto proceed;
4950 }
4951 return (ENXIO);
4952
4953proceed:
4954 buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);
4955
4956 /*
4957 * Position the PCIe window (we use memwin2) to the 16B aligned area
4958 * just at/before the requested region.
4959 */
4960 win = mr->addr & ~0xf;
4961 off = mr->addr - win; /* offset of the requested region in the win */
4962 remaining = mr->len;
4963
4964 while (remaining) {
4965 t4_write_reg(sc,
4966 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
4967 t4_read_reg(sc,
4968 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
4969
4970 /* number of bytes that we'll copy in the inner loop */
4971 n = min(remaining, MEMWIN2_APERTURE - off);
4972
4973 for (i = 0; i < n; i += 4, remaining -= 4)
4974 *b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);
4975
4976 win += MEMWIN2_APERTURE;
4977 off = 0;
4978 }
4979
4980 rc = copyout(buf, mr->data, mr->len);
4981 free(buf, M_CXGBE);
4982
4983 return (rc);
4984}
4985
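[A worked example of the window arithmetic in read_card_mem() above, for a request at mr->addr = 0x1234 with mr->len = 0x40:

	win = 0x1234 & ~0xf;			/* 0x1230: 16B-aligned window base */
	off = 0x1234 - 0x1230;			/* 4: start offset inside the aperture */
	n   = min(0x40, MEMWIN2_APERTURE - 4);

The first pass copies n bytes in 32-bit reads starting at MEMWIN2_BASE + 4; the window then slides forward by MEMWIN2_APERTURE with off reset to 0 until 'remaining' is exhausted.]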
3447int
3448t4_os_find_pci_capability(struct adapter *sc, int cap)
3449{
3450 int i;
3451
3452 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
3453}
3454

--- 54 unchanged lines hidden ---

3509
3510 if (link_stat) {
3511 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
3512 if_link_state_change(ifp, LINK_STATE_UP);
3513 } else
3514 if_link_state_change(ifp, LINK_STATE_DOWN);
3515}
3516
4986int
4987t4_os_find_pci_capability(struct adapter *sc, int cap)
4988{
4989 int i;
4990
4991 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
4992}
4993

--- 54 unchanged lines hidden ---

5048
5049 if (link_stat) {
5050 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
5051 if_link_state_change(ifp, LINK_STATE_UP);
5052 } else
5053 if_link_state_change(ifp, LINK_STATE_DOWN);
5054}
5055
5056void
5057t4_iterate(void (*func)(struct adapter *, void *), void *arg)
5058{
5059 struct adapter *sc;
5060
5061 mtx_lock(&t4_list_lock);
5062 SLIST_FOREACH(sc, &t4_list, link) {
5063 /*
5064 * func should not make any assumptions about what state sc is
5065 * in - the only guarantee is that sc->sc_lock is a valid lock.
5066 */
5067 func(sc, arg);
5068 }
5069 mtx_unlock(&t4_list_lock);
5070}
5071
3517static int
3518t4_open(struct cdev *dev, int flags, int type, struct thread *td)
3519{
3520 return (0);
3521}
3522
3523static int
3524t4_close(struct cdev *dev, int flags, int type, struct thread *td)

--- 80 unchanged lines hidden ---

3605 case CHELSIO_T4_DEL_FILTER:
3606 ADAPTER_LOCK(sc);
3607 rc = del_filter(sc, (struct t4_filter *)data);
3608 ADAPTER_UNLOCK(sc);
3609 break;
3610 case CHELSIO_T4_GET_SGE_CONTEXT:
3611 rc = get_sge_context(sc, (struct t4_sge_context *)data);
3612 break;
5072static int
5073t4_open(struct cdev *dev, int flags, int type, struct thread *td)
5074{
5075 return (0);
5076}
5077
5078static int
5079t4_close(struct cdev *dev, int flags, int type, struct thread *td)

--- 80 unchanged lines hidden ---

5160 case CHELSIO_T4_DEL_FILTER:
5161 ADAPTER_LOCK(sc);
5162 rc = del_filter(sc, (struct t4_filter *)data);
5163 ADAPTER_UNLOCK(sc);
5164 break;
5165 case CHELSIO_T4_GET_SGE_CONTEXT:
5166 rc = get_sge_context(sc, (struct t4_sge_context *)data);
5167 break;
5168 case CHELSIO_T4_LOAD_FW: {
5169 struct t4_data *fw = (struct t4_data *)data;
5170 uint8_t *fw_data;
5171
5172 if (sc->flags & FULL_INIT_DONE)
5173 return (EBUSY);
5174
5175 fw_data = malloc(fw->len, M_CXGBE, M_NOWAIT);
5176 if (fw_data == NULL)
5177 return (ENOMEM);
5178
5179 rc = copyin(fw->data, fw_data, fw->len);
5180 if (rc == 0)
5181 rc = -t4_load_fw(sc, fw_data, fw->len);
5182
5183 free(fw_data, M_CXGBE);
5184 break;
5185 }
5186 case CHELSIO_T4_GET_MEM:
5187 rc = read_card_mem(sc, (struct t4_mem_range *)data);
5188 break;
3613 default:
3614 rc = EINVAL;
3615 }
3616
3617 return (rc);
3618}
3619
5189 default:
5190 rc = EINVAL;
5191 }
5192
5193 return (rc);
5194}
5195
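[A hypothetical userland invocation of the new CHELSIO_T4_LOAD_FW ioctl, using only the t4_data members the handler above dereferences (len and data); fd is an open descriptor for the adapter's control device, and error handling is omitted:

	struct t4_data d;

	d.len = fw_image_len;		/* size of the firmware image */
	d.data = fw_image;		/* pointer to the image bytes */
	ioctl(fd, CHELSIO_T4_LOAD_FW, &d);

Note the handler returns EBUSY once FULL_INIT_DONE is set, so a new firmware can only be loaded before the adapter is brought fully up.]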
5196#ifndef TCP_OFFLOAD_DISABLE
3620static int
5197static int
5198toe_capability(struct port_info *pi, int enable)
5199{
5200 int rc;
5201 struct adapter *sc = pi->adapter;
5202
5203 ADAPTER_LOCK_ASSERT_OWNED(sc);
5204
5205 if (!is_offload(sc))
5206 return (ENODEV);
5207
5208 if (enable) {
5209 if (isset(&sc->offload_map, pi->port_id))
5210 return (0);
5211
5212 if (sc->offload_map == 0) {
5213 rc = activate_uld(sc, ULD_TOM, &sc->tom);
5214 if (rc != 0)
5215 return (rc);
5216 }
5217
5218 setbit(&sc->offload_map, pi->port_id);
5219 } else {
5220 if (!isset(&sc->offload_map, pi->port_id))
5221 return (0);
5222
5223 clrbit(&sc->offload_map, pi->port_id);
5224
5225 if (sc->offload_map == 0) {
5226 rc = deactivate_uld(&sc->tom);
5227 if (rc != 0) {
5228 setbit(&sc->offload_map, pi->port_id);
5229 return (rc);
5230 }
5231 }
5232 }
5233
5234 return (0);
5235}
5236
5237/*
5238 * Add an upper layer driver to the global list.
5239 */
5240int
5241t4_register_uld(struct uld_info *ui)
5242{
5243 int rc = 0;
5244 struct uld_info *u;
5245
5246 mtx_lock(&t4_uld_list_lock);
5247 SLIST_FOREACH(u, &t4_uld_list, link) {
5248 if (u->uld_id == ui->uld_id) {
5249 rc = EEXIST;
5250 goto done;
5251 }
5252 }
5253
5254 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
5255 ui->refcount = 0;
5256done:
5257 mtx_unlock(&t4_uld_list_lock);
5258 return (rc);
5259}
5260
5261int
5262t4_unregister_uld(struct uld_info *ui)
5263{
5264 int rc = EINVAL;
5265 struct uld_info *u;
5266
5267 mtx_lock(&t4_uld_list_lock);
5268
5269 SLIST_FOREACH(u, &t4_uld_list, link) {
5270 if (u == ui) {
5271 if (ui->refcount > 0) {
5272 rc = EBUSY;
5273 goto done;
5274 }
5275
5276 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
5277 rc = 0;
5278 goto done;
5279 }
5280 }
5281done:
5282 mtx_unlock(&t4_uld_list_lock);
5283 return (rc);
5284}
5285
5286static int
5287activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
5288{
5289 int rc = EAGAIN;
5290 struct uld_info *ui;
5291
5292 mtx_lock(&t4_uld_list_lock);
5293
5294 SLIST_FOREACH(ui, &t4_uld_list, link) {
5295 if (ui->uld_id == id) {
5296 rc = ui->attach(sc, &usc->softc);
5297 if (rc == 0) {
5298 KASSERT(usc->softc != NULL,
5299 ("%s: ULD %d has no state", __func__, id));
5300 ui->refcount++;
5301 usc->uld = ui;
5302 }
5303 goto done;
5304 }
5305 }
5306done:
5307 mtx_unlock(&t4_uld_list_lock);
5308
5309 return (rc);
5310}
5311
5312static int
5313deactivate_uld(struct uld_softc *usc)
5314{
5315 int rc;
5316
5317 mtx_lock(&t4_uld_list_lock);
5318
5319 if (usc->uld == NULL || usc->softc == NULL) {
5320 rc = EINVAL;
5321 goto done;
5322 }
5323
5324 rc = usc->uld->detach(usc->softc);
5325 if (rc == 0) {
5326 KASSERT(usc->uld->refcount > 0,
5327 ("%s: ULD has bad refcount", __func__));
5328 usc->uld->refcount--;
5329 usc->uld = NULL;
5330 usc->softc = NULL;
5331 }
5332done:
5333 mtx_unlock(&t4_uld_list_lock);
5334
5335 return (rc);
5336}
5337#endif
5338
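[A sketch of how an upper-layer driver would plug into this registry; the attach/detach signatures follow the calls activate_uld() and deactivate_uld() make above, and everything else here (the names, the use of ULD_TOM) is placeholder:

	static int
	dummy_attach(struct adapter *sc, void **softcp)
	{
		*softcp = sc;		/* any non-NULL per-ULD state */
		return (0);
	}

	static int
	dummy_detach(void *softc)
	{
		return (0);
	}

	static struct uld_info dummy_uld = {
		.uld_id = ULD_TOM,
		.attach = dummy_attach,
		.detach = dummy_detach,
	};

	/*
	 * t4_register_uld(&dummy_uld) at module load and
	 * t4_unregister_uld(&dummy_uld) at unload; unregistering returns
	 * EBUSY while any adapter still holds the ULD active (refcount > 0).
	 */]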
5339/*
5340 * Come up with reasonable defaults for some of the tunables, provided they're
5341 * not set by the user (in which case we'll use the values as is).
5342 */
5343static void
5344tweak_tunables(void)
5345{
5346 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
5347
5348 if (t4_ntxq10g < 1)
5349 t4_ntxq10g = min(nc, NTXQ_10G);
5350
5351 if (t4_ntxq1g < 1)
5352 t4_ntxq1g = min(nc, NTXQ_1G);
5353
5354 if (t4_nrxq10g < 1)
5355 t4_nrxq10g = min(nc, NRXQ_10G);
5356
5357 if (t4_nrxq1g < 1)
5358 t4_nrxq1g = min(nc, NRXQ_1G);
5359
5360#ifndef TCP_OFFLOAD_DISABLE
5361 if (t4_nofldtxq10g < 1)
5362 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
5363
5364 if (t4_nofldtxq1g < 1)
5365 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
5366
5367 if (t4_nofldrxq10g < 1)
5368 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
5369
5370 if (t4_nofldrxq1g < 1)
5371 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
5372#endif
5373
5374 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
5375 t4_tmr_idx_10g = TMR_IDX_10G;
5376
5377 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
5378 t4_pktc_idx_10g = PKTC_IDX_10G;
5379
5380 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
5381 t4_tmr_idx_1g = TMR_IDX_1G;
5382
5383 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
5384 t4_pktc_idx_1g = PKTC_IDX_1G;
5385
5386 if (t4_qsize_txq < 128)
5387 t4_qsize_txq = 128;
5388
5389 if (t4_qsize_rxq < 128)
5390 t4_qsize_rxq = 128;
5391 while (t4_qsize_rxq & 7)
5392 t4_qsize_rxq++;
5393
5394 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
5395}
5396
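[The increment loop at the end of tweak_tunables() rounds t4_qsize_rxq up to the next multiple of 8, matching the (qsize & 7) == 0 requirement the qsize_rxq sysctl handler enforces earlier in this file; the equivalent closed form is:

	t4_qsize_rxq = (t4_qsize_rxq + 7) & ~7;	/* e.g. 130 -> 136 */]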
5397static int
3621t4_mod_event(module_t mod, int cmd, void *arg)
3622{
5398t4_mod_event(module_t mod, int cmd, void *arg)
5399{
5400 int rc = 0;
3623
5401
3624 if (cmd == MOD_LOAD)
5402 switch (cmd) {
5403 case MOD_LOAD:
3625 t4_sge_modload();
5404 t4_sge_modload();
5405 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
5406 SLIST_INIT(&t4_list);
5407#ifndef TCP_OFFLOAD_DISABLE
5408 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
5409 SLIST_INIT(&t4_uld_list);
5410#endif
5411 tweak_tunables();
5412 break;
3626
5413
3627 return (0);
5414 case MOD_UNLOAD:
5415#ifndef TCP_OFFLOAD_DISABLE
5416 mtx_lock(&t4_uld_list_lock);
5417 if (!SLIST_EMPTY(&t4_uld_list)) {
5418 rc = EBUSY;
5419 mtx_unlock(&t4_uld_list_lock);
5420 break;
5421 }
5422 mtx_unlock(&t4_uld_list_lock);
5423 mtx_destroy(&t4_uld_list_lock);
5424#endif
5425 mtx_lock(&t4_list_lock);
5426 if (!SLIST_EMPTY(&t4_list)) {
5427 rc = EBUSY;
5428 mtx_unlock(&t4_list_lock);
5429 break;
5430 }
5431 mtx_unlock(&t4_list_lock);
5432 mtx_destroy(&t4_list_lock);
5433 break;
5434 }
5435
5436 return (rc);
3628}
3629
3630static devclass_t t4_devclass;
3631static devclass_t cxgbe_devclass;
3632
3633DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
3634MODULE_VERSION(t4nex, 1);
3635
3636DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
3637MODULE_VERSION(cxgbe, 1);
5437}
5438
5439static devclass_t t4_devclass;
5440static devclass_t cxgbe_devclass;
5441
5442DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
5443MODULE_VERSION(t4nex, 1);
5444
5445DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
5446MODULE_VERSION(cxgbe, 1);