/* cxgb_main.c revision 210505 */
1167514Skmacy/**************************************************************************
2167514Skmacy
3189643SgnnCopyright (c) 2007-2009, Chelsio Inc.
4167514SkmacyAll rights reserved.
5167514Skmacy
6167514SkmacyRedistribution and use in source and binary forms, with or without
7167514Skmacymodification, are permitted provided that the following conditions are met:
8167514Skmacy
9167514Skmacy 1. Redistributions of source code must retain the above copyright notice,
10167514Skmacy    this list of conditions and the following disclaimer.
11167514Skmacy
12178302Skmacy 2. Neither the name of the Chelsio Corporation nor the names of its
13167514Skmacy    contributors may be used to endorse or promote products derived from
14167514Skmacy    this software without specific prior written permission.
15167514Skmacy
16167514SkmacyTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17167514SkmacyAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18167514SkmacyIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19167514SkmacyARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20167514SkmacyLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21167514SkmacyCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22167514SkmacySUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23167514SkmacyINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24167514SkmacyCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25167514SkmacyARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26167514SkmacyPOSSIBILITY OF SUCH DAMAGE.
27167514Skmacy
28167514Skmacy***************************************************************************/
29167514Skmacy
30167514Skmacy#include <sys/cdefs.h>
31167514Skmacy__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 210505 2010-07-26 17:31:15Z jhb $");
32167514Skmacy
33167514Skmacy#include <sys/param.h>
34167514Skmacy#include <sys/systm.h>
35167514Skmacy#include <sys/kernel.h>
36167514Skmacy#include <sys/bus.h>
37167514Skmacy#include <sys/module.h>
38167514Skmacy#include <sys/pciio.h>
39167514Skmacy#include <sys/conf.h>
40167514Skmacy#include <machine/bus.h>
41167514Skmacy#include <machine/resource.h>
42167514Skmacy#include <sys/bus_dma.h>
43176472Skmacy#include <sys/ktr.h>
44167514Skmacy#include <sys/rman.h>
45167514Skmacy#include <sys/ioccom.h>
46167514Skmacy#include <sys/mbuf.h>
47167514Skmacy#include <sys/linker.h>
48167514Skmacy#include <sys/firmware.h>
49167514Skmacy#include <sys/socket.h>
50167514Skmacy#include <sys/sockio.h>
51167514Skmacy#include <sys/smp.h>
52167514Skmacy#include <sys/sysctl.h>
53174708Skmacy#include <sys/syslog.h>
54167514Skmacy#include <sys/queue.h>
55167514Skmacy#include <sys/taskqueue.h>
56174708Skmacy#include <sys/proc.h>
57167514Skmacy
58167514Skmacy#include <net/bpf.h>
59167514Skmacy#include <net/ethernet.h>
60167514Skmacy#include <net/if.h>
61167514Skmacy#include <net/if_arp.h>
62167514Skmacy#include <net/if_dl.h>
63167514Skmacy#include <net/if_media.h>
64167514Skmacy#include <net/if_types.h>
65180583Skmacy#include <net/if_vlan_var.h>
66167514Skmacy
67167514Skmacy#include <netinet/in_systm.h>
68167514Skmacy#include <netinet/in.h>
69167514Skmacy#include <netinet/if_ether.h>
70167514Skmacy#include <netinet/ip.h>
71167514Skmacy#include <netinet/ip.h>
72167514Skmacy#include <netinet/tcp.h>
73167514Skmacy#include <netinet/udp.h>
74167514Skmacy
75167514Skmacy#include <dev/pci/pcireg.h>
76167514Skmacy#include <dev/pci/pcivar.h>
77167514Skmacy#include <dev/pci/pci_private.h>
78167514Skmacy
79170076Skmacy#include <cxgb_include.h>
80167514Skmacy
81167514Skmacy#ifdef PRIV_SUPPORTED
82167514Skmacy#include <sys/priv.h>
83167514Skmacy#endif
84167514Skmacy
85192933Sgnnstatic int cxgb_setup_interrupts(adapter_t *);
86192933Sgnnstatic void cxgb_teardown_interrupts(adapter_t *);
87167514Skmacystatic void cxgb_init(void *);
88202671Snpstatic int cxgb_init_locked(struct port_info *);
89202671Snpstatic int cxgb_uninit_locked(struct port_info *);
90194521Skmacystatic int cxgb_uninit_synchronized(struct port_info *);
91167514Skmacystatic int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
92167514Skmacystatic int cxgb_media_change(struct ifnet *);
93186282Sgnnstatic int cxgb_ifm_type(int);
94194921Snpstatic void cxgb_build_medialist(struct port_info *);
95167514Skmacystatic void cxgb_media_status(struct ifnet *, struct ifmediareq *);
96167514Skmacystatic int setup_sge_qsets(adapter_t *);
97167514Skmacystatic void cxgb_async_intr(void *);
98170869Skmacystatic void cxgb_tick_handler(void *, int);
99167514Skmacystatic void cxgb_tick(void *);
100209841Snpstatic void link_check_callout(void *);
101209841Snpstatic void check_link_status(void *, int);
102167514Skmacystatic void setup_rss(adapter_t *sc);
103207643Snpstatic int alloc_filters(struct adapter *);
104207643Snpstatic int setup_hw_filters(struct adapter *);
105207643Snpstatic int set_filter(struct adapter *, int, const struct filter_info *);
106207643Snpstatic inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
107207643Snp    unsigned int, u64, u64);
108207643Snpstatic inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
109207643Snp    unsigned int, u64, u64);
110167514Skmacy
111167514Skmacy/* Attachment glue for the PCI controller end of the device.  Each port of
112167514Skmacy * the device is attached separately, as defined later.
113167514Skmacy */
114167514Skmacystatic int cxgb_controller_probe(device_t);
115167514Skmacystatic int cxgb_controller_attach(device_t);
116167514Skmacystatic int cxgb_controller_detach(device_t);
117167514Skmacystatic void cxgb_free(struct adapter *);
118167514Skmacystatic __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
119167514Skmacy    unsigned int end);
120182679Skmacystatic void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
121167514Skmacystatic int cxgb_get_regs_len(void);
122169978Skmacystatic int offload_open(struct port_info *pi);
123171978Skmacystatic void touch_bars(device_t dev);
124174626Skmacystatic int offload_close(struct t3cdev *tdev);
125197791Snpstatic void cxgb_update_mac_settings(struct port_info *p);
126167514Skmacy
127167514Skmacystatic device_method_t cxgb_controller_methods[] = {
128167514Skmacy	DEVMETHOD(device_probe,		cxgb_controller_probe),
129167514Skmacy	DEVMETHOD(device_attach,	cxgb_controller_attach),
130167514Skmacy	DEVMETHOD(device_detach,	cxgb_controller_detach),
131167514Skmacy
132167514Skmacy	/* bus interface */
133167514Skmacy	DEVMETHOD(bus_print_child,	bus_generic_print_child),
134167514Skmacy	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
135167514Skmacy
136167514Skmacy	{ 0, 0 }
137167514Skmacy};
138167514Skmacy
139167514Skmacystatic driver_t cxgb_controller_driver = {
140167514Skmacy	"cxgbc",
141167514Skmacy	cxgb_controller_methods,
142167514Skmacy	sizeof(struct adapter)
143167514Skmacy};
144167514Skmacy
145167514Skmacystatic devclass_t	cxgb_controller_devclass;
146167514SkmacyDRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
147167514Skmacy
148167514Skmacy/*
149167514Skmacy * Attachment glue for the ports.  Attachment is done directly to the
150167514Skmacy * controller device.
151167514Skmacy */
152167514Skmacystatic int cxgb_port_probe(device_t);
153167514Skmacystatic int cxgb_port_attach(device_t);
154167514Skmacystatic int cxgb_port_detach(device_t);
155167514Skmacy
156167514Skmacystatic device_method_t cxgb_port_methods[] = {
157167514Skmacy	DEVMETHOD(device_probe,		cxgb_port_probe),
158167514Skmacy	DEVMETHOD(device_attach,	cxgb_port_attach),
159167514Skmacy	DEVMETHOD(device_detach,	cxgb_port_detach),
160167514Skmacy	{ 0, 0 }
161167514Skmacy};
162167514Skmacy
163167514Skmacystatic driver_t cxgb_port_driver = {
164167514Skmacy	"cxgb",
165167514Skmacy	cxgb_port_methods,
166167514Skmacy	0
167167514Skmacy};
168167514Skmacy
169167514Skmacystatic d_ioctl_t cxgb_extension_ioctl;
170170654Skmacystatic d_open_t cxgb_extension_open;
171170654Skmacystatic d_close_t cxgb_extension_close;
172167514Skmacy
173170654Skmacystatic struct cdevsw cxgb_cdevsw = {
174170654Skmacy       .d_version =    D_VERSION,
175170654Skmacy       .d_flags =      0,
176170654Skmacy       .d_open =       cxgb_extension_open,
177170654Skmacy       .d_close =      cxgb_extension_close,
178170654Skmacy       .d_ioctl =      cxgb_extension_ioctl,
179170654Skmacy       .d_name =       "cxgb",
180170654Skmacy};
181170654Skmacy
182167514Skmacystatic devclass_t	cxgb_port_devclass;
183167514SkmacyDRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
184167514Skmacy
185167514Skmacy/*
186167514Skmacy * The driver uses the best interrupt scheme available on a platform in the
187167514Skmacy * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
188167514Skmacy * of these schemes the driver may consider as follows:
189167514Skmacy *
190167514Skmacy * msi = 2: choose from among all three options
191167514Skmacy * msi = 1 : only consider MSI and pin interrupts
192167514Skmacy * msi = 0: force pin interrupts
193167514Skmacy */
194167760Skmacystatic int msi_allowed = 2;
195170083Skmacy
196167514SkmacyTUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
197167514SkmacySYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
198167514SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
199167514Skmacy    "MSI-X, MSI, INTx selector");
200169978Skmacy
201169053Skmacy/*
202169978Skmacy * The driver enables offload as a default.
203169978Skmacy * To disable it, use ofld_disable = 1.
204169053Skmacy */
205169978Skmacystatic int ofld_disable = 0;
206169978SkmacyTUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
207169978SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
208169978Skmacy    "disable ULP offload");
209169978Skmacy
210169978Skmacy/*
211169978Skmacy * The driver uses an auto-queue algorithm by default.
212185165Skmacy * To disable it and force a single queue-set per port, use multiq = 0
213169978Skmacy */
214185165Skmacystatic int multiq = 1;
215185165SkmacyTUNABLE_INT("hw.cxgb.multiq", &multiq);
216185165SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
217185165Skmacy    "use min(ncpus/ports, 8) queue-sets per port");
218167514Skmacy
219176572Skmacy/*
220185165Skmacy * By default the driver will not update the firmware unless
221185165Skmacy * it was compiled against a newer version
222185165Skmacy *
223176572Skmacy */
224176572Skmacystatic int force_fw_update = 0;
225176572SkmacyTUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
226176572SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
227176572Skmacy    "update firmware even if up to date");
228175200Skmacy
229205950Snpint cxgb_use_16k_clusters = -1;
230175200SkmacyTUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
231205950SnpSYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
232175200Skmacy    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
233175200Skmacy
234194039Sgnn/*
235194039Sgnn * Tune the size of the output queue.
236194039Sgnn */
237194039Sgnnint cxgb_snd_queue_len = IFQ_MAXLEN;
238194039SgnnTUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
239194039SgnnSYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
240194039Sgnn    &cxgb_snd_queue_len, 0, "send queue size ");
241194039Sgnn
242208887Snpstatic int nfilters = -1;
243208887SnpTUNABLE_INT("hw.cxgb.nfilters", &nfilters);
244208887SnpSYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
245208887Snp    &nfilters, 0, "max number of entries in the filter table");
246194039Sgnn
/*
 * Hard upper/lower bounds on the SGE queue sizes: TX queues, control
 * queues, response queues, and the standard/jumbo free lists.
 * Presumably used to clamp user/ioctl-requested queue-set sizing --
 * TODO confirm against the qset setup code (not visible in this chunk).
 */
enum {
	MAX_TXQ_ENTRIES      = 16384,	/* egress (TX) queue */
	MAX_CTRL_TXQ_ENTRIES = 1024,	/* control queue */
	MAX_RSPQ_ENTRIES     = 16384,	/* response queue */
	MAX_RX_BUFFERS       = 16384,	/* standard free list */
	MAX_RX_JUMBO_BUFFERS = 16384,	/* jumbo free list */
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32,
	MIN_FL_JUMBO_ENTRIES = 32
};
259167514Skmacy
/*
 * Software image of one entry in the T3 hardware filter table, consumed by
 * set_filter()/setup_hw_filters() (declared above).  The bit-field widths
 * presumably mirror the hardware filter record layout -- TODO confirm
 * against the T3 documentation.
 */
struct filter_info {
	u32 sip;		/* source IP */
	u32 sip_mask;		/* source IP mask */
	u32 dip;		/* destination IP */
	u16 sport;		/* source port */
	u16 dport;		/* destination port */
	u32 vlan:12;		/* VLAN id */
	u32 vlan_prio:3;	/* VLAN priority; FILTER_NO_VLAN_PRI below looks
				 * like the "don't care" value -- verify */
	u32 mac_hit:1;
	u32 mac_idx:4;
	u32 mac_vld:1;
	u32 pkt_type:2;
	u32 report_filter_id:1;
	u32 pass:1;		/* pass vs. drop disposition */
	u32 rss:1;
	u32 qset:3;		/* destination queue set */
	u32 locked:1;		/* entry may not be overwritten */
	u32 valid:1;		/* entry is in use */
};
279171471Skmacy
280171471Skmacyenum { FILTER_NO_VLAN_PRI = 7 };
281171471Skmacy
282182679Skmacy#define EEPROM_MAGIC 0x38E2F10C
283182679Skmacy
284167514Skmacy#define PORT_MASK ((1 << MAX_NPORTS) - 1)
285167514Skmacy
/*
 * Table for probing the cards.  The desc field doubles as the table
 * terminator: cxgb_get_ident() walks entries until desc == NULL.
 */
struct cxgb_ident {
	uint16_t	vendor;		/* PCI vendor id */
	uint16_t	device;		/* PCI device id */
	int		index;		/* argument for t3_get_adapter_info() */
	char		*desc;		/* card name; NULL terminates the table */
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
	{0, 0, 0, NULL}
};
309167514Skmacy
310171471Skmacystatic int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
311171471Skmacy
312176472Skmacy
313174708Skmacystatic __inline char
314171471Skmacyt3rev2char(struct adapter *adapter)
315171471Skmacy{
316171471Skmacy	char rev = 'z';
317171471Skmacy
318171471Skmacy	switch(adapter->params.rev) {
319171471Skmacy	case T3_REV_A:
320171471Skmacy		rev = 'a';
321171471Skmacy		break;
322171471Skmacy	case T3_REV_B:
323171471Skmacy	case T3_REV_B2:
324171471Skmacy		rev = 'b';
325171471Skmacy		break;
326171471Skmacy	case T3_REV_C:
327171471Skmacy		rev = 'c';
328171471Skmacy		break;
329171471Skmacy	}
330171471Skmacy	return rev;
331171471Skmacy}
332171471Skmacy
333167514Skmacystatic struct cxgb_ident *
334167514Skmacycxgb_get_ident(device_t dev)
335167514Skmacy{
336167514Skmacy	struct cxgb_ident *id;
337167514Skmacy
338167514Skmacy	for (id = cxgb_identifiers; id->desc != NULL; id++) {
339167514Skmacy		if ((id->vendor == pci_get_vendor(dev)) &&
340167514Skmacy		    (id->device == pci_get_device(dev))) {
341167514Skmacy			return (id);
342167514Skmacy		}
343167514Skmacy	}
344167514Skmacy	return (NULL);
345167514Skmacy}
346167514Skmacy
347167514Skmacystatic const struct adapter_info *
348167514Skmacycxgb_get_adapter_info(device_t dev)
349167514Skmacy{
350167514Skmacy	struct cxgb_ident *id;
351167514Skmacy	const struct adapter_info *ai;
352183063Skmacy
353167514Skmacy	id = cxgb_get_ident(dev);
354167514Skmacy	if (id == NULL)
355167514Skmacy		return (NULL);
356167514Skmacy
357167514Skmacy	ai = t3_get_adapter_info(id->index);
358167514Skmacy
359167514Skmacy	return (ai);
360167514Skmacy}
361167514Skmacy
362167514Skmacystatic int
363167514Skmacycxgb_controller_probe(device_t dev)
364167514Skmacy{
365167514Skmacy	const struct adapter_info *ai;
366167514Skmacy	char *ports, buf[80];
367170654Skmacy	int nports;
368183063Skmacy
369167514Skmacy	ai = cxgb_get_adapter_info(dev);
370167514Skmacy	if (ai == NULL)
371167514Skmacy		return (ENXIO);
372167514Skmacy
373170654Skmacy	nports = ai->nports0 + ai->nports1;
374170654Skmacy	if (nports == 1)
375167514Skmacy		ports = "port";
376167514Skmacy	else
377167514Skmacy		ports = "ports";
378167514Skmacy
379199237Snp	snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
380167514Skmacy	device_set_desc_copy(dev, buf);
381167514Skmacy	return (BUS_PROBE_DEFAULT);
382167514Skmacy}
383167514Skmacy
384176572Skmacy#define FW_FNAME "cxgb_t3fw"
385190330Sgnn#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
386190330Sgnn#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
387171471Skmacy
388167514Skmacystatic int
389169978Skmacyupgrade_fw(adapter_t *sc)
390167514Skmacy{
391167514Skmacy	const struct firmware *fw;
392167514Skmacy	int status;
393205944Snp	u32 vers;
394167514Skmacy
395176572Skmacy	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
396176572Skmacy		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
397169978Skmacy		return (ENOENT);
398171471Skmacy	} else
399205944Snp		device_printf(sc->dev, "installing firmware on card\n");
400167514Skmacy	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
401167514Skmacy
402205944Snp	if (status != 0) {
403205944Snp		device_printf(sc->dev, "failed to install firmware: %d\n",
404205944Snp		    status);
405205944Snp	} else {
406205944Snp		t3_get_fw_version(sc, &vers);
407205944Snp		snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
408205944Snp		    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
409205944Snp		    G_FW_VERSION_MICRO(vers));
410205944Snp	}
411205944Snp
412167514Skmacy	firmware_put(fw, FIRMWARE_UNLOAD);
413167514Skmacy
414167514Skmacy	return (status);
415167514Skmacy}
416167514Skmacy
417192537Sgnn/*
418192537Sgnn * The cxgb_controller_attach function is responsible for the initial
419192537Sgnn * bringup of the device.  Its responsibilities include:
420192537Sgnn *
421192537Sgnn *  1. Determine if the device supports MSI or MSI-X.
422192537Sgnn *  2. Allocate bus resources so that we can access the Base Address Register
423192537Sgnn *  3. Create and initialize mutexes for the controller and its control
424192537Sgnn *     logic such as SGE and MDIO.
425192537Sgnn *  4. Call hardware specific setup routine for the adapter as a whole.
426192537Sgnn *  5. Allocate the BAR for doing MSI-X.
427192537Sgnn *  6. Setup the line interrupt iff MSI-X is not supported.
428192537Sgnn *  7. Create the driver's taskq.
429192584Sgnn *  8. Start one task queue service thread.
430192584Sgnn *  9. Check if the firmware and SRAM are up-to-date.  They will be
431192584Sgnn *     auto-updated later (before FULL_INIT_DONE), if required.
432192537Sgnn * 10. Create a child device for each MAC (port)
433192537Sgnn * 11. Initialize T3 private state.
434192537Sgnn * 12. Trigger the LED
435192537Sgnn * 13. Setup offload iff supported.
436192537Sgnn * 14. Reset/restart the tick callout.
437192537Sgnn * 15. Attach sysctls
438192537Sgnn *
439192537Sgnn * NOTE: Any modification or deviation from this list MUST be reflected in
440192537Sgnn * the above comment.  Failure to do so will result in problems on various
441192537Sgnn * error conditions including link flapping.
442192537Sgnn */
443167514Skmacystatic int
444167514Skmacycxgb_controller_attach(device_t dev)
445167514Skmacy{
446167514Skmacy	device_t child;
447167514Skmacy	const struct adapter_info *ai;
448167514Skmacy	struct adapter *sc;
449172109Skmacy	int i, error = 0;
450167514Skmacy	uint32_t vers;
451167760Skmacy	int port_qsets = 1;
452172109Skmacy	int msi_needed, reg;
453185655Sgnn	char buf[80];
454185655Sgnn
455167514Skmacy	sc = device_get_softc(dev);
456167514Skmacy	sc->dev = dev;
457169978Skmacy	sc->msi_count = 0;
458172109Skmacy	ai = cxgb_get_adapter_info(dev);
459172109Skmacy
460167840Skmacy	/* find the PCIe link width and set max read request to 4KB*/
461167840Skmacy	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
462210505Sjhb		uint16_t lnk;
463171471Skmacy
464210505Sjhb		lnk = pci_read_config(dev, reg + PCIR_EXPRESS_LINK_STA, 2);
465210505Sjhb		sc->link_width = (lnk & PCIM_LINK_STA_WIDTH) >> 4;
466210505Sjhb		if (sc->link_width < 8 &&
467210505Sjhb		    (ai->caps & SUPPORTED_10000baseT_Full)) {
468210505Sjhb			device_printf(sc->dev,
469210505Sjhb			    "PCIe x%d Link, expect reduced performance\n",
470210505Sjhb			    sc->link_width);
471210505Sjhb		}
472210505Sjhb
473210505Sjhb		pci_set_max_read_req(dev, 4096);
474167840Skmacy	}
475204274Snp
476171978Skmacy	touch_bars(dev);
477167514Skmacy	pci_enable_busmaster(dev);
478167514Skmacy	/*
479167514Skmacy	 * Allocate the registers and make them available to the driver.
480167514Skmacy	 * The registers that we care about for NIC mode are in BAR 0
481167514Skmacy	 */
482167514Skmacy	sc->regs_rid = PCIR_BAR(0);
483167514Skmacy	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
484167514Skmacy	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
485176472Skmacy		device_printf(dev, "Cannot allocate BAR region 0\n");
486167514Skmacy		return (ENXIO);
487167514Skmacy	}
488176472Skmacy	sc->udbs_rid = PCIR_BAR(2);
489185662Sgnn	sc->udbs_res = NULL;
490185662Sgnn	if (is_offload(sc) &&
491185662Sgnn	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
492185662Sgnn		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
493176472Skmacy		device_printf(dev, "Cannot allocate BAR region 1\n");
494176472Skmacy		error = ENXIO;
495176472Skmacy		goto out;
496185662Sgnn	}
497167514Skmacy
498170869Skmacy	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
499170869Skmacy	    device_get_unit(dev));
500170869Skmacy	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
501170869Skmacy
502170869Skmacy	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
503170869Skmacy	    device_get_unit(dev));
504170869Skmacy	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
505170869Skmacy	    device_get_unit(dev));
506170869Skmacy	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
507170869Skmacy	    device_get_unit(dev));
508167514Skmacy
509176472Skmacy	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
510170869Skmacy	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
511170869Skmacy	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
512170869Skmacy
513167514Skmacy	sc->bt = rman_get_bustag(sc->regs_res);
514167514Skmacy	sc->bh = rman_get_bushandle(sc->regs_res);
515167514Skmacy	sc->mmio_len = rman_get_size(sc->regs_res);
516167769Skmacy
517197791Snp	for (i = 0; i < MAX_NPORTS; i++)
518197791Snp		sc->port[i].adapter = sc;
519197791Snp
520167769Skmacy	if (t3_prep_adapter(sc, ai, 1) < 0) {
521170654Skmacy		printf("prep adapter failed\n");
522167769Skmacy		error = ENODEV;
523167769Skmacy		goto out;
524167769Skmacy	}
525177464Skmacy        /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
526167514Skmacy	 * enough messages for the queue sets.  If that fails, try falling
527167514Skmacy	 * back to MSI.  If that fails, then try falling back to the legacy
528167514Skmacy	 * interrupt pin model.
529167514Skmacy	 */
530167514Skmacy	sc->msix_regs_rid = 0x20;
531167514Skmacy	if ((msi_allowed >= 2) &&
532167514Skmacy	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
533167514Skmacy	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
534167514Skmacy
535192933Sgnn		if (multiq)
536192933Sgnn			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
537192933Sgnn		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
538167760Skmacy
539192933Sgnn		if (pci_msix_count(dev) == 0 ||
540192933Sgnn		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
541192933Sgnn		    sc->msi_count != msi_needed) {
542192933Sgnn			device_printf(dev, "alloc msix failed - "
543192933Sgnn				      "msi_count=%d, msi_needed=%d, err=%d; "
544192933Sgnn				      "will try MSI\n", sc->msi_count,
545192933Sgnn				      msi_needed, error);
546169978Skmacy			sc->msi_count = 0;
547192933Sgnn			port_qsets = 1;
548167514Skmacy			pci_release_msi(dev);
549167514Skmacy			bus_release_resource(dev, SYS_RES_MEMORY,
550167514Skmacy			    sc->msix_regs_rid, sc->msix_regs_res);
551167514Skmacy			sc->msix_regs_res = NULL;
552167514Skmacy		} else {
553167514Skmacy			sc->flags |= USING_MSIX;
554192933Sgnn			sc->cxgb_intr = cxgb_async_intr;
555192933Sgnn			device_printf(dev,
556192933Sgnn				      "using MSI-X interrupts (%u vectors)\n",
557192933Sgnn				      sc->msi_count);
558167514Skmacy		}
559167514Skmacy	}
560167514Skmacy
561169978Skmacy	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
562169978Skmacy		sc->msi_count = 1;
563192933Sgnn		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
564192933Sgnn			device_printf(dev, "alloc msi failed - "
565192933Sgnn				      "err=%d; will try INTx\n", error);
566169978Skmacy			sc->msi_count = 0;
567192933Sgnn			port_qsets = 1;
568167514Skmacy			pci_release_msi(dev);
569167514Skmacy		} else {
570167514Skmacy			sc->flags |= USING_MSI;
571170081Skmacy			sc->cxgb_intr = t3_intr_msi;
572192933Sgnn			device_printf(dev, "using MSI interrupts\n");
573167514Skmacy		}
574167514Skmacy	}
575169978Skmacy	if (sc->msi_count == 0) {
576167760Skmacy		device_printf(dev, "using line interrupts\n");
577170081Skmacy		sc->cxgb_intr = t3b_intr;
578167514Skmacy	}
579167514Skmacy
580167514Skmacy	/* Create a private taskqueue thread for handling driver events */
581167514Skmacy	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
582167514Skmacy	    taskqueue_thread_enqueue, &sc->tq);
583167514Skmacy	if (sc->tq == NULL) {
584167514Skmacy		device_printf(dev, "failed to allocate controller task queue\n");
585167514Skmacy		goto out;
586167514Skmacy	}
587171804Skmacy
588167514Skmacy	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
589167514Skmacy	    device_get_nameunit(dev));
590170869Skmacy	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
591167514Skmacy
592167514Skmacy
593167514Skmacy	/* Create a periodic callout for checking adapter status */
594170869Skmacy	callout_init(&sc->cxgb_tick_ch, TRUE);
595167514Skmacy
596189643Sgnn	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
597167514Skmacy		/*
598167514Skmacy		 * Warn user that a firmware update will be attempted in init.
599167514Skmacy		 */
600169978Skmacy		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
601169978Skmacy		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
602167514Skmacy		sc->flags &= ~FW_UPTODATE;
603167514Skmacy	} else {
604167514Skmacy		sc->flags |= FW_UPTODATE;
605167514Skmacy	}
606171471Skmacy
607189643Sgnn	if (t3_check_tpsram_version(sc) < 0) {
608171471Skmacy		/*
609171471Skmacy		 * Warn user that a firmware update will be attempted in init.
610171471Skmacy		 */
611171471Skmacy		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
612171471Skmacy		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
613171471Skmacy		sc->flags &= ~TPS_UPTODATE;
614171471Skmacy	} else {
615171471Skmacy		sc->flags |= TPS_UPTODATE;
616171471Skmacy	}
617167514Skmacy
618167514Skmacy	/*
619167514Skmacy	 * Create a child device for each MAC.  The ethernet attachment
620167514Skmacy	 * will be done in these children.
621167760Skmacy	 */
622167760Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
623171978Skmacy		struct port_info *pi;
624171978Skmacy
625167514Skmacy		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
626167514Skmacy			device_printf(dev, "failed to add child port\n");
627167514Skmacy			error = EINVAL;
628167514Skmacy			goto out;
629167514Skmacy		}
630171978Skmacy		pi = &sc->port[i];
631171978Skmacy		pi->adapter = sc;
632171978Skmacy		pi->nqsets = port_qsets;
633171978Skmacy		pi->first_qset = i*port_qsets;
634171978Skmacy		pi->port_id = i;
635171978Skmacy		pi->tx_chan = i >= ai->nports0;
636171978Skmacy		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
637171978Skmacy		sc->rxpkt_map[pi->txpkt_intf] = i;
638174708Skmacy		sc->port[i].tx_chan = i >= ai->nports0;
639171471Skmacy		sc->portdev[i] = child;
640171978Skmacy		device_set_softc(child, pi);
641167514Skmacy	}
642167514Skmacy	if ((error = bus_generic_attach(dev)) != 0)
643167514Skmacy		goto out;
644167514Skmacy
645167514Skmacy	/* initialize sge private state */
646170654Skmacy	t3_sge_init_adapter(sc);
647167514Skmacy
648167514Skmacy	t3_led_ready(sc);
649169978Skmacy
650169978Skmacy	cxgb_offload_init();
651169978Skmacy	if (is_offload(sc)) {
652169978Skmacy		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
653169978Skmacy		cxgb_adapter_ofld(sc);
654169978Skmacy        }
655167514Skmacy	error = t3_get_fw_version(sc, &vers);
656167514Skmacy	if (error)
657167514Skmacy		goto out;
658167514Skmacy
659169978Skmacy	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
660169978Skmacy	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
661169978Skmacy	    G_FW_VERSION_MICRO(vers));
662169978Skmacy
663199237Snp	snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
664199237Snp		 ai->desc, is_offload(sc) ? "R" : "",
665185655Sgnn		 sc->params.vpd.ec, sc->params.vpd.sn);
666185655Sgnn	device_set_desc_copy(dev, buf);
667185655Sgnn
668192540Sgnn	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
669192540Sgnn		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
670192540Sgnn		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
671192540Sgnn
672176472Skmacy	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
673209841Snp	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
674174708Skmacy	t3_add_attach_sysctls(sc);
675167514Skmacyout:
676167514Skmacy	if (error)
677167514Skmacy		cxgb_free(sc);
678167514Skmacy
679167514Skmacy	return (error);
680167514Skmacy}
681167514Skmacy
682192537Sgnn/*
683192584Sgnn * The cxgb_controller_detach routine is called with the device is
684192537Sgnn * unloaded from the system.
685192537Sgnn */
686192537Sgnn
687167514Skmacystatic int
688167514Skmacycxgb_controller_detach(device_t dev)
689167514Skmacy{
690167514Skmacy	struct adapter *sc;
691167514Skmacy
692167514Skmacy	sc = device_get_softc(dev);
693167514Skmacy
694167514Skmacy	cxgb_free(sc);
695167514Skmacy
696167514Skmacy	return (0);
697167514Skmacy}
698167514Skmacy
699192537Sgnn/*
700192537Sgnn * The cxgb_free() is called by the cxgb_controller_detach() routine
701192537Sgnn * to tear down the structures that were built up in
702192537Sgnn * cxgb_controller_attach(), and should be the final piece of work
703192584Sgnn * done when fully unloading the driver.
704192537Sgnn *
705192537Sgnn *
706192537Sgnn *  1. Shutting down the threads started by the cxgb_controller_attach()
707192537Sgnn *     routine.
708192537Sgnn *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
709192537Sgnn *  3. Detaching all of the port devices created during the
710192537Sgnn *     cxgb_controller_attach() routine.
711192537Sgnn *  4. Removing the device children created via cxgb_controller_attach().
712192933Sgnn *  5. Releasing PCI resources associated with the device.
713192537Sgnn *  6. Turning off the offload support, iff it was turned on.
714192537Sgnn *  7. Destroying the mutexes created in cxgb_controller_attach().
715192537Sgnn *
716192537Sgnn */
static void
cxgb_free(struct adapter *sc)
{
	int i;

	/*
	 * Flag the adapter as shutting down so other threads (ioctls,
	 * callouts) stop touching it.
	 */
	ADAPTER_LOCK(sc);
	sc->flags |= CXGB_SHUTDOWN;
	ADAPTER_UNLOCK(sc);

	/*
	 * Make sure all child devices are gone.
	 */
	bus_generic_detach(sc->dev);
	for (i = 0; i < (sc)->params.nports; i++) {
		if (sc->portdev[i] &&
		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
			device_printf(sc->dev, "failed to delete child port\n");
	}

	/*
	 * At this point, it is as if cxgb_port_detach has run on all ports, and
	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
	 * all open devices have been closed.
	 */
	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
					   __func__, sc->open_device_map));
	for (i = 0; i < sc->params.nports; i++) {
		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
						  __func__, i));
	}

	/*
	 * Finish off the adapter's callouts.
	 */
	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	/*
	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
	 * sysctls are cleaned up by the kernel linker.
	 */
	if (sc->flags & FULL_INIT_DONE) {
 		t3_free_sge_resources(sc);
 		sc->flags &= ~FULL_INIT_DONE;
 	}

	/*
	 * Release all interrupt resources.
	 */
	cxgb_teardown_interrupts(sc);
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}

	/* Release the separately-mapped MSI-X table BAR, if any. */
	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}

	/*
	 * Free the adapter's taskqueue.
	 */
	if (sc->tq != NULL) {
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}

	/* Unregister from the offload framework if we registered earlier. */
	if (is_offload(sc)) {
		clrbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_adapter_unofld(sc);
	}

#ifdef notyet
	if (sc->flags & CXGB_OFLD_INIT)
		cxgb_offload_deactivate(sc);
#endif
	/* free(NULL, ...) is a no-op, so an unallocated filter table is fine. */
	free(sc->filters, M_DEVBUF);
	t3_sge_free(sc);

	cxgb_offload_exit();

	/* User doorbell BAR; NOTE(review): presumably only mapped on some revs. */
	if (sc->udbs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	/* Main register BAR. */
	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	/* Destroy the locks last, once nothing can be holding them. */
	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	ADAPTER_LOCK_DEINIT(sc);
}
814167514Skmacy
815167514Skmacy/**
816167514Skmacy *	setup_sge_qsets - configure SGE Tx/Rx/response queues
817167514Skmacy *	@sc: the controller softc
818167514Skmacy *
819167514Skmacy *	Determines how many sets of SGE queues to use and initializes them.
820167514Skmacy *	We support multiple queue sets per port if we have MSI-X, otherwise
821167514Skmacy *	just one queue set per port.
822167514Skmacy */
823167514Skmacystatic int
824167514Skmacysetup_sge_qsets(adapter_t *sc)
825167514Skmacy{
826172096Skmacy	int i, j, err, irq_idx = 0, qset_idx = 0;
827169978Skmacy	u_int ntxq = SGE_TXQ_PER_SET;
828167514Skmacy
829167514Skmacy	if ((err = t3_sge_alloc(sc)) != 0) {
830167760Skmacy		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
831167514Skmacy		return (err);
832167514Skmacy	}
833167514Skmacy
834167514Skmacy	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
835167514Skmacy		irq_idx = -1;
836167514Skmacy
837172096Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
838167514Skmacy		struct port_info *pi = &sc->port[i];
839167514Skmacy
840171978Skmacy		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
841167760Skmacy			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
842167514Skmacy			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
843167514Skmacy			    &sc->params.sge.qset[qset_idx], ntxq, pi);
844167514Skmacy			if (err) {
845167514Skmacy				t3_free_sge_resources(sc);
846171978Skmacy				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
847171978Skmacy				    err);
848167514Skmacy				return (err);
849167514Skmacy			}
850167514Skmacy		}
851167514Skmacy	}
852167514Skmacy
853167514Skmacy	return (0);
854167514Skmacy}
855167514Skmacy
/*
 * Tear down and release every interrupt resource the adapter holds:
 * first any per-queue-set MSI-X vectors, then the common interrupt
 * (INTx, MSI, or the first MSI-X vector).  Safe to call on a partially
 * set up adapter; each slot is either fully set up or fully empty.
 */
static void
cxgb_teardown_interrupts(adapter_t *sc)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		if (sc->msix_intr_tag[i] == NULL) {

			/* Should have been setup fully or not at all */
			KASSERT(sc->msix_irq_res[i] == NULL &&
				sc->msix_irq_rid[i] == 0,
				("%s: half-done interrupt (%d).", __func__, i));

			continue;
		}

		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
				  sc->msix_intr_tag[i]);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
				     sc->msix_irq_res[i]);

		/* Clear the bookkeeping so a later re-setup starts clean. */
		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
		sc->msix_irq_rid[i] = 0;
	}

	if (sc->intr_tag) {
		KASSERT(sc->irq_res != NULL,
			("%s: half-done interrupt.", __func__));

		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
				     sc->irq_res);

		sc->irq_res = sc->intr_tag = NULL;
		sc->irq_rid = 0;
	}
}
893170654Skmacy
/*
 * Allocate and hook up the adapter's interrupts.  The main interrupt
 * (rid 1 for MSI/MSI-X, rid 0 for INTx) always dispatches to
 * sc->cxgb_intr.  With MSI-X, one extra vector per queue set (rids
 * 2..msi_count) runs t3_intr_msix directly on that queue set.
 *
 * On failure, everything allocated so far is undone via
 * cxgb_teardown_interrupts() and an errno is returned.
 */
static int
cxgb_setup_interrupts(adapter_t *sc)
{
	struct resource *res;
	void *tag;
	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);

	sc->irq_rid = intr_flag ? 1 : 0;
	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
			      intr_flag, sc->irq_rid);
		err = EINVAL;
		sc->irq_rid = 0;
	} else {
		err = bus_setup_intr(sc->dev, sc->irq_res,
		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
		    sc->cxgb_intr, sc, &sc->intr_tag);

		if (err) {
			device_printf(sc->dev,
				      "Cannot set up interrupt (%x, %u, %d)\n",
				      intr_flag, sc->irq_rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
					     sc->irq_res);
			sc->irq_res = sc->intr_tag = NULL;
			sc->irq_rid = 0;
		}
	}

	/* That's all for INTx or MSI */
	if (!(intr_flag & USING_MSIX) || err)
		return (err);

	/* MSI-X only: one vector per queue set, starting at rid 2. */
	for (i = 0; i < sc->msi_count - 1; i++) {
		rid = i + 2;
		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
					     RF_SHAREABLE | RF_ACTIVE);
		if (res == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt "
				      "for message %d\n", rid);
			err = EINVAL;
			break;
		}

		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
				     NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
		if (err) {
			device_printf(sc->dev, "Cannot set up interrupt "
				      "for message %d (%d)\n", rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
			break;
		}

		/* Record the slot only once it is fully set up. */
		sc->msix_irq_rid[i] = rid;
		sc->msix_irq_res[i] = res;
		sc->msix_intr_tag[i] = tag;
	}

	if (err)
		cxgb_teardown_interrupts(sc);

	return (err);
}
959167514Skmacy
960192933Sgnn
961167514Skmacystatic int
962167514Skmacycxgb_port_probe(device_t dev)
963167514Skmacy{
964167514Skmacy	struct port_info *p;
965167514Skmacy	char buf[80];
966176472Skmacy	const char *desc;
967176472Skmacy
968167514Skmacy	p = device_get_softc(dev);
969176472Skmacy	desc = p->phy.desc;
970176472Skmacy	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
971167514Skmacy	device_set_desc_copy(dev, buf);
972167514Skmacy	return (0);
973167514Skmacy}
974167514Skmacy
975167514Skmacy
976167514Skmacystatic int
977167514Skmacycxgb_makedev(struct port_info *pi)
978167514Skmacy{
979167514Skmacy
980170654Skmacy	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
981209115Snp	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
982167514Skmacy
983167514Skmacy	if (pi->port_cdev == NULL)
984167514Skmacy		return (ENOMEM);
985167514Skmacy
986167514Skmacy	pi->port_cdev->si_drv1 = (void *)pi;
987167514Skmacy
988167514Skmacy	return (0);
989167514Skmacy}
990167514Skmacy
/*
 * Interface capabilities the driver supports, and the subset enabled by
 * default (everything except TSO over IPv6).
 */
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE)
#define CXGB_CAP_ENABLE (CXGB_CAP & ~IFCAP_TSO6)
995167514Skmacy
996167514Skmacystatic int
997167514Skmacycxgb_port_attach(device_t dev)
998167514Skmacy{
999167514Skmacy	struct port_info *p;
1000167514Skmacy	struct ifnet *ifp;
1001194921Snp	int err;
1002176472Skmacy	struct adapter *sc;
1003204274Snp
1004167514Skmacy	p = device_get_softc(dev);
1005176472Skmacy	sc = p->adapter;
1006170869Skmacy	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1007171803Skmacy	    device_get_unit(device_get_parent(dev)), p->port_id);
1008170869Skmacy	PORT_LOCK_INIT(p, p->lockbuf);
1009167514Skmacy
1010209841Snp	callout_init(&p->link_check_ch, CALLOUT_MPSAFE);
1011209841Snp	TASK_INIT(&p->link_check_task, 0, check_link_status, p);
1012209841Snp
1013167514Skmacy	/* Allocate an ifnet object and set it up */
1014167514Skmacy	ifp = p->ifp = if_alloc(IFT_ETHER);
1015167514Skmacy	if (ifp == NULL) {
1016167514Skmacy		device_printf(dev, "Cannot allocate ifnet\n");
1017167514Skmacy		return (ENOMEM);
1018167514Skmacy	}
1019167514Skmacy
1020167514Skmacy	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1021167514Skmacy	ifp->if_init = cxgb_init;
1022167514Skmacy	ifp->if_softc = p;
1023167514Skmacy	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1024167514Skmacy	ifp->if_ioctl = cxgb_ioctl;
1025167514Skmacy	ifp->if_start = cxgb_start;
1026174708Skmacy
1027207554Ssobomax	ifp->if_snd.ifq_drv_maxlen = max(cxgb_snd_queue_len, ifqmaxlen);
1028167514Skmacy	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1029167514Skmacy	IFQ_SET_READY(&ifp->if_snd);
1030167514Skmacy
1031204274Snp	ifp->if_capabilities = CXGB_CAP;
1032204274Snp	ifp->if_capenable = CXGB_CAP_ENABLE;
1033204274Snp	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
1034204274Snp
1035171471Skmacy	/*
1036204274Snp	 * Disable TSO on 4-port - it isn't supported by the firmware.
1037171471Skmacy	 */
1038204274Snp	if (sc->params.nports > 2) {
1039204348Snp		ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
1040204348Snp		ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
1041171471Skmacy		ifp->if_hwassist &= ~CSUM_TSO;
1042171471Skmacy	}
1043171471Skmacy
1044167514Skmacy	ether_ifattach(ifp, p->hw_addr);
1045194521Skmacy	ifp->if_transmit = cxgb_transmit;
1046194521Skmacy	ifp->if_qflush = cxgb_qflush;
1047192537Sgnn
1048204274Snp#ifdef DEFAULT_JUMBO
1049204274Snp	if (sc->params.nports <= 2)
1050180583Skmacy		ifp->if_mtu = ETHERMTU_JUMBO;
1051204274Snp#endif
1052167514Skmacy	if ((err = cxgb_makedev(p)) != 0) {
1053167514Skmacy		printf("makedev failed %d\n", err);
1054167514Skmacy		return (err);
1055167514Skmacy	}
1056194921Snp
1057194921Snp	/* Create a list of media supported by this port */
1058167514Skmacy	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1059167514Skmacy	    cxgb_media_status);
1060194921Snp	cxgb_build_medialist(p);
1061176472Skmacy
1062170654Skmacy	t3_sge_init_port(p);
1063189643Sgnn
1064192537Sgnn	return (err);
1065167514Skmacy}
1066167514Skmacy
/*
 * cxgb_port_detach() is called via the device_detach methods when
 * cxgb_free() calls the bus_generic_detach.  It is responsible for
 * removing the device from the view of the kernel, i.e. from all
 * interfaces lists etc.  This routine is only called when the driver is
 * being unloaded, not when the link goes down.
 */
static int
cxgb_port_detach(device_t dev)
{
	struct port_info *p;
	struct adapter *sc;
	int i;

	p = device_get_softc(dev);
	sc = p->adapter;

	/* Tell cxgb_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(p);
	wakeup(&sc->flags);
	/* Wait our turn; only one thread may hold the adapter busy at once. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	if (p->port_cdev != NULL)
		destroy_dev(p->port_cdev);

	/* Stop the port and pull its ifnet out of the network stack. */
	cxgb_uninit_synchronized(p);
	ether_ifdetach(p->ifp);

	/* Drain the tx watchdog/timer callouts of this port's queue sets. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_drain(&txq->txq_watchdog);
		callout_drain(&txq->txq_timer);
	}

	PORT_LOCK_DEINIT(p);
	if_free(p->ifp);
	p->ifp = NULL;	/* cxgb_free's per-port KASSERT checks for this */

	/* Release the adapter and wake the next waiter, if any. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);
	return (0);
}
1117167514Skmacy
/*
 * Handle a fatal hardware error: quiesce the SGE and both channels'
 * MACs, disable interrupts, and log the firmware status words for
 * post-mortem analysis.  The adapter is left suspended, not restarted.
 */
void
t3_fatal_err(struct adapter *sc)
{
	u_int fw_status[4];

	if (sc->flags & FULL_INIT_DONE) {
		t3_sge_stop(sc);
		/* Silence TX and RX on both MAC channels. */
		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(sc);
	}
	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
	/* Firmware scratch registers; only printed if the read succeeds. */
	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}
1136167514Skmacy
1137167514Skmacyint
1138167514Skmacyt3_os_find_pci_capability(adapter_t *sc, int cap)
1139167514Skmacy{
1140167514Skmacy	device_t dev;
1141167514Skmacy	struct pci_devinfo *dinfo;
1142167514Skmacy	pcicfgregs *cfg;
1143167514Skmacy	uint32_t status;
1144167514Skmacy	uint8_t ptr;
1145167514Skmacy
1146167514Skmacy	dev = sc->dev;
1147167514Skmacy	dinfo = device_get_ivars(dev);
1148167514Skmacy	cfg = &dinfo->cfg;
1149167514Skmacy
1150167514Skmacy	status = pci_read_config(dev, PCIR_STATUS, 2);
1151167514Skmacy	if (!(status & PCIM_STATUS_CAPPRESENT))
1152167514Skmacy		return (0);
1153167514Skmacy
1154167514Skmacy	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1155167514Skmacy	case 0:
1156167514Skmacy	case 1:
1157167514Skmacy		ptr = PCIR_CAP_PTR;
1158167514Skmacy		break;
1159167514Skmacy	case 2:
1160167514Skmacy		ptr = PCIR_CAP_PTR_2;
1161167514Skmacy		break;
1162167514Skmacy	default:
1163167514Skmacy		return (0);
1164167514Skmacy		break;
1165167514Skmacy	}
1166167514Skmacy	ptr = pci_read_config(dev, ptr, 1);
1167167514Skmacy
1168167514Skmacy	while (ptr != 0) {
1169167514Skmacy		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1170167514Skmacy			return (ptr);
1171167514Skmacy		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1172167514Skmacy	}
1173167514Skmacy
1174167514Skmacy	return (0);
1175167514Skmacy}
1176167514Skmacy
1177167514Skmacyint
1178167514Skmacyt3_os_pci_save_state(struct adapter *sc)
1179167514Skmacy{
1180167514Skmacy	device_t dev;
1181167514Skmacy	struct pci_devinfo *dinfo;
1182167514Skmacy
1183167514Skmacy	dev = sc->dev;
1184167514Skmacy	dinfo = device_get_ivars(dev);
1185167514Skmacy
1186167514Skmacy	pci_cfg_save(dev, dinfo, 0);
1187167514Skmacy	return (0);
1188167514Skmacy}
1189167514Skmacy
1190167514Skmacyint
1191167514Skmacyt3_os_pci_restore_state(struct adapter *sc)
1192167514Skmacy{
1193167514Skmacy	device_t dev;
1194167514Skmacy	struct pci_devinfo *dinfo;
1195167514Skmacy
1196167514Skmacy	dev = sc->dev;
1197167514Skmacy	dinfo = device_get_ivars(dev);
1198167514Skmacy
1199167514Skmacy	pci_cfg_restore(dev, dinfo);
1200167514Skmacy	return (0);
1201167514Skmacy}
1202167514Skmacy
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_status: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *	@mac_was_reset: non-zero if the MAC was reset and its settings lost
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
     int duplex, int fc, int mac_was_reset)
{
	struct port_info *pi = &adapter->port[port_id];
	struct ifnet *ifp = pi->ifp;

	/* no race with detach, so ifp should always be good */
	KASSERT(ifp, ("%s: if detached.", __func__));

	/* Reapply mac settings if they were lost due to a reset */
	if (mac_was_reset) {
		PORT_LOCK(pi);
		cxgb_update_mac_settings(pi);
		PORT_UNLOCK(pi);
	}

	if (link_status) {
		ifp->if_baudrate = IF_Mbps(speed);
		if_link_state_change(ifp, LINK_STATE_UP);
	} else
		if_link_state_change(ifp, LINK_STATE_DOWN);
}
1239167514Skmacy
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: index of the port whose module changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
	};
	struct port_info *pi = &adap->port[port_id];
	int mod = pi->phy.modtype;

	/* Rebuild the media list if it no longer matches the module type. */
	if (mod != pi->media.ifm_cur->ifm_data)
		cxgb_build_medialist(pi);

	if (mod == phy_modtype_none)
		if_printf(pi->ifp, "PHY module unplugged\n");
	else {
		KASSERT(mod < ARRAY_SIZE(mod_str),
			("invalid PHY module type %d", mod));
		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
	}
}
1268181614Skmacy
/*
 * Record a port's Ethernet address in the softc.
 */
void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called,
	 * as this is called early on in attach by t3_prep_adapter
	 * save the address off in the port structure
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}
1282167514Skmacy
/*
 * Programs the XGMAC based on the settings in the ifnet.  These settings
 * include MTU, MAC address, mcast addresses, etc.  Caller must hold the
 * port lock.
 */
static void
cxgb_update_mac_settings(struct port_info *p)
{
	struct ifnet *ifp = p->ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;
	int mtu, hwtagging;

	PORT_LOCK_ASSERT_OWNED(p);

	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);

	/* Allow room for a VLAN tag when the stack expects full-size frames. */
	mtu = ifp->if_mtu;
	if (ifp->if_capenable & IFCAP_VLAN_MTU)
		mtu += ETHER_VLAN_ENCAP_LEN;

	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;

	t3_mac_set_mtu(mac, mtu);
	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}
1311167514Skmacy
1312176472Skmacy
1313176472Skmacystatic int
1314176472Skmacyawait_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1315176472Skmacy			      unsigned long n)
1316176472Skmacy{
1317176472Skmacy	int attempts = 5;
1318176472Skmacy
1319176472Skmacy	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1320176472Skmacy		if (!--attempts)
1321176472Skmacy			return (ETIMEDOUT);
1322176472Skmacy		t3_os_sleep(10);
1323176472Skmacy	}
1324176472Skmacy	return 0;
1325176472Skmacy}
1326176472Skmacy
/*
 * Write every SMT, L2T, and routing-table entry once via management
 * requests — per the function name, presumably to put the TP memories'
 * parity state in a known-good condition.  A final CPL_SET_TCB_FIELD
 * request marks the end of the sequence; we then wait for all replies.
 *
 * Returns 0 on success or ETIMEDOUT from await_mgmt_replies().
 */
static int
init_tp_parity(struct adapter *adap)
{
	int i;
	struct mbuf *m;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* All 16 SMT entries. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_smt_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, m);
	}

	/* All 2048 L2T entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_l2t_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* All 2048 routing-table entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_rte_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* Final request, so the reply count below adds up to 16+2048+2048+1. */
	m = m_gethdr(M_WAITOK, MT_DATA);
	greq = mtod(m, struct cpl_set_tcb_field *);
	m->m_len = m->m_pkthdr.len = sizeof(*greq);
	memset(greq, 0, sizeof(*greq));
	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = htobe64(1);
	t3_mgmt_tx(adap, m);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return (i);
}
1389176472Skmacy
/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq[2];
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;		/* terminator */

	/* Count the queue sets on each of the two TX channels. */
	nq[0] = nq[1] = 0;
	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		nq[pi->tx_chan] += pi->nqsets;
	}
	/* First half of the table feeds channel 0, second half channel 1. */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = nq[0] ? i % nq[0] : 0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
	}

	/* Calculate the reverse RSS map table */
	for (i = 0; i < SGE_QSETS; ++i)
		adap->rrss_map[i] = 0xff;
	for (i = 0; i < RSS_TABLE_SIZE; ++i)
		if (adap->rrss_map[rspq_map[i]] == 0xff)
			adap->rrss_map[rspq_map[i]] = i;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
	              cpus, rspq_map);

}
1437167514Skmacy
/*
 * Sends an mbuf to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int
offload_tx(struct t3cdev *tdev, struct mbuf *m)
{

	return (t3_offload_tx(tdev, m));
}
1450169978Skmacy
1451169978Skmacystatic int
1452169978Skmacywrite_smt_entry(struct adapter *adapter, int idx)
1453169978Skmacy{
1454169978Skmacy	struct port_info *pi = &adapter->port[idx];
1455169978Skmacy	struct cpl_smt_write_req *req;
1456169978Skmacy	struct mbuf *m;
1457169978Skmacy
1458169978Skmacy	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1459169978Skmacy		return (ENOMEM);
1460169978Skmacy
1461169978Skmacy	req = mtod(m, struct cpl_smt_write_req *);
1462174708Skmacy	m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
1463174708Skmacy
1464194521Skmacy	req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1465169978Skmacy	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1466169978Skmacy	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
1467169978Skmacy	req->iff = idx;
1468169978Skmacy	memset(req->src_mac1, 0, sizeof(req->src_mac1));
1469169978Skmacy	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1470169978Skmacy
1471169978Skmacy	m_set_priority(m, 1);
1472169978Skmacy
1473169978Skmacy	offload_tx(&adapter->tdev, m);
1474169978Skmacy
1475169978Skmacy	return (0);
1476169978Skmacy}
1477169978Skmacy
1478169978Skmacystatic int
1479169978Skmacyinit_smt(struct adapter *adapter)
1480169978Skmacy{
1481169978Skmacy	int i;
1482169978Skmacy
1483169978Skmacy	for_each_port(adapter, i)
1484169978Skmacy		write_smt_entry(adapter, i);
1485169978Skmacy	return 0;
1486169978Skmacy}
1487169978Skmacy
1488167514Skmacystatic void
1489169978Skmacyinit_port_mtus(adapter_t *adapter)
1490169978Skmacy{
1491194521Skmacy	unsigned int mtus = ETHERMTU | (ETHERMTU << 16);
1492169978Skmacy
1493169978Skmacy	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1494169978Skmacy}
1495169978Skmacy
1496169978Skmacystatic void
1497167514Skmacysend_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1498167514Skmacy			      int hi, int port)
1499167514Skmacy{
1500167514Skmacy	struct mbuf *m;
1501167514Skmacy	struct mngt_pktsched_wr *req;
1502167514Skmacy
1503171471Skmacy	m = m_gethdr(M_DONTWAIT, MT_DATA);
1504167848Skmacy	if (m) {
1505169978Skmacy		req = mtod(m, struct mngt_pktsched_wr *);
1506194521Skmacy		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1507167848Skmacy		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1508167848Skmacy		req->sched = sched;
1509167848Skmacy		req->idx = qidx;
1510167848Skmacy		req->min = lo;
1511167848Skmacy		req->max = hi;
1512167848Skmacy		req->binding = port;
1513167848Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1514167848Skmacy		t3_mgmt_tx(adap, m);
1515167848Skmacy	}
1516167514Skmacy}
1517167514Skmacy
1518167514Skmacystatic void
1519167514Skmacybind_qsets(adapter_t *sc)
1520167514Skmacy{
1521167514Skmacy	int i, j;
1522167514Skmacy
1523167514Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
1524167514Skmacy		const struct port_info *pi = adap2pinfo(sc, i);
1525167514Skmacy
1526172096Skmacy		for (j = 0; j < pi->nqsets; ++j) {
1527167514Skmacy			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1528172096Skmacy					  -1, pi->tx_chan);
1529172096Skmacy
1530172096Skmacy		}
1531167514Skmacy	}
1532167514Skmacy}
1533167514Skmacy
1534171471Skmacystatic void
1535171471Skmacyupdate_tpeeprom(struct adapter *adap)
1536171471Skmacy{
1537171471Skmacy	const struct firmware *tpeeprom;
1538172109Skmacy
1539171471Skmacy	uint32_t version;
1540171471Skmacy	unsigned int major, minor;
1541171471Skmacy	int ret, len;
1542189643Sgnn	char rev, name[32];
1543171471Skmacy
1544171471Skmacy	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1545171471Skmacy
1546171471Skmacy	major = G_TP_VERSION_MAJOR(version);
1547171471Skmacy	minor = G_TP_VERSION_MINOR(version);
1548171471Skmacy	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1549171471Skmacy		return;
1550171471Skmacy
1551171471Skmacy	rev = t3rev2char(adap);
1552189643Sgnn	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1553171471Skmacy
1554189643Sgnn	tpeeprom = firmware_get(name);
1555171471Skmacy	if (tpeeprom == NULL) {
1556190330Sgnn		device_printf(adap->dev,
1557190330Sgnn			      "could not load TP EEPROM: unable to load %s\n",
1558190330Sgnn			      name);
1559171471Skmacy		return;
1560171471Skmacy	}
1561171471Skmacy
1562171471Skmacy	len = tpeeprom->datasize - 4;
1563171471Skmacy
1564171471Skmacy	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1565171471Skmacy	if (ret)
1566171471Skmacy		goto release_tpeeprom;
1567171471Skmacy
1568171471Skmacy	if (len != TP_SRAM_LEN) {
1569190330Sgnn		device_printf(adap->dev,
1570190330Sgnn			      "%s length is wrong len=%d expected=%d\n", name,
1571190330Sgnn			      len, TP_SRAM_LEN);
1572171471Skmacy		return;
1573171471Skmacy	}
1574171471Skmacy
1575171471Skmacy	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1576171471Skmacy	    TP_SRAM_OFFSET);
1577171471Skmacy
1578171471Skmacy	if (!ret) {
1579171471Skmacy		device_printf(adap->dev,
1580171471Skmacy			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1581171471Skmacy			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1582171471Skmacy	} else
1583190330Sgnn		device_printf(adap->dev,
1584190330Sgnn			      "Protocol SRAM image update in EEPROM failed\n");
1585171471Skmacy
1586171471Skmacyrelease_tpeeprom:
1587171471Skmacy	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1588171471Skmacy
1589171471Skmacy	return;
1590171471Skmacy}
1591171471Skmacy
1592171471Skmacystatic int
1593171471Skmacyupdate_tpsram(struct adapter *adap)
1594171471Skmacy{
1595171471Skmacy	const struct firmware *tpsram;
1596171471Skmacy	int ret;
1597189643Sgnn	char rev, name[32];
1598171471Skmacy
1599171471Skmacy	rev = t3rev2char(adap);
1600189643Sgnn	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1601171471Skmacy
1602171471Skmacy	update_tpeeprom(adap);
1603171471Skmacy
1604189643Sgnn	tpsram = firmware_get(name);
1605171471Skmacy	if (tpsram == NULL){
1606176613Skmacy		device_printf(adap->dev, "could not load TP SRAM\n");
1607171471Skmacy		return (EINVAL);
1608171471Skmacy	} else
1609176613Skmacy		device_printf(adap->dev, "updating TP SRAM\n");
1610171471Skmacy
1611171471Skmacy	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1612171471Skmacy	if (ret)
1613171471Skmacy		goto release_tpsram;
1614171471Skmacy
1615171471Skmacy	ret = t3_set_proto_sram(adap, tpsram->data);
1616171471Skmacy	if (ret)
1617171471Skmacy		device_printf(adap->dev, "loading protocol SRAM failed\n");
1618171471Skmacy
1619171471Skmacyrelease_tpsram:
1620171471Skmacy	firmware_put(tpsram, FIRMWARE_UNLOAD);
1621171471Skmacy
1622171471Skmacy	return ret;
1623171471Skmacy}
1624171471Skmacy
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	The FULL_INIT_DONE portion runs only once per attach and may sleep
 *	(firmware loads, memory allocation), so it must not be entered with
 *	the adapter lock held.  Returns 0 on success or an error code.
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;
	/* Max MC5 filters available once the minimum TID reservation is set aside. */
	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;

	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
					   __func__, sc->open_device_map));

	if ((sc->flags & FULL_INIT_DONE) == 0) {

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

		/* Bring firmware and protocol SRAM up to date before HW init. */
		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;

		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;

		/*
		 * Carve the MC5 between servers and filters: nfilters < 0
		 * means "as many filters as possible".
		 */
		if (is_offload(sc) && nfilters != 0) {
			sc->params.mc5.nservers = 0;

			if (nfilters < 0)
				sc->params.mc5.nfilters = mxf;
			else
				sc->params.mc5.nfilters = min(nfilters, mxf);
		}

		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		alloc_filters(sc);
		setup_rss(sc);

		t3_intr_clear(sc);
		err = cxgb_setup_interrupts(sc);
		if (err)
			goto out;

		t3_add_configured_sysctls(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);
	t3_sge_start(sc);
	t3_intr_enable(sc);

	/* One-time TP parity initialization for T3C parts with offload. */
	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
	    is_offload(sc) && init_tp_parity(sc) == 0)
		sc->flags |= TP_PARITY_INIT;

	if (sc->flags & TP_PARITY_INIT) {
		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	/* Bind queue sets and program HW filters once per attach. */
	if (!(sc->flags & QUEUES_BOUND)) {
		bind_qsets(sc);
		setup_hw_filters(sc);
		sc->flags |= QUEUES_BOUND;
	}

	t3_sge_reset_adapter(sc);
out:
	return (err);
}
1709169978Skmacy
/*
 * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 * during controller_detach, not here.
 */
static void
cxgb_down(struct adapter *sc)
{
	/* Quiesce the SGE first, then mask adapter interrupts. */
	t3_sge_stop(sc);
	t3_intr_disable(sc);
}
1721169978Skmacy
/*
 * Enable TOE (offload) operation on the adapter and notify registered
 * offload clients.  Called from cxgb_init_locked() when the first port
 * comes up on an offload-capable adapter.  Always returns 0.
 */
static int
offload_open(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct t3cdev *tdev = &sc->tdev;

	/* Mark the offload pseudo-device open in the device map. */
	setbit(&sc->open_device_map, OFFLOAD_DEVMAP_BIT);

	t3_tp_set_offload_mode(sc, 1);
	tdev->lldev = pi->ifp;
	init_port_mtus(sc);
	/* Rev-0 parts cap the MTU table at port 0's MTU; later revs don't. */
	t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
		     sc->params.rev == 0 ?  sc->port[0].ifp->if_mtu : 0xffff);
	init_smt(sc);
	cxgb_add_clients(tdev);

	return (0);
}
1740174708Skmacy
/*
 * Disable TOE (offload) operation; the inverse of offload_open().  Safe to
 * call when offload was never opened (checks the device map first).
 * Always returns 0.
 */
static int
offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
		return (0);

	/* Call back all registered clients */
	cxgb_remove_clients(tdev);

	tdev->lldev = NULL;
	cxgb_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);

	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);

	return (0);
}
1760192537Sgnn
1761194521Skmacy/*
1762202671Snp * if_init for cxgb ports.
1763194521Skmacy */
1764202671Snpstatic void
1765202671Snpcxgb_init(void *arg)
1766194521Skmacy{
1767202671Snp	struct port_info *p = arg;
1768194521Skmacy	struct adapter *sc = p->adapter;
1769192537Sgnn
1770194521Skmacy	ADAPTER_LOCK(sc);
1771202671Snp	cxgb_init_locked(p); /* releases adapter lock */
1772202671Snp	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1773202671Snp}
1774194521Skmacy
/*
 * Bring a port up.  Must be called with the adapter lock held; the lock is
 * always released by the time this returns.  If one-time adapter init still
 * has to run (which can sleep), the adapter is marked BUSY and the lock is
 * dropped for the duration; waiters sleeping on &sc->flags are woken when
 * BUSY is cleared.  Returns 0 on success or an errno value.
 */
static int
cxgb_init_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	struct ifnet *ifp = p->ifp;
	struct cmac *mac = &p->mac;
	int i, rc = 0, may_sleep = 0;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	/* Wait our turn if another thread holds the controller BUSY. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));

	/*
	 * The code that runs during one-time adapter initialization can sleep
	 * so it's important not to hold any locks across it.
	 */
	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;

	if (may_sleep) {
		SET_BUSY(sc);
		ADAPTER_UNLOCK(sc);
	}

	/* First port to open brings the whole adapter up. */
	if (sc->open_device_map == 0) {
		if ((rc = cxgb_up(sc)) != 0)
			goto done;

		if (is_offload(sc) && !ofld_disable && offload_open(p))
			log(LOG_WARNING,
			    "Could not initialize offload capabilities\n");
	}

	PORT_LOCK(p);
	/* Nothing to do if this port is already open and running. */
	if (isset(&sc->open_device_map, p->port_id) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(p);
		goto done;
	}
	t3_port_intr_enable(sc, p->port_id);
	if (!mac->multiport)
		t3_mac_init(mac);
	cxgb_update_mac_settings(p);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	PORT_UNLOCK(p);

	/* Arm the TX watchdog on each of this port's queue sets. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
				 txq->txq_watchdog.c_cpu);
	}

	/* all ok */
	setbit(&sc->open_device_map, p->port_id);
	/* PHYs with a link interrupt need only a slow backup poll. */
	callout_reset(&p->link_check_ch,
	    p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
	    link_check_callout, p);

done:
	if (may_sleep) {
		ADAPTER_LOCK(sc);
		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
		CLR_BUSY(sc);
		wakeup_one(&sc->flags);
	}
	ADAPTER_UNLOCK(sc);
	return (rc);
}
1857167514Skmacy
/*
 * Bring a port down.  Must be called with the adapter lock held; the lock
 * is always released by the time this returns.  Marks the controller BUSY
 * and drops the lock around cxgb_uninit_synchronized(), which may sleep.
 * Returns 0 on success or an errno value.
 */
static int
cxgb_uninit_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	int rc;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	/* Wait our turn if another thread holds the controller BUSY. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	rc = cxgb_uninit_synchronized(p);

	ADAPTER_LOCK(sc);
	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
done:
	ADAPTER_UNLOCK(sc);
	return (rc);
}
1890202671Snp
/*
 * Called on "ifconfig down", and from port_detach.  Fully quiesces the port:
 * drains tasks and callouts, shuts the MAC down in a fixed register/delay
 * sequence, powers down the PHY, and tears down offload / the adapter when
 * the last port closes.  Always returns 0.
 */
static int
cxgb_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/*
	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
	 */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/*
	 * Clear this port's bit from the open device map, and then drain all
	 * the tasks that can access/manipulate this port's port_info or ifp.
	 * We disable this port's interrupts here and so the slow/ext
	 * interrupt tasks won't be enqueued.  The tick task will continue to
	 * be enqueued every second but the runs after this drain will not see
	 * this port in the open device map.
	 *
	 * A well behaved task must take open_device_map into account and ignore
	 * ports that are not open.
	 */
	clrbit(&sc->open_device_map, pi->port_id);
	t3_port_intr_disable(sc, pi->port_id);
	taskqueue_drain(sc->tq, &sc->slow_intr_task);
	taskqueue_drain(sc->tq, &sc->tick_task);

	callout_drain(&pi->link_check_ch);
	taskqueue_drain(sc->tq, &pi->link_check_task);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* disable pause frames */
	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);

	/* Reset RX FIFO HWM */
	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);

	/* Give in-flight traffic time to drain before checking the TX FIFO. */
	DELAY(100 * 1000);

	/* Wait for TXFIFO empty */
	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
			F_TXFIFO_EMPTY, 1, 20, 5);

	DELAY(100 * 1000);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);


	pi->phy.ops->power_down(&pi->phy, 1);

	PORT_UNLOCK(pi);

	/* Report link down to the OS. */
	pi->link_config.link_ok = 0;
	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);

	/* Close offload when no Ethernet ports remain open. */
	if ((sc->open_device_map & PORT_MASK) == 0)
		offload_close(&sc->tdev);

	/* Last open device gone: stop the adapter. */
	if (sc->open_device_map == 0)
		cxgb_down(pi->adapter);

	return (0);
}
1959170654Skmacy
1960181616Skmacy/*
1961181616Skmacy * Mark lro enabled or disabled in all qsets for this port
1962181616Skmacy */
1963170654Skmacystatic int
1964181616Skmacycxgb_set_lro(struct port_info *p, int enabled)
1965181616Skmacy{
1966181616Skmacy	int i;
1967181616Skmacy	struct adapter *adp = p->adapter;
1968181616Skmacy	struct sge_qset *q;
1969181616Skmacy
1970181616Skmacy	for (i = 0; i < p->nqsets; i++) {
1971181616Skmacy		q = &adp->sge.qs[p->first_qset + i];
1972181616Skmacy		q->lro.enabled = (enabled != 0);
1973181616Skmacy	}
1974181616Skmacy	return (0);
1975181616Skmacy}
1976181616Skmacy
/*
 * ioctl handler for cxgb ports.  Each adapter-touching case first takes the
 * adapter lock and screens for a doomed port (ENXIO) or a busy controller
 * (EBUSY).  Note the shared "fail:" label lives inside the SIOCSIFMTU case
 * but is the common unlock-and-return path used by later cases via goto.
 */
static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct port_info *p = ifp->if_softc;
	struct adapter *sc = p->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0, mtu;
	uint32_t mask;

	switch (command) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error) {
fail:
			ADAPTER_UNLOCK(sc);
			return (error);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			error = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(p)) {
			error = ENXIO;
			goto fail;
		}
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Running: only react to PROMISC/ALLMULTI changes. */
				flags = p->if_flags;
				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						error = EBUSY;
						goto fail;
					}
					PORT_LOCK(p);
					cxgb_update_mac_settings(p);
					PORT_UNLOCK(p);
				}
				ADAPTER_UNLOCK(sc);
			} else
				error = cxgb_init_locked(p);
			p->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			error = cxgb_uninit_locked(p);
		else
			ADAPTER_UNLOCK(sc);

		/* init/uninit_locked drop the adapter lock themselves. */
		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);

		break;
	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		/* Toggle only the capabilities that changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO requires TX checksumming; drop it if txcsum goes. */
			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
				    "tso disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					/* Refuse TSO without TX checksumming. */
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "enable txcsum first.\n");
					error = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;

			/* Safe to do this even if cxgb_up not called yet */
			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	return (error);
}
2131167514Skmacy
2132174708Skmacystatic int
2133167514Skmacycxgb_media_change(struct ifnet *ifp)
2134167514Skmacy{
2135194921Snp	return (EOPNOTSUPP);
2136167514Skmacy}
2137167514Skmacy
2138186282Sgnn/*
2139194921Snp * Translates phy->modtype to the correct Ethernet media subtype.
2140186282Sgnn */
2141186282Sgnnstatic int
2142194921Snpcxgb_ifm_type(int mod)
2143186282Sgnn{
2144194921Snp	switch (mod) {
2145186282Sgnn	case phy_modtype_sr:
2146194921Snp		return (IFM_10G_SR);
2147186282Sgnn	case phy_modtype_lr:
2148194921Snp		return (IFM_10G_LR);
2149186282Sgnn	case phy_modtype_lrm:
2150194921Snp		return (IFM_10G_LRM);
2151186282Sgnn	case phy_modtype_twinax:
2152194921Snp		return (IFM_10G_TWINAX);
2153186282Sgnn	case phy_modtype_twinax_long:
2154194921Snp		return (IFM_10G_TWINAX_LONG);
2155186282Sgnn	case phy_modtype_none:
2156194921Snp		return (IFM_NONE);
2157186282Sgnn	case phy_modtype_unknown:
2158194921Snp		return (IFM_UNKNOWN);
2159186282Sgnn	}
2160186282Sgnn
2161194921Snp	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2162194921Snp	return (IFM_UNKNOWN);
2163186282Sgnn}
2164186282Sgnn
/*
 * Rebuilds the ifmedia list for this port, and sets the current media.
 * The media list is derived from the PHY's capability bits and, for
 * modular (SFP+) PHYs, the currently inserted module type.
 */
static void
cxgb_build_medialist(struct port_info *p)
{
	struct cphy *phy = &p->phy;
	struct ifmedia *media = &p->media;
	int mod = phy->modtype;
	int m = IFM_ETHER | IFM_FDX;

	PORT_LOCK(p);

	ifmedia_removeall(media);
	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
		/* Copper (RJ45) */

		if (phy->caps & SUPPORTED_10000baseT_Full)
			ifmedia_add(media, m | IFM_10G_T, mod, NULL);

		if (phy->caps & SUPPORTED_1000baseT_Full)
			ifmedia_add(media, m | IFM_1000_T, mod, NULL);

		if (phy->caps & SUPPORTED_100baseT_Full)
			ifmedia_add(media, m | IFM_100_TX, mod, NULL);

		if (phy->caps & SUPPORTED_10baseT_Full)
			ifmedia_add(media, m | IFM_10_T, mod, NULL);

		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);

	} else if (phy->caps & SUPPORTED_TP) {
		/* Copper (CX4) */

		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
			("%s: unexpected cap 0x%x", __func__, phy->caps));

		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);

	} else if (phy->caps & SUPPORTED_FIBRE &&
		   phy->caps & SUPPORTED_10000baseT_Full) {
		/* 10G optical (but includes SFP+ twinax) */

		m |= cxgb_ifm_type(mod);
		/* No module inserted: report IFM_NONE without full-duplex. */
		if (IFM_SUBTYPE(m) == IFM_NONE)
			m &= ~IFM_FDX;

		ifmedia_add(media, m, mod, NULL);
		ifmedia_set(media, m);

	} else if (phy->caps & SUPPORTED_FIBRE &&
		   phy->caps & SUPPORTED_1000baseT_Full) {
		/* 1G optical */

		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
		ifmedia_set(media, m | IFM_1000_SX);

	} else {
		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
			    phy->caps));
	}

	PORT_UNLOCK(p);
}
2232194921Snp
/*
 * ifmedia "status" callback: report link state and the active media.
 * Rebuilds the media list first if the inserted module has changed since
 * the list was last built.
 */
static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = ifp->if_softc;
	struct ifmedia_entry *cur = p->media.ifm_cur;
	int speed = p->link_config.speed;

	/* Module changed since the media list was built: rebuild it. */
	if (cur->ifm_data != p->phy.modtype) {
		cxgb_build_medialist(p);
		cur = p->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * active and current will differ iff current media is autoselect.  That
	 * can happen only for copper RJ45.
	 */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;
	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

	/* Map the negotiated speed to the matching media subtype. */
	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}
2273167514Skmacy
2274167514Skmacystatic void
2275167514Skmacycxgb_async_intr(void *data)
2276167514Skmacy{
2277167760Skmacy	adapter_t *sc = data;
2278167760Skmacy
2279209840Snp	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
2280209840Snp	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
2281170869Skmacy	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2282167514Skmacy}
2283167514Skmacy
2284209841Snpstatic void
2285209841Snplink_check_callout(void *arg)
2286197791Snp{
2287209841Snp	struct port_info *pi = arg;
2288209841Snp	struct adapter *sc = pi->adapter;
2289197791Snp
2290209841Snp	if (!isset(&sc->open_device_map, pi->port_id))
2291209841Snp		return;
2292197791Snp
2293209841Snp	taskqueue_enqueue(sc->tq, &pi->link_check_task);
2294197791Snp}
2295197791Snp
2296167514Skmacystatic void
2297209841Snpcheck_link_status(void *arg, int pending)
2298167514Skmacy{
2299209841Snp	struct port_info *pi = arg;
2300209841Snp	struct adapter *sc = pi->adapter;
2301167514Skmacy
2302209841Snp	if (!isset(&sc->open_device_map, pi->port_id))
2303209841Snp		return;
2304167514Skmacy
2305209841Snp	t3_link_changed(sc, pi->port_id);
2306194521Skmacy
2307209841Snp	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ))
2308209841Snp		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2309167514Skmacy}
2310167514Skmacy
2311209841Snpvoid
2312209841Snpt3_os_link_intr(struct port_info *pi)
2313209841Snp{
2314209841Snp	/*
2315209841Snp	 * Schedule a link check in the near future.  If the link is flapping
2316209841Snp	 * rapidly we'll keep resetting the callout and delaying the check until
2317209841Snp	 * things stabilize a bit.
2318209841Snp	 */
2319209841Snp	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
2320209841Snp}
2321209841Snp
/*
 * Periodic MAC health check for T3B2 parts (invoked from the tick handler
 * when params.rev == T3_REV_B2).  Runs the MAC watchdog on each open port
 * with a good link: status 1 counts a toggle; status 2 reprograms the MAC
 * settings, restarts the link, and counts a reset.
 */
static void
check_t3b2_mac(struct adapter *sc)
{
	int i;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	for_each_port(sc, i) {
		struct port_info *p = &sc->port[i];
		int status;
#ifdef INVARIANTS
		struct ifnet *ifp = p->ifp;
#endif

		/* Skip closed ports, faulted links, and links that are down. */
		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
		    !p->link_config.link_ok)
			continue;

		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
			("%s: state mismatch (drv_flags %x, device_map %x)",
			 __func__, ifp->if_drv_flags, sc->open_device_map));

		PORT_LOCK(p);
		status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			/* MAC needs a full restart: reprogram and re-enable. */
			cxgb_update_mac_settings(p);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(sc, p->port_id);
			p->mac.stats.num_resets++;
		}
		PORT_UNLOCK(p);
	}
}
2361167514Skmacy
2362167746Skmacystatic void
2363167746Skmacycxgb_tick(void *arg)
2364167746Skmacy{
2365167746Skmacy	adapter_t *sc = (adapter_t *)arg;
2366170869Skmacy
2367194521Skmacy	if (sc->flags & CXGB_SHUTDOWN)
2368176472Skmacy		return;
2369174708Skmacy
2370185508Skmacy	taskqueue_enqueue(sc->tq, &sc->tick_task);
2371209841Snp	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
2372170869Skmacy}
2373170869Skmacy
/*
 * Once-a-second housekeeping, run from the adapter taskqueue (queued by
 * cxgb_tick).  Checks T3B2 MACs for hangs, accumulates SGE response-queue
 * starvation and free-list empty counters, and refreshes each active
 * port's ifnet statistics from the hardware MAC counters.
 */
static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;
	int i;
	uint32_t cause, reset;

	/* Nothing to do if shutting down or not fully initialized. */
	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
		return;

	/* T3B2 MAC watchdog; only relevant while some port is up. */
	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
		check_t3b2_mac(sc);

	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
	if (cause) {
		struct sge_qset *qs = &sc->sge.qs[0];
		uint32_t mask, v;

		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;

		/* Low SGE_QSETS bits: per-qset response queue starvation. */
		mask = 1;
		for (i = 0; i < SGE_QSETS; i++) {
			if (v & mask)
				qs[i].rspq.starved++;
			mask <<= 1;
		}

		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */

		/* Next 2*SGE_QSETS bits: one per free list (2 FLs per qset). */
		for (i = 0; i < SGE_QSETS * 2; i++) {
			if (v & mask) {
				qs[i / 2].fl[i % 2].empty++;
			}
			mask <<= 1;
		}

		/* clear */
		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
	}

	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct ifnet *ifp = pi->ifp;
		struct cmac *mac = &pi->mac;
		struct mac_stats *mstats = &mac->stats;
		int drops, j;

		/* Only ports that are administratively up. */
		if (!isset(&sc->open_device_map, pi->port_id))
			continue;

		PORT_LOCK(pi);
		t3_mac_update_stats(mac);
		PORT_UNLOCK(pi);

		/* Mirror the MAC counters into the ifnet statistics. */
		ifp->if_opackets = mstats->tx_frames;
		ifp->if_ipackets = mstats->rx_frames;
		ifp->if_obytes = mstats->tx_octets;
		ifp->if_ibytes = mstats->rx_octets;
		ifp->if_omcasts = mstats->tx_mcast_frames;
		ifp->if_imcasts = mstats->rx_mcast_frames;
		ifp->if_collisions = mstats->tx_total_collisions;
		ifp->if_iqdrops = mstats->rx_cong_drops;

		/* Sum software tx-ring drops over the port's qsets. */
		drops = 0;
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; j++)
			drops += sc->sge.qs[j].txq[TXQ_ETH].txq_mr->br_drops;
		ifp->if_snd.ifq_drops = drops;

		ifp->if_oerrors =
		    mstats->tx_excess_collisions +
		    mstats->tx_underrun +
		    mstats->tx_len_errs +
		    mstats->tx_mac_internal_errs +
		    mstats->tx_excess_deferral +
		    mstats->tx_fcs_errs;
		ifp->if_ierrors =
		    mstats->rx_jabber +
		    mstats->rx_data_errs +
		    mstats->rx_sequence_errs +
		    mstats->rx_runt +
		    mstats->rx_too_long +
		    mstats->rx_mac_internal_errs +
		    mstats->rx_short +
		    mstats->rx_fcs_errs;

		if (mac->multiport)
			continue;

		/* Count rx fifo overflows, once per second */
		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}
		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
	}
}
2474167746Skmacy
/*
 * Intended to read back and rewrite the upper halves of the 64-bit BARs
 * (1/3/5) on 32-bit kernels.  The body is compiled out ("&& 0"), so this
 * is currently a no-op.  NOTE(review): the disabled code uses Linux-style
 * names (pdev, pci_read_config_dword) and would not build as-is if the
 * guard were ever enabled.
 */
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}
2492171978Skmacy
2493167514Skmacystatic int
2494171471Skmacyset_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2495171471Skmacy{
2496171471Skmacy	uint8_t *buf;
2497171471Skmacy	int err = 0;
2498171471Skmacy	u32 aligned_offset, aligned_len, *p;
2499171471Skmacy	struct adapter *adapter = pi->adapter;
2500171471Skmacy
2501171471Skmacy
2502171471Skmacy	aligned_offset = offset & ~3;
2503171471Skmacy	aligned_len = (len + (offset & 3) + 3) & ~3;
2504171471Skmacy
2505171471Skmacy	if (aligned_offset != offset || aligned_len != len) {
2506171471Skmacy		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2507171471Skmacy		if (!buf)
2508171471Skmacy			return (ENOMEM);
2509171471Skmacy		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2510171471Skmacy		if (!err && aligned_len > 4)
2511171471Skmacy			err = t3_seeprom_read(adapter,
2512171471Skmacy					      aligned_offset + aligned_len - 4,
2513171471Skmacy					      (u32 *)&buf[aligned_len - 4]);
2514171471Skmacy		if (err)
2515171471Skmacy			goto out;
2516171471Skmacy		memcpy(buf + (offset & 3), data, len);
2517171471Skmacy	} else
2518171471Skmacy		buf = (uint8_t *)(uintptr_t)data;
2519171471Skmacy
2520171471Skmacy	err = t3_seeprom_wp(adapter, 0);
2521171471Skmacy	if (err)
2522171471Skmacy		goto out;
2523171471Skmacy
2524171471Skmacy	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2525171471Skmacy		err = t3_seeprom_write(adapter, aligned_offset, *p);
2526171471Skmacy		aligned_offset += 4;
2527171471Skmacy	}
2528171471Skmacy
2529171471Skmacy	if (!err)
2530171471Skmacy		err = t3_seeprom_wp(adapter, 1);
2531171471Skmacyout:
2532171471Skmacy	if (buf != data)
2533171471Skmacy		free(buf, M_DEVBUF);
2534171471Skmacy	return err;
2535171471Skmacy}
2536171471Skmacy
2537171471Skmacy
/*
 * Range check used by the ioctl handlers below.  A negative value means
 * "parameter not supplied / leave unchanged" (callers test >= 0 before
 * acting), so it is accepted as in range.
 */
static int
in_range(int val, int lo, int hi)
{
	if (val < 0)
		return (1);
	return (lo <= val && val <= hi);
}
2543167514Skmacy
/* Open handler for the cxgb control device: no per-open state, always OK. */
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
{

	return (0);
}
2549170654Skmacy
/* Close handler for the cxgb control device: nothing to tear down. */
static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
2555170654Skmacy
/*
 * ioctl handler for the cxgb control device node.  Every command requires
 * driver privilege (PRIV_DRIVER, or suser() on older kernels).  'data'
 * points to the command-specific argument structure; results are returned
 * in place through it, except where a case does an explicit copyout to a
 * user buffer embedded in the argument.  Returns 0 or a positive errno.
 */
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
	/* Read a PHY register via MDIO; 10G parts address a specific MMD. */
	case CHELSIO_GET_MIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
					     mid->reg_num, &val);
		} else
			error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
					     mid->reg_num & 0x1f, &val);
		if (error == 0)
			mid->val_out = val;
		break;
	}
	/* Write a PHY register via MDIO; mirror image of GET_MIIREG. */
	case CHELSIO_SET_MIIREG: {
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_write)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
					      mmd, mid->reg_num, mid->val_in);
		} else
			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
					      mid->reg_num & 0x1f,
					      mid->val_in);
		break;
	}
	/* Raw register write; offset must be 32-bit aligned and in-range. */
	case CHELSIO_SETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		t3_write_reg(sc, edata->addr, edata->val);
		break;
	}
	/* Raw register read with the same bounds checks as SETREG. */
	case CHELSIO_GETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		edata->val = t3_read_reg(sc, edata->addr);
		break;
	}
	/* Dump one SGE hardware context, under the SGE register spin lock. */
	case CHELSIO_GET_SGE_CONTEXT: {
		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
		mtx_lock_spin(&sc->sge.reg_lock);
		switch (ecntxt->cntxt_type) {
		case CNTXT_TYPE_EGRESS:
			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_FL:
			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_RSP:
			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_CQ:
			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		default:
			error = EINVAL;
			break;
		}
		mtx_unlock_spin(&sc->sge.reg_lock);
		break;
	}
	/* Copy out one descriptor; each qset exposes 6 rings (queue_num % 6). */
	case CHELSIO_GET_SGE_DESC: {
		struct ch_desc *edesc = (struct ch_desc *)data;
		int ret;
		if (edesc->queue_num >= SGE_QSETS * 6)
			return (EINVAL);
		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
		    edesc->queue_num % 6, edesc->idx, edesc->data);
		if (ret < 0)
			return (EINVAL);
		edesc->size = ret;
		break;
	}
	/* Report a qset's configuration; qset_idx is relative to this port. */
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (t->qset_idx >= nqsets)
			return EINVAL;

		i = q1 + t->qset_idx;
		q = &sc->params.sge.qset[i];
		t->rspq_size   = q->rspq_size;
		t->txq_size[0] = q->txq_size[0];
		t->txq_size[1] = q->txq_size[1];
		t->txq_size[2] = q->txq_size[2];
		t->fl_size[0]  = q->fl_size;
		t->fl_size[1]  = q->jumbo_size;
		t->polling     = q->polling;
		t->lro         = q->lro;
		t->intr_lat    = q->coalesce_usecs;
		t->cong_thres  = q->cong_thres;
		t->qnum        = i;

		/* IRQ vector is only meaningful once interrupts are set up. */
		if ((sc->flags & FULL_INIT_DONE) == 0)
			t->vector = 0;
		else if (sc->flags & USING_MSIX)
			t->vector = rman_get_start(sc->msix_irq_res[i]);
		else
			t->vector = rman_get_start(sc->irq_res);

		break;
	}
	/* Report how many qsets this port owns. */
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;
		edata->val = pi->nqsets;
		break;
	}
	case CHELSIO_LOAD_FW: {
		uint8_t *fw_data;
		uint32_t vers;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		/*
		 * You're allowed to load a firmware only before FULL_INIT_DONE
		 *
		 * FW_UPTODATE is also set so the rest of the initialization
		 * will not overwrite what was loaded here.  This gives you the
		 * flexibility to load any firmware (and maybe shoot yourself in
		 * the foot).
		 */

		ADAPTER_LOCK(sc);
		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
			ADAPTER_UNLOCK(sc);
			return (EBUSY);
		}

		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!fw_data)
			error = ENOMEM;
		else
			error = copyin(t->buf, fw_data, t->len);

		if (!error)
			error = -t3_load_fw(sc, fw_data, t->len);

		/* Refresh the cached firmware version string regardless. */
		if (t3_get_fw_version(sc, &vers) == 0) {
			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
		}

		if (!error)
			sc->flags |= FW_UPTODATE;

		free(fw_data, M_DEVBUF);
		ADAPTER_UNLOCK(sc);
		break;
	}
	/* Copy in and program a boot image via t3_load_boot(). */
	case CHELSIO_LOAD_BOOT: {
		uint8_t *boot_data;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!boot_data)
			return ENOMEM;

		error = copyin(t->buf, boot_data, t->len);
		if (!error)
			error = -t3_load_boot(sc, boot_data, t->len);

		free(boot_data, M_DEVBUF);
		break;
	}
	/* Report the TP payload-memory page layout (offload only). */
	case CHELSIO_GET_PM: {
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		m->tx_pg_sz = p->tx_pg_size;
		m->tx_num_pg = p->tx_num_pgs;
		m->rx_pg_sz  = p->rx_pg_size;
		m->rx_num_pg = p->rx_num_pgs;
		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;

		break;
	}
	/* Reconfigure payload-memory paging; only before full init. */
	case CHELSIO_SET_PM: {
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);

		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
			return (EINVAL);	/* not power of 2 */
		if (!(m->rx_pg_sz & 0x14000))
			return (EINVAL);	/* not 16KB or 64KB */
		if (!(m->tx_pg_sz & 0x1554000))
			return (EINVAL);
		/* -1 means "keep the current page count". */
		if (m->tx_num_pg == -1)
			m->tx_num_pg = p->tx_num_pgs;
		if (m->rx_num_pg == -1)
			m->rx_num_pg = p->rx_num_pgs;
		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
			return (EINVAL);
		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
			return (EINVAL);

		p->rx_pg_size = m->rx_pg_sz;
		p->tx_pg_size = m->tx_pg_sz;
		p->rx_num_pgs = m->rx_num_pg;
		p->tx_num_pgs = m->tx_num_pg;
		break;
	}
	/* Replace the TP MTU table; not allowed while offload is running. */
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;
		int i;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (offload_running(sc))
			return (EBUSY);
		if (m->nmtus != NMTUS)
			return (EINVAL);
		if (m->mtus[0] < 81)         /* accommodate SACK */
			return (EINVAL);

		/*
		 * MTUs must be in ascending order
		 */
		for (i = 1; i < NMTUS; ++i)
			if (m->mtus[i] < m->mtus[i - 1])
				return (EINVAL);

		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
		break;
	}
	/* Return a copy of the current TP MTU table. */
	case CHELSIO_GETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
		m->nmtus = NMTUS;
		break;
	}
	/* Stream the contents of one of the MC7 memories to user space. */
	case CHELSIO_GET_MEM: {
		struct ch_mem_range *t = (struct ch_mem_range *)data;
		struct mc7 *mem;
		uint8_t *useraddr;
		u64 buf[32];

		/*
		 * Use these to avoid modifying len/addr in the return
		 * struct
		 */
		uint32_t len = t->len, addr = t->addr;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);         /* need the memory controllers */
		if ((addr & 0x7) || (len & 0x7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &sc->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &sc->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &sc->pmtx;
		else
			return (EINVAL);

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t->version = 3 | (sc->params.rev << 10);

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr = (uint8_t *)t->buf;
		while (len) {
			unsigned int chunk = min(len, sizeof(buf));

			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
			if (error)
				return (-error);
			if (copyout(buf, useraddr, chunk))
				return (EFAULT);
			useraddr += chunk;
			addr += chunk;
			len -= chunk;
		}
		break;
	}
	/* Read one word of the MC5 TCAM (requires MC5 to be initialized). */
	case CHELSIO_READ_TCAM_WORD: {
		struct ch_tcam_word *t = (struct ch_tcam_word *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);         /* need MC5 */
		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
		break;
	}
	/* Program the hardware packet trace filters (tx and/or rx side). */
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace *t = (struct ch_trace *)data;
		const struct trace_params *tp;

		/* The trace_params fields start at ch_trace's sip member. */
		tp = (const struct trace_params *)&t->sip;
		if (t->config_tx)
			t3_config_trace_filter(sc, tp, 0, t->invert_match,
					       t->trace_tx);
		if (t->config_rx)
			t3_config_trace_filter(sc, tp, 1, t->invert_match,
					       t->trace_rx);
		break;
	}
	/* Send a tx scheduler configuration command to the firmware. */
	case CHELSIO_SET_PKTSCHED: {
		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
		if (sc->open_device_map == 0)
			return (EAGAIN);
		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
		    p->binding);
		break;
	}
	/* Snapshot the full register map and copy it out. */
	case CHELSIO_IFCONF_GETREGS: {
		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
		int reglen = cxgb_get_regs_len();
		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			return (ENOMEM);
		}
		if (regs->len > reglen)
			regs->len = reglen;
		else if (regs->len < reglen)
			error = ENOBUFS;

		if (!error) {
			cxgb_get_regs(sc, regs, buf);
			error = copyout(buf, regs->data, reglen);
		}
		free(buf, M_DEVBUF);

		break;
	}
	/* Configure a hardware tx scheduler; negative fields mean no change. */
	case CHELSIO_SET_HW_SCHED: {
		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
		unsigned int ticks_per_usec = core_ticks_per_usec(sc);

		if ((sc->flags & FULL_INIT_DONE) == 0)
			return (EAGAIN);       /* need TP to be initialized */
		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
		    !in_range(t->channel, 0, 1) ||
		    !in_range(t->kbps, 0, 10000000) ||
		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
		    !in_range(t->flow_ipg, 0,
			      dack_ticks_to_usec(sc, 0x7ff)))
			return (EINVAL);

		if (t->kbps >= 0) {
			error = t3_config_sched(sc, t->kbps, t->sched);
			if (error < 0)
				return (-error);
		}
		if (t->class_ipg >= 0)
			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
		if (t->flow_ipg >= 0) {
			t->flow_ipg *= 1000;     /* us -> ns */
			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
		}
		if (t->mode >= 0) {
			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);

			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
					 bit, t->mode ? bit : 0);
		}
		if (t->channel >= 0)
			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
					 1 << t->sched, t->channel << t->sched);
		break;
	}
	/* Read a word-aligned window of the EEPROM and copy out 'len' bytes. */
	case CHELSIO_GET_EEPROM: {
		int i;
		struct ch_eeprom *e = (struct ch_eeprom *)data;
		uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);

		if (buf == NULL) {
			return (ENOMEM);
		}
		e->magic = EEPROM_MAGIC;
		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);

		if (!error)
			error = copyout(buf + e->offset, e->data, e->len);

		free(buf, M_DEVBUF);
		break;
	}
	/* Pull the latest MAC counters, then zero this port's statistics. */
	case CHELSIO_CLEAR_STATS: {
		if (!(sc->flags & FULL_INIT_DONE))
			return EAGAIN;

		PORT_LOCK(pi);
		t3_mac_update_stats(&pi->mac);
		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
		PORT_UNLOCK(pi);
		break;
	}
	/* Dump the uP logic-analyzer buffer (caller must size it LA_BUFSIZE). */
	case CHELSIO_GET_UP_LA: {
		struct ch_up_la *la = (struct ch_up_la *)data;
		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			return (ENOMEM);
		}
		if (la->bufsize < LA_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
					      &la->bufsize, buf);
		if (!error)
			error = copyout(buf, la->data, la->bufsize);

		free(buf, M_DEVBUF);
		break;
	}
	/* Dump the uP IOQ state; the first 4 words are the enable/status regs. */
	case CHELSIO_GET_UP_IOQS: {
		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
		uint32_t *v;

		if (buf == NULL) {
			return (ENOMEM);
		}
		if (ioqs->bufsize < IOQS_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);

		if (!error) {
			v = (uint32_t *)buf;

			ioqs->ioq_rx_enable = *v++;
			ioqs->ioq_tx_enable = *v++;
			ioqs->ioq_rx_status = *v++;
			ioqs->ioq_tx_status = *v++;

			error = copyout(v, ioqs->data, ioqs->bufsize);
		}

		free(buf, M_DEVBUF);
		break;
	}
	/* Install a TCAM steering filter at the requested index. */
	case CHELSIO_SET_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);	/* No TCAM */
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);	/* mc5 not setup yet */
		if (nfilters == 0)
			return (EBUSY);		/* TOE will use TCAM */

		/* sanity checks */
		if (f->filter_id >= nfilters ||
		    (f->val.dip && f->mask.dip != 0xffffffff) ||
		    (f->val.sport && f->mask.sport != 0xffff) ||
		    (f->val.dport && f->mask.dport != 0xffff) ||
		    (f->val.vlan && f->mask.vlan != 0xfff) ||
		    (f->val.vlan_prio &&
			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
		    f->qset >= SGE_QSETS ||
		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
			return (EINVAL);

		/* Was allocated with M_WAITOK */
		KASSERT(sc->filters, ("filter table NULL\n"));

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);

		/* Translate the ioctl request into the software filter state. */
		bzero(p, sizeof(*p));
		p->sip = f->val.sip;
		p->sip_mask = f->mask.sip;
		p->dip = f->val.dip;
		p->sport = f->val.sport;
		p->dport = f->val.dport;
		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
		p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
		    FILTER_NO_VLAN_PRI;
		p->mac_hit = f->mac_hit;
		p->mac_vld = f->mac_addr_idx != 0xffff;
		p->mac_idx = f->mac_addr_idx;
		p->pkt_type = f->proto;
		p->report_filter_id = f->want_filter_id;
		p->pass = f->pass;
		p->rss = f->rss;
		p->qset = f->qset;

		error = set_filter(sc, f->filter_id, p);
		if (error == 0)
			p->valid = 1;
		break;
	}
	/* Remove a filter by rewriting its slot with the "match nothing" state. */
	case CHELSIO_DEL_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);
		if (f->filter_id >= nfilters)
			return (EINVAL);

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);
		if (!p->valid)
			return (EFAULT); /* Read "Bad address" as "Bad index" */

		bzero(p, sizeof(*p));
		p->sip = p->sip_mask = 0xffffffff;
		p->vlan = 0xfff;
		p->vlan_prio = FILTER_NO_VLAN_PRI;
		p->pkt_type = 1;
		error = set_filter(sc, f->filter_id, p);
		break;
	}
	/*
	 * Iterator: return the first valid filter with id > f->filter_id
	 * (0xffffffff starts from the beginning); 0xffffffff out means done.
	 */
	case CHELSIO_GET_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int i, nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);

		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
		for (; i < nfilters; i++) {
			p = &sc->filters[i];
			if (!p->valid)
				continue;

			bzero(f, sizeof(*f));

			f->filter_id = i;
			f->val.sip = p->sip;
			f->mask.sip = p->sip_mask;
			f->val.dip = p->dip;
			f->mask.dip = p->dip ? 0xffffffff : 0;
			f->val.sport = p->sport;
			f->mask.sport = p->sport ? 0xffff : 0;
			f->val.dport = p->dport;
			f->mask.dport = p->dport ? 0xffff : 0;
			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : p->vlan_prio;
			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : FILTER_NO_VLAN_PRI;
			f->mac_hit = p->mac_hit;
			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
			f->proto = p->pkt_type;
			f->want_filter_id = p->report_filter_id;
			f->pass = p->pass;
			f->rss = p->rss;
			f->qset = p->qset;

			break;
		}

		if (i == nfilters)
			f->filter_id = 0xffffffff;
		break;
	}
	default:
		return (EOPNOTSUPP);
		break;
	}

	return (error);
}
3205167514Skmacy
3206167514Skmacystatic __inline void
3207167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3208167514Skmacy    unsigned int end)
3209167514Skmacy{
3210182679Skmacy	uint32_t *p = (uint32_t *)(buf + start);
3211167514Skmacy
3212167514Skmacy	for ( ; start <= end; start += sizeof(uint32_t))
3213167514Skmacy		*p++ = t3_read_reg(ap, start);
3214167514Skmacy}
3215167514Skmacy
3216167514Skmacy#define T3_REGMAP_SIZE (3 * 1024)
3217167514Skmacystatic int
3218167514Skmacycxgb_get_regs_len(void)
3219167514Skmacy{
3220167514Skmacy	return T3_REGMAP_SIZE;
3221167514Skmacy}
3222167514Skmacy
3223167514Skmacystatic void
3224182679Skmacycxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
3225167514Skmacy{
3226167514Skmacy
3227167514Skmacy	/*
3228167514Skmacy	 * Version scheme:
3229167514Skmacy	 * bits 0..9: chip version
3230167514Skmacy	 * bits 10..15: chip revision
3231167514Skmacy	 * bit 31: set for PCIe cards
3232167514Skmacy	 */
3233167514Skmacy	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
3234167514Skmacy
3235167514Skmacy	/*
3236167514Skmacy	 * We skip the MAC statistics registers because they are clear-on-read.
3237167514Skmacy	 * Also reading multi-register stats would need to synchronize with the
3238167514Skmacy	 * periodic mac stats accumulation.  Hard to justify the complexity.
3239167514Skmacy	 */
3240182679Skmacy	memset(buf, 0, cxgb_get_regs_len());
3241167514Skmacy	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
3242167514Skmacy	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
3243167514Skmacy	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
3244167514Skmacy	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
3245167514Skmacy	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
3246167514Skmacy	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
3247167514Skmacy		       XGM_REG(A_XGM_SERDES_STAT3, 1));
3248167514Skmacy	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
3249167514Skmacy		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
3250167514Skmacy}
3251176572Skmacy
3252207643Snpstatic int
3253207643Snpalloc_filters(struct adapter *sc)
3254207643Snp{
3255207643Snp	struct filter_info *p;
3256207643Snp	unsigned int nfilters = sc->params.mc5.nfilters;
3257176572Skmacy
3258207643Snp	if (nfilters == 0)
3259207643Snp		return (0);
3260207643Snp
3261207643Snp	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
3262207643Snp	sc->filters = p;
3263207643Snp
3264207643Snp	p = &sc->filters[nfilters - 1];
3265207643Snp	p->vlan = 0xfff;
3266207643Snp	p->vlan_prio = FILTER_NO_VLAN_PRI;
3267207643Snp	p->pass = p->rss = p->valid = p->locked = 1;
3268207643Snp
3269207643Snp	return (0);
3270207643Snp}
3271207643Snp
3272207643Snpstatic int
3273207643Snpsetup_hw_filters(struct adapter *sc)
3274207643Snp{
3275207643Snp	int i, rc;
3276207643Snp	unsigned int nfilters = sc->params.mc5.nfilters;
3277207643Snp
3278207643Snp	if (!sc->filters)
3279207643Snp		return (0);
3280207643Snp
3281207643Snp	t3_enable_filters(sc);
3282207643Snp
3283207643Snp	for (i = rc = 0; i < nfilters && !rc; i++) {
3284207643Snp		if (sc->filters[i].locked)
3285207643Snp			rc = set_filter(sc, i, &sc->filters[i]);
3286207643Snp	}
3287207643Snp
3288207643Snp	return (rc);
3289207643Snp}
3290207643Snp
3291207643Snpstatic int
3292207643Snpset_filter(struct adapter *sc, int id, const struct filter_info *f)
3293207643Snp{
3294207643Snp	int len;
3295207643Snp	struct mbuf *m;
3296207643Snp	struct ulp_txpkt *txpkt;
3297207643Snp	struct work_request_hdr *wr;
3298207643Snp	struct cpl_pass_open_req *oreq;
3299207643Snp	struct cpl_set_tcb_field *sreq;
3300207643Snp
3301207643Snp	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
3302207643Snp	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
3303207643Snp
3304207643Snp	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
3305207643Snp	      sc->params.mc5.nfilters;
3306207643Snp
3307207643Snp	m = m_gethdr(M_WAITOK, MT_DATA);
3308207643Snp	m->m_len = m->m_pkthdr.len = len;
3309207643Snp	bzero(mtod(m, char *), len);
3310207643Snp
3311207643Snp	wr = mtod(m, struct work_request_hdr *);
3312207643Snp	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
3313207643Snp
3314207643Snp	oreq = (struct cpl_pass_open_req *)(wr + 1);
3315207643Snp	txpkt = (struct ulp_txpkt *)oreq;
3316207643Snp	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3317207643Snp	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
3318207643Snp	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
3319207643Snp	oreq->local_port = htons(f->dport);
3320207643Snp	oreq->peer_port = htons(f->sport);
3321207643Snp	oreq->local_ip = htonl(f->dip);
3322207643Snp	oreq->peer_ip = htonl(f->sip);
3323207643Snp	oreq->peer_netmask = htonl(f->sip_mask);
3324207643Snp	oreq->opt0h = 0;
3325207643Snp	oreq->opt0l = htonl(F_NO_OFFLOAD);
3326207643Snp	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
3327207643Snp			 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
3328207643Snp			 V_VLAN_PRI(f->vlan_prio >> 1) |
3329207643Snp			 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
3330207643Snp			 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
3331207643Snp			 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
3332207643Snp
3333207643Snp	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
3334207643Snp	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
3335207643Snp			  (f->report_filter_id << 15) | (1 << 23) |
3336207643Snp			  ((u64)f->pass << 35) | ((u64)!f->rss << 36));
3337207643Snp	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
3338207643Snp	t3_mgmt_tx(sc, m);
3339207643Snp
3340207643Snp	if (f->pass && !f->rss) {
3341207643Snp		len = sizeof(*sreq);
3342207643Snp		m = m_gethdr(M_WAITOK, MT_DATA);
3343207643Snp		m->m_len = m->m_pkthdr.len = len;
3344207643Snp		bzero(mtod(m, char *), len);
3345207643Snp		sreq = mtod(m, struct cpl_set_tcb_field *);
3346207643Snp		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
3347207643Snp		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
3348207643Snp				 (u64)sc->rrss_map[f->qset] << 19);
3349207643Snp		t3_mgmt_tx(sc, m);
3350207643Snp	}
3351207643Snp	return 0;
3352207643Snp}
3353207643Snp
3354207643Snpstatic inline void
3355207643Snpmk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
3356207643Snp    unsigned int word, u64 mask, u64 val)
3357207643Snp{
3358207643Snp	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
3359207643Snp	req->reply = V_NO_REPLY(1);
3360207643Snp	req->cpu_idx = 0;
3361207643Snp	req->word = htons(word);
3362207643Snp	req->mask = htobe64(mask);
3363207643Snp	req->val = htobe64(val);
3364207643Snp}
3365207643Snp
3366207643Snpstatic inline void
3367207643Snpset_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
3368207643Snp    unsigned int word, u64 mask, u64 val)
3369207643Snp{
3370207643Snp	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
3371207643Snp
3372207643Snp	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3373207643Snp	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3374207643Snp	mk_set_tcb_field(req, tid, word, mask, val);
3375207643Snp}
3376