cxgb_main.c revision 199237
/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
29167514Skmacy
30167514Skmacy#include <sys/cdefs.h>
31167514Skmacy__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 199237 2009-11-13 00:28:16Z np $");
32167514Skmacy
33167514Skmacy#include <sys/param.h>
34167514Skmacy#include <sys/systm.h>
35167514Skmacy#include <sys/kernel.h>
36167514Skmacy#include <sys/bus.h>
37167514Skmacy#include <sys/module.h>
38167514Skmacy#include <sys/pciio.h>
39167514Skmacy#include <sys/conf.h>
40167514Skmacy#include <machine/bus.h>
41167514Skmacy#include <machine/resource.h>
42167514Skmacy#include <sys/bus_dma.h>
43176472Skmacy#include <sys/ktr.h>
44167514Skmacy#include <sys/rman.h>
45167514Skmacy#include <sys/ioccom.h>
46167514Skmacy#include <sys/mbuf.h>
47167514Skmacy#include <sys/linker.h>
48167514Skmacy#include <sys/firmware.h>
49167514Skmacy#include <sys/socket.h>
50167514Skmacy#include <sys/sockio.h>
51167514Skmacy#include <sys/smp.h>
52167514Skmacy#include <sys/sysctl.h>
53174708Skmacy#include <sys/syslog.h>
54167514Skmacy#include <sys/queue.h>
55167514Skmacy#include <sys/taskqueue.h>
56174708Skmacy#include <sys/proc.h>
57167514Skmacy
58167514Skmacy#include <net/bpf.h>
59167514Skmacy#include <net/ethernet.h>
60167514Skmacy#include <net/if.h>
61167514Skmacy#include <net/if_arp.h>
62167514Skmacy#include <net/if_dl.h>
63167514Skmacy#include <net/if_media.h>
64167514Skmacy#include <net/if_types.h>
65180583Skmacy#include <net/if_vlan_var.h>
66167514Skmacy
67167514Skmacy#include <netinet/in_systm.h>
68167514Skmacy#include <netinet/in.h>
69167514Skmacy#include <netinet/if_ether.h>
70167514Skmacy#include <netinet/ip.h>
71167514Skmacy#include <netinet/ip.h>
72167514Skmacy#include <netinet/tcp.h>
73167514Skmacy#include <netinet/udp.h>
74167514Skmacy
75167514Skmacy#include <dev/pci/pcireg.h>
76167514Skmacy#include <dev/pci/pcivar.h>
77167514Skmacy#include <dev/pci/pci_private.h>
78167514Skmacy
79170076Skmacy#include <cxgb_include.h>
80167514Skmacy
81167514Skmacy#ifdef PRIV_SUPPORTED
82167514Skmacy#include <sys/priv.h>
83167514Skmacy#endif
84167514Skmacy
85192933Sgnnstatic int cxgb_setup_interrupts(adapter_t *);
86192933Sgnnstatic void cxgb_teardown_interrupts(adapter_t *);
87194521Skmacystatic int cxgb_begin_op(struct port_info *, const char *);
88194521Skmacystatic int cxgb_begin_detach(struct port_info *);
89194521Skmacystatic int cxgb_end_op(struct port_info *);
90167514Skmacystatic void cxgb_init(void *);
91194521Skmacystatic int cxgb_init_synchronized(struct port_info *);
92194521Skmacystatic int cxgb_uninit_synchronized(struct port_info *);
93167514Skmacystatic int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
94167514Skmacystatic int cxgb_media_change(struct ifnet *);
95186282Sgnnstatic int cxgb_ifm_type(int);
96194921Snpstatic void cxgb_build_medialist(struct port_info *);
97167514Skmacystatic void cxgb_media_status(struct ifnet *, struct ifmediareq *);
98167514Skmacystatic int setup_sge_qsets(adapter_t *);
99167514Skmacystatic void cxgb_async_intr(void *);
100167514Skmacystatic void cxgb_ext_intr_handler(void *, int);
101170869Skmacystatic void cxgb_tick_handler(void *, int);
102167514Skmacystatic void cxgb_tick(void *);
103167514Skmacystatic void setup_rss(adapter_t *sc);
104167514Skmacy
105167514Skmacy/* Attachment glue for the PCI controller end of the device.  Each port of
106167514Skmacy * the device is attached separately, as defined later.
107167514Skmacy */
108167514Skmacystatic int cxgb_controller_probe(device_t);
109167514Skmacystatic int cxgb_controller_attach(device_t);
110167514Skmacystatic int cxgb_controller_detach(device_t);
111167514Skmacystatic void cxgb_free(struct adapter *);
112167514Skmacystatic __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
113167514Skmacy    unsigned int end);
114182679Skmacystatic void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
115167514Skmacystatic int cxgb_get_regs_len(void);
116169978Skmacystatic int offload_open(struct port_info *pi);
117171978Skmacystatic void touch_bars(device_t dev);
118174626Skmacystatic int offload_close(struct t3cdev *tdev);
119197791Snpstatic void cxgb_update_mac_settings(struct port_info *p);
120167514Skmacy
/* Newbus glue for the PCI controller ("cxgbc") device. */
static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe,		cxgb_controller_probe),
	DEVMETHOD(device_attach,	cxgb_controller_attach),
	DEVMETHOD(device_detach,	cxgb_controller_detach),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }	/* terminator */
};

static driver_t cxgb_controller_driver = {
	"cxgbc",
	cxgb_controller_methods,
	sizeof(struct adapter)	/* softc is the whole adapter state */
};

static devclass_t	cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
141167514Skmacy
/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);

static device_method_t cxgb_port_methods[] = {
	DEVMETHOD(device_probe,		cxgb_port_probe),
	DEVMETHOD(device_attach,	cxgb_port_attach),
	DEVMETHOD(device_detach,	cxgb_port_detach),
	{ 0, 0 }	/* terminator */
};

static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0	/* softc allocated/assigned by the controller attach */
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

/* Character-device entry points (management/ioctl interface). */
static struct cdevsw cxgb_cdevsw = {
       .d_version =    D_VERSION,
       .d_flags =      0,
       .d_open =       cxgb_extension_open,
       .d_close =      cxgb_extension_close,
       .d_ioctl =      cxgb_extension_ioctl,
       .d_name =       "cxgb",
};

static devclass_t	cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
178167514Skmacy
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;
TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
    "disable ULP offload");

/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use multiq = 0
 */
static int multiq = 1;
TUNABLE_INT("hw.cxgb.multiq", &multiq);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
    "use min(ncpus/ports, 8) queue-sets per port");

/*
 * By default the driver will not update the firmware unless
 * it was compiled against a newer version
 */
static int force_fw_update = 0;
TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
    "update firmware even if up to date");

/* Use 16kB mbuf clusters for the jumbo free list (consumed by the SGE code). */
int cxgb_use_16k_clusters = 1;
TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");

/*
 * Tune the size of the output queue.
 */
int cxgb_snd_queue_len = IFQ_MAXLEN;
TUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
    &cxgb_snd_queue_len, 0, "send queue size ");
235194039Sgnn
236194039Sgnn
/* Bounds (in descriptors/entries) on the sizes of the SGE rings. */
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32,
	MIN_FL_JUMBO_ENTRIES = 32
};
249167514Skmacy
/*
 * Software shadow of one T3 hardware packet-filter entry.
 * NOTE(review): individual field semantics are inferred from the names
 * here; confirm against the T3 filter programming documentation.
 */
struct filter_info {
	u32 sip;		/* source IP */
	u32 sip_mask;		/* source IP mask */
	u32 dip;		/* destination IP */
	u16 sport;		/* source port */
	u16 dport;		/* destination port */
	u32 vlan:12;
	u32 vlan_prio:3;	/* FILTER_NO_VLAN_PRI = wildcard, presumably */
	u32 mac_hit:1;
	u32 mac_idx:4;
	u32 mac_vld:1;
	u32 pkt_type:2;
	u32 report_filter_id:1;
	u32 pass:1;
	u32 rss:1;
	u32 qset:3;
	u32 locked:1;
	u32 valid:1;
};

enum { FILTER_NO_VLAN_PRI = 7 };

#define EEPROM_MAGIC 0x38E2F10C

/* Bitmask covering all possible ports on an adapter. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
275167514Skmacy
/* Table for probing the cards.  The desc field isn't actually used */
struct cxgb_ident {
	uint16_t	vendor;
	uint16_t	device;
	int		index;	/* passed to t3_get_adapter_info() */
	char		*desc;
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
	{0, 0, 0, NULL}	/* NULL desc terminates the lookup in cxgb_get_ident */
};
299167514Skmacy
300171471Skmacystatic int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
301171471Skmacy
302176472Skmacy
303174708Skmacystatic __inline char
304171471Skmacyt3rev2char(struct adapter *adapter)
305171471Skmacy{
306171471Skmacy	char rev = 'z';
307171471Skmacy
308171471Skmacy	switch(adapter->params.rev) {
309171471Skmacy	case T3_REV_A:
310171471Skmacy		rev = 'a';
311171471Skmacy		break;
312171471Skmacy	case T3_REV_B:
313171471Skmacy	case T3_REV_B2:
314171471Skmacy		rev = 'b';
315171471Skmacy		break;
316171471Skmacy	case T3_REV_C:
317171471Skmacy		rev = 'c';
318171471Skmacy		break;
319171471Skmacy	}
320171471Skmacy	return rev;
321171471Skmacy}
322171471Skmacy
323167514Skmacystatic struct cxgb_ident *
324167514Skmacycxgb_get_ident(device_t dev)
325167514Skmacy{
326167514Skmacy	struct cxgb_ident *id;
327167514Skmacy
328167514Skmacy	for (id = cxgb_identifiers; id->desc != NULL; id++) {
329167514Skmacy		if ((id->vendor == pci_get_vendor(dev)) &&
330167514Skmacy		    (id->device == pci_get_device(dev))) {
331167514Skmacy			return (id);
332167514Skmacy		}
333167514Skmacy	}
334167514Skmacy	return (NULL);
335167514Skmacy}
336167514Skmacy
337167514Skmacystatic const struct adapter_info *
338167514Skmacycxgb_get_adapter_info(device_t dev)
339167514Skmacy{
340167514Skmacy	struct cxgb_ident *id;
341167514Skmacy	const struct adapter_info *ai;
342183063Skmacy
343167514Skmacy	id = cxgb_get_ident(dev);
344167514Skmacy	if (id == NULL)
345167514Skmacy		return (NULL);
346167514Skmacy
347167514Skmacy	ai = t3_get_adapter_info(id->index);
348167514Skmacy
349167514Skmacy	return (ai);
350167514Skmacy}
351167514Skmacy
352167514Skmacystatic int
353167514Skmacycxgb_controller_probe(device_t dev)
354167514Skmacy{
355167514Skmacy	const struct adapter_info *ai;
356167514Skmacy	char *ports, buf[80];
357170654Skmacy	int nports;
358183063Skmacy
359167514Skmacy	ai = cxgb_get_adapter_info(dev);
360167514Skmacy	if (ai == NULL)
361167514Skmacy		return (ENXIO);
362167514Skmacy
363170654Skmacy	nports = ai->nports0 + ai->nports1;
364170654Skmacy	if (nports == 1)
365167514Skmacy		ports = "port";
366167514Skmacy	else
367167514Skmacy		ports = "ports";
368167514Skmacy
369199237Snp	snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
370167514Skmacy	device_set_desc_copy(dev, buf);
371167514Skmacy	return (BUS_PROBE_DEFAULT);
372167514Skmacy}
373167514Skmacy
/* firmware(9) image names; %c is the T3 revision char (see t3rev2char) */
#define FW_FNAME "cxgb_t3fw"
#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
377171471Skmacy
378167514Skmacystatic int
379169978Skmacyupgrade_fw(adapter_t *sc)
380167514Skmacy{
381167514Skmacy#ifdef FIRMWARE_LATEST
382167514Skmacy	const struct firmware *fw;
383167514Skmacy#else
384167514Skmacy	struct firmware *fw;
385167514Skmacy#endif
386167514Skmacy	int status;
387167514Skmacy
388176572Skmacy	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
389176572Skmacy		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
390169978Skmacy		return (ENOENT);
391171471Skmacy	} else
392176572Skmacy		device_printf(sc->dev, "updating firmware on card\n");
393167514Skmacy	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
394167514Skmacy
395171471Skmacy	device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
396171471Skmacy
397167514Skmacy	firmware_put(fw, FIRMWARE_UNLOAD);
398167514Skmacy
399167514Skmacy	return (status);
400167514Skmacy}
401167514Skmacy
402192537Sgnn/*
403192537Sgnn * The cxgb_controller_attach function is responsible for the initial
404192537Sgnn * bringup of the device.  Its responsibilities include:
405192537Sgnn *
406192537Sgnn *  1. Determine if the device supports MSI or MSI-X.
407192537Sgnn *  2. Allocate bus resources so that we can access the Base Address Register
408192537Sgnn *  3. Create and initialize mutexes for the controller and its control
409192537Sgnn *     logic such as SGE and MDIO.
410192537Sgnn *  4. Call hardware specific setup routine for the adapter as a whole.
411192537Sgnn *  5. Allocate the BAR for doing MSI-X.
412192537Sgnn *  6. Setup the line interrupt iff MSI-X is not supported.
413192537Sgnn *  7. Create the driver's taskq.
414192584Sgnn *  8. Start one task queue service thread.
415192584Sgnn *  9. Check if the firmware and SRAM are up-to-date.  They will be
416192584Sgnn *     auto-updated later (before FULL_INIT_DONE), if required.
417192537Sgnn * 10. Create a child device for each MAC (port)
418192537Sgnn * 11. Initialize T3 private state.
419192537Sgnn * 12. Trigger the LED
420192537Sgnn * 13. Setup offload iff supported.
421192537Sgnn * 14. Reset/restart the tick callout.
422192537Sgnn * 15. Attach sysctls
423192537Sgnn *
424192537Sgnn * NOTE: Any modification or deviation from this list MUST be reflected in
425192537Sgnn * the above comment.  Failure to do so will result in problems on various
426192537Sgnn * error conditions including link flapping.
427192537Sgnn */
428167514Skmacystatic int
429167514Skmacycxgb_controller_attach(device_t dev)
430167514Skmacy{
431167514Skmacy	device_t child;
432167514Skmacy	const struct adapter_info *ai;
433167514Skmacy	struct adapter *sc;
434172109Skmacy	int i, error = 0;
435167514Skmacy	uint32_t vers;
436167760Skmacy	int port_qsets = 1;
437171868Skmacy#ifdef MSI_SUPPORTED
438172109Skmacy	int msi_needed, reg;
439176472Skmacy#endif
440185655Sgnn	char buf[80];
441185655Sgnn
442167514Skmacy	sc = device_get_softc(dev);
443167514Skmacy	sc->dev = dev;
444169978Skmacy	sc->msi_count = 0;
445172109Skmacy	ai = cxgb_get_adapter_info(dev);
446172109Skmacy
447172109Skmacy	/*
448172109Skmacy	 * XXX not really related but a recent addition
449172109Skmacy	 */
450172109Skmacy#ifdef MSI_SUPPORTED
451167840Skmacy	/* find the PCIe link width and set max read request to 4KB*/
452167840Skmacy	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
453167840Skmacy		uint16_t lnk, pectl;
454167840Skmacy		lnk = pci_read_config(dev, reg + 0x12, 2);
455167840Skmacy		sc->link_width = (lnk >> 4) & 0x3f;
456167840Skmacy
457167840Skmacy		pectl = pci_read_config(dev, reg + 0x8, 2);
458167840Skmacy		pectl = (pectl & ~0x7000) | (5 << 12);
459167840Skmacy		pci_write_config(dev, reg + 0x8, pectl, 2);
460167840Skmacy	}
461171471Skmacy
462171471Skmacy	if (sc->link_width != 0 && sc->link_width <= 4 &&
463171471Skmacy	    (ai->nports0 + ai->nports1) <= 2) {
464167840Skmacy		device_printf(sc->dev,
465167862Skmacy		    "PCIe x%d Link, expect reduced performance\n",
466167840Skmacy		    sc->link_width);
467167840Skmacy	}
468172109Skmacy#endif
469171978Skmacy	touch_bars(dev);
470167514Skmacy	pci_enable_busmaster(dev);
471167514Skmacy	/*
472167514Skmacy	 * Allocate the registers and make them available to the driver.
473167514Skmacy	 * The registers that we care about for NIC mode are in BAR 0
474167514Skmacy	 */
475167514Skmacy	sc->regs_rid = PCIR_BAR(0);
476167514Skmacy	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
477167514Skmacy	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
478176472Skmacy		device_printf(dev, "Cannot allocate BAR region 0\n");
479167514Skmacy		return (ENXIO);
480167514Skmacy	}
481176472Skmacy	sc->udbs_rid = PCIR_BAR(2);
482185662Sgnn	sc->udbs_res = NULL;
483185662Sgnn	if (is_offload(sc) &&
484185662Sgnn	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
485185662Sgnn		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
486176472Skmacy		device_printf(dev, "Cannot allocate BAR region 1\n");
487176472Skmacy		error = ENXIO;
488176472Skmacy		goto out;
489185662Sgnn	}
490167514Skmacy
491170869Skmacy	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
492170869Skmacy	    device_get_unit(dev));
493170869Skmacy	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
494170869Skmacy
495170869Skmacy	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
496170869Skmacy	    device_get_unit(dev));
497170869Skmacy	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
498170869Skmacy	    device_get_unit(dev));
499170869Skmacy	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
500170869Skmacy	    device_get_unit(dev));
501167514Skmacy
502176472Skmacy	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
503170869Skmacy	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
504170869Skmacy	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
505170869Skmacy
506167514Skmacy	sc->bt = rman_get_bustag(sc->regs_res);
507167514Skmacy	sc->bh = rman_get_bushandle(sc->regs_res);
508167514Skmacy	sc->mmio_len = rman_get_size(sc->regs_res);
509167769Skmacy
510197791Snp	for (i = 0; i < MAX_NPORTS; i++)
511197791Snp		sc->port[i].adapter = sc;
512197791Snp
513167769Skmacy	if (t3_prep_adapter(sc, ai, 1) < 0) {
514170654Skmacy		printf("prep adapter failed\n");
515167769Skmacy		error = ENODEV;
516167769Skmacy		goto out;
517167769Skmacy	}
518177464Skmacy        /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
519167514Skmacy	 * enough messages for the queue sets.  If that fails, try falling
520167514Skmacy	 * back to MSI.  If that fails, then try falling back to the legacy
521167514Skmacy	 * interrupt pin model.
522167514Skmacy	 */
523167514Skmacy#ifdef MSI_SUPPORTED
524167760Skmacy
525167514Skmacy	sc->msix_regs_rid = 0x20;
526167514Skmacy	if ((msi_allowed >= 2) &&
527167514Skmacy	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
528167514Skmacy	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
529167514Skmacy
530192933Sgnn		if (multiq)
531192933Sgnn			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
532192933Sgnn		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
533167760Skmacy
534192933Sgnn		if (pci_msix_count(dev) == 0 ||
535192933Sgnn		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
536192933Sgnn		    sc->msi_count != msi_needed) {
537192933Sgnn			device_printf(dev, "alloc msix failed - "
538192933Sgnn				      "msi_count=%d, msi_needed=%d, err=%d; "
539192933Sgnn				      "will try MSI\n", sc->msi_count,
540192933Sgnn				      msi_needed, error);
541169978Skmacy			sc->msi_count = 0;
542192933Sgnn			port_qsets = 1;
543167514Skmacy			pci_release_msi(dev);
544167514Skmacy			bus_release_resource(dev, SYS_RES_MEMORY,
545167514Skmacy			    sc->msix_regs_rid, sc->msix_regs_res);
546167514Skmacy			sc->msix_regs_res = NULL;
547167514Skmacy		} else {
548167514Skmacy			sc->flags |= USING_MSIX;
549192933Sgnn			sc->cxgb_intr = cxgb_async_intr;
550192933Sgnn			device_printf(dev,
551192933Sgnn				      "using MSI-X interrupts (%u vectors)\n",
552192933Sgnn				      sc->msi_count);
553167514Skmacy		}
554167514Skmacy	}
555167514Skmacy
556169978Skmacy	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
557169978Skmacy		sc->msi_count = 1;
558192933Sgnn		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
559192933Sgnn			device_printf(dev, "alloc msi failed - "
560192933Sgnn				      "err=%d; will try INTx\n", error);
561169978Skmacy			sc->msi_count = 0;
562192933Sgnn			port_qsets = 1;
563167514Skmacy			pci_release_msi(dev);
564167514Skmacy		} else {
565167514Skmacy			sc->flags |= USING_MSI;
566170081Skmacy			sc->cxgb_intr = t3_intr_msi;
567192933Sgnn			device_printf(dev, "using MSI interrupts\n");
568167514Skmacy		}
569167514Skmacy	}
570167514Skmacy#endif
571169978Skmacy	if (sc->msi_count == 0) {
572167760Skmacy		device_printf(dev, "using line interrupts\n");
573170081Skmacy		sc->cxgb_intr = t3b_intr;
574167514Skmacy	}
575167514Skmacy
576167514Skmacy	/* Create a private taskqueue thread for handling driver events */
577167514Skmacy#ifdef TASKQUEUE_CURRENT
578167514Skmacy	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
579167514Skmacy	    taskqueue_thread_enqueue, &sc->tq);
580167514Skmacy#else
581167514Skmacy	sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
582167514Skmacy	    taskqueue_thread_enqueue, &sc->tq);
583167514Skmacy#endif
584167514Skmacy	if (sc->tq == NULL) {
585167514Skmacy		device_printf(dev, "failed to allocate controller task queue\n");
586167514Skmacy		goto out;
587167514Skmacy	}
588171804Skmacy
589167514Skmacy	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
590167514Skmacy	    device_get_nameunit(dev));
591167514Skmacy	TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
592170869Skmacy	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
593167514Skmacy
594167514Skmacy
595167514Skmacy	/* Create a periodic callout for checking adapter status */
596170869Skmacy	callout_init(&sc->cxgb_tick_ch, TRUE);
597167514Skmacy
598189643Sgnn	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
599167514Skmacy		/*
600167514Skmacy		 * Warn user that a firmware update will be attempted in init.
601167514Skmacy		 */
602169978Skmacy		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
603169978Skmacy		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
604167514Skmacy		sc->flags &= ~FW_UPTODATE;
605167514Skmacy	} else {
606167514Skmacy		sc->flags |= FW_UPTODATE;
607167514Skmacy	}
608171471Skmacy
609189643Sgnn	if (t3_check_tpsram_version(sc) < 0) {
610171471Skmacy		/*
611171471Skmacy		 * Warn user that a firmware update will be attempted in init.
612171471Skmacy		 */
613171471Skmacy		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
614171471Skmacy		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
615171471Skmacy		sc->flags &= ~TPS_UPTODATE;
616171471Skmacy	} else {
617171471Skmacy		sc->flags |= TPS_UPTODATE;
618171471Skmacy	}
619167514Skmacy
620167514Skmacy	/*
621167514Skmacy	 * Create a child device for each MAC.  The ethernet attachment
622167514Skmacy	 * will be done in these children.
623167760Skmacy	 */
624167760Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
625171978Skmacy		struct port_info *pi;
626171978Skmacy
627167514Skmacy		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
628167514Skmacy			device_printf(dev, "failed to add child port\n");
629167514Skmacy			error = EINVAL;
630167514Skmacy			goto out;
631167514Skmacy		}
632171978Skmacy		pi = &sc->port[i];
633171978Skmacy		pi->adapter = sc;
634171978Skmacy		pi->nqsets = port_qsets;
635171978Skmacy		pi->first_qset = i*port_qsets;
636171978Skmacy		pi->port_id = i;
637171978Skmacy		pi->tx_chan = i >= ai->nports0;
638171978Skmacy		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
639171978Skmacy		sc->rxpkt_map[pi->txpkt_intf] = i;
640174708Skmacy		sc->port[i].tx_chan = i >= ai->nports0;
641171471Skmacy		sc->portdev[i] = child;
642171978Skmacy		device_set_softc(child, pi);
643167514Skmacy	}
644167514Skmacy	if ((error = bus_generic_attach(dev)) != 0)
645167514Skmacy		goto out;
646167514Skmacy
647167514Skmacy	/* initialize sge private state */
648170654Skmacy	t3_sge_init_adapter(sc);
649167514Skmacy
650167514Skmacy	t3_led_ready(sc);
651169978Skmacy
652169978Skmacy	cxgb_offload_init();
653169978Skmacy	if (is_offload(sc)) {
654169978Skmacy		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
655169978Skmacy		cxgb_adapter_ofld(sc);
656169978Skmacy        }
657167514Skmacy	error = t3_get_fw_version(sc, &vers);
658167514Skmacy	if (error)
659167514Skmacy		goto out;
660167514Skmacy
661169978Skmacy	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
662169978Skmacy	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
663169978Skmacy	    G_FW_VERSION_MICRO(vers));
664169978Skmacy
665199237Snp	snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
666199237Snp		 ai->desc, is_offload(sc) ? "R" : "",
667185655Sgnn		 sc->params.vpd.ec, sc->params.vpd.sn);
668185655Sgnn	device_set_desc_copy(dev, buf);
669185655Sgnn
670192540Sgnn	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
671192540Sgnn		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
672192540Sgnn		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
673192540Sgnn
674176472Skmacy	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
675181652Skmacy	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
676174708Skmacy	t3_add_attach_sysctls(sc);
677167514Skmacyout:
678167514Skmacy	if (error)
679167514Skmacy		cxgb_free(sc);
680167514Skmacy
681167514Skmacy	return (error);
682167514Skmacy}
683167514Skmacy
684192537Sgnn/*
685192584Sgnn * The cxgb_controller_detach routine is called with the device is
686192537Sgnn * unloaded from the system.
687192537Sgnn */
688192537Sgnn
689167514Skmacystatic int
690167514Skmacycxgb_controller_detach(device_t dev)
691167514Skmacy{
692167514Skmacy	struct adapter *sc;
693167514Skmacy
694167514Skmacy	sc = device_get_softc(dev);
695167514Skmacy
696167514Skmacy	cxgb_free(sc);
697167514Skmacy
698167514Skmacy	return (0);
699167514Skmacy}
700167514Skmacy
/*
 * The cxgb_free() is called by the cxgb_controller_detach() routine
 * to tear down the structures that were built up in
 * cxgb_controller_attach(), and should be the final piece of work
 * done when fully unloading the driver.
 *
 *
 *  1. Shutting down the threads started by the cxgb_controller_attach()
 *     routine.
 *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
 *  3. Detaching all of the port devices created during the
 *     cxgb_controller_attach() routine.
 *  4. Removing the device children created via cxgb_controller_attach().
 *  5. Releasing PCI resources associated with the device.
 *  6. Turning off the offload support, iff it was turned on.
 *  7. Destroying the mutexes created in cxgb_controller_attach().
 *
 */
static void
cxgb_free(struct adapter *sc)
{
	int i;

	/* Flag shutdown so that in-progress operations can bail out. */
	ADAPTER_LOCK(sc);
	sc->flags |= CXGB_SHUTDOWN;
	ADAPTER_UNLOCK(sc);

	/*
	 * Make sure all child devices are gone.
	 */
	bus_generic_detach(sc->dev);
	for (i = 0; i < (sc)->params.nports; i++) {
		if (sc->portdev[i] &&
		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
			device_printf(sc->dev, "failed to delete child port\n");
	}

	/*
	 * At this point, it is as if cxgb_port_detach has run on all ports, and
	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
	 * all open devices have been closed.
	 */
	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
					   __func__, sc->open_device_map));
	for (i = 0; i < sc->params.nports; i++) {
		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
						  __func__, i));
	}

	/*
	 * Finish off the adapter's callouts.
	 */
	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	/*
	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
	 * sysctls are cleaned up by the kernel linker.
	 */
	if (sc->flags & FULL_INIT_DONE) {
 		t3_free_sge_resources(sc);
 		sc->flags &= ~FULL_INIT_DONE;
 	}

	/*
	 * Release all interrupt resources.
	 */
	cxgb_teardown_interrupts(sc);
#ifdef MSI_SUPPORTED
	/* Release the MSI/MSI-X messages and the MSI-X table BAR, if any. */
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}

	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}
#endif

	/*
	 * Free the adapter's taskqueue.
	 */
	if (sc->tq != NULL) {
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}

	/* Unregister from the offload framework, if we had registered. */
	if (is_offload(sc)) {
		clrbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_adapter_unofld(sc);
	}

#ifdef notyet
	if (sc->flags & CXGB_OFLD_INIT)
		cxgb_offload_deactivate(sc);
#endif
	free(sc->filters, M_DEVBUF);	/* free(NULL) is a no-op */
	t3_sge_free(sc);

	cxgb_offload_exit();

	/* Release the doorbell and register BARs. */
	if (sc->udbs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	/* Destroy the mutexes created in cxgb_controller_attach(). */
	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	ADAPTER_LOCK_DEINIT(sc);
}
818167514Skmacy
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 *
 *	Returns 0 on success or the error from t3_sge_alloc()/
 *	t3_sge_alloc_qset().  On qset allocation failure all SGE
 *	resources allocated so far are freed before returning.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	/* irq_idx of -1 is used when not running with a dedicated MSI vector */
	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;

	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
			/* With MSI-X, qset N is tied to interrupt vector N+1 */
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				t3_free_sge_resources(sc);
				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
				    err);
				return (err);
			}
		}
	}

	return (0);
}
859167514Skmacy
/*
 * Release all interrupt resources held by the adapter: for every MSI-X
 * vector that was fully set up, tear down its handler and release its
 * IRQ resource, then do the same for the single INTx/MSI interrupt, if
 * any.  Also used to unwind a partially completed
 * cxgb_setup_interrupts().
 */
static void
cxgb_teardown_interrupts(adapter_t *sc)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		if (sc->msix_intr_tag[i] == NULL) {

			/* Should have been setup fully or not at all */
			KASSERT(sc->msix_irq_res[i] == NULL &&
				sc->msix_irq_rid[i] == 0,
				("%s: half-done interrupt (%d).", __func__, i));

			continue;
		}

		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
				  sc->msix_intr_tag[i]);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
				     sc->msix_irq_res[i]);

		/* Clear state so a repeated call is harmless. */
		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
		sc->msix_irq_rid[i] = 0;
	}

	if (sc->intr_tag) {
		KASSERT(sc->irq_res != NULL,
			("%s: half-done interrupt.", __func__));

		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
				     sc->irq_res);

		sc->irq_res = sc->intr_tag = NULL;
		sc->irq_rid = 0;
	}
}
897170654Skmacy
/*
 * Allocate and set up the adapter's interrupts.  The main interrupt is
 * always set up (rid 0 for INTx, rid 1 for MSI/MSI-X).  When MSI-X is
 * in use, one additional vector is set up per extra message (rids 2 and
 * up), each dispatching t3_intr_msix for its own queue set.  On failure
 * anything already set up is released, leaving the adapter with no
 * interrupt resources.  Returns 0 on success or an errno.
 */
static int
cxgb_setup_interrupts(adapter_t *sc)
{
	struct resource *res;
	void *tag;
	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);

	/* rid 0 is the legacy INTx line; MSI/MSI-X messages start at rid 1 */
	sc->irq_rid = intr_flag ? 1 : 0;
	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
			      intr_flag, sc->irq_rid);
		err = EINVAL;
		sc->irq_rid = 0;
	} else {
		err = bus_setup_intr(sc->dev, sc->irq_res,
				     INTR_MPSAFE | INTR_TYPE_NET,
#ifdef INTR_FILTERS
				     NULL,
#endif
				     sc->cxgb_intr, sc, &sc->intr_tag);

		if (err) {
			device_printf(sc->dev,
				      "Cannot set up interrupt (%x, %u, %d)\n",
				      intr_flag, sc->irq_rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
					     sc->irq_res);
			sc->irq_res = sc->intr_tag = NULL;
			sc->irq_rid = 0;
		}
	}

	/* That's all for INTx or MSI */
	if (!(intr_flag & USING_MSIX) || err)
		return (err);

	/* One additional vector per queue set beyond the first. */
	for (i = 0; i < sc->msi_count - 1; i++) {
		rid = i + 2;
		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
					     RF_SHAREABLE | RF_ACTIVE);
		if (res == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt "
				      "for message %d\n", rid);
			err = EINVAL;
			break;
		}

		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
#ifdef INTR_FILTERS
				     NULL,
#endif
				     t3_intr_msix, &sc->sge.qs[i], &tag);
		if (err) {
			device_printf(sc->dev, "Cannot set up interrupt "
				      "for message %d (%d)\n", rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
			break;
		}

		/* Record the vector only once it is fully operational. */
		sc->msix_irq_rid[i] = rid;
		sc->msix_irq_res[i] = res;
		sc->msix_intr_tag[i] = tag;
	}

	/* Unwind everything on any failure. */
	if (err)
		cxgb_teardown_interrupts(sc);

	return (err);
}
969167514Skmacy
970192933Sgnn
971167514Skmacystatic int
972167514Skmacycxgb_port_probe(device_t dev)
973167514Skmacy{
974167514Skmacy	struct port_info *p;
975167514Skmacy	char buf[80];
976176472Skmacy	const char *desc;
977176472Skmacy
978167514Skmacy	p = device_get_softc(dev);
979176472Skmacy	desc = p->phy.desc;
980176472Skmacy	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
981167514Skmacy	device_set_desc_copy(dev, buf);
982167514Skmacy	return (0);
983167514Skmacy}
984167514Skmacy
985167514Skmacy
986167514Skmacystatic int
987167514Skmacycxgb_makedev(struct port_info *pi)
988167514Skmacy{
989167514Skmacy
990170654Skmacy	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
991170654Skmacy	    UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
992167514Skmacy
993167514Skmacy	if (pi->port_cdev == NULL)
994167514Skmacy		return (ENOMEM);
995167514Skmacy
996167514Skmacy	pi->port_cdev->si_drv1 = (void *)pi;
997167514Skmacy
998167514Skmacy	return (0);
999167514Skmacy}
1000167514Skmacy
/*
 * On kernels without LRO support, force IFCAP_LRO to 0 so the
 * capability masks below compile to no-ops.
 */
#ifndef LRO_SUPPORTED
#ifdef IFCAP_LRO
#undef IFCAP_LRO
#endif
#define IFCAP_LRO 0x0
#endif

/*
 * Interface capabilities advertised (CXGB_CAP) and enabled by default
 * (CXGB_CAP_ENABLE).  On kernels without TSO support the TSO-related
 * flags are defined to 0 so the same code compiles either way.
 */
#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO)
#else
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
#define IFCAP_TSO4 0x0
#define IFCAP_TSO6 0x0
#define CSUM_TSO   0x0
#endif
1020167514Skmacy
1021167514Skmacy
1022167514Skmacystatic int
1023167514Skmacycxgb_port_attach(device_t dev)
1024167514Skmacy{
1025167514Skmacy	struct port_info *p;
1026167514Skmacy	struct ifnet *ifp;
1027194921Snp	int err;
1028176472Skmacy	struct adapter *sc;
1029167514Skmacy
1030176472Skmacy
1031167514Skmacy	p = device_get_softc(dev);
1032176472Skmacy	sc = p->adapter;
1033170869Skmacy	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1034171803Skmacy	    device_get_unit(device_get_parent(dev)), p->port_id);
1035170869Skmacy	PORT_LOCK_INIT(p, p->lockbuf);
1036167514Skmacy
1037167514Skmacy	/* Allocate an ifnet object and set it up */
1038167514Skmacy	ifp = p->ifp = if_alloc(IFT_ETHER);
1039167514Skmacy	if (ifp == NULL) {
1040167514Skmacy		device_printf(dev, "Cannot allocate ifnet\n");
1041167514Skmacy		return (ENOMEM);
1042167514Skmacy	}
1043167514Skmacy
1044167514Skmacy	/*
1045167514Skmacy	 * Note that there is currently no watchdog timer.
1046167514Skmacy	 */
1047167514Skmacy	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1048167514Skmacy	ifp->if_init = cxgb_init;
1049167514Skmacy	ifp->if_softc = p;
1050167514Skmacy	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1051167514Skmacy	ifp->if_ioctl = cxgb_ioctl;
1052167514Skmacy	ifp->if_start = cxgb_start;
1053174708Skmacy
1054194039Sgnn	ifp->if_snd.ifq_drv_maxlen = cxgb_snd_queue_len;
1055167514Skmacy	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1056167514Skmacy	IFQ_SET_READY(&ifp->if_snd);
1057167514Skmacy
1058167514Skmacy	ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
1059167514Skmacy	ifp->if_capabilities |= CXGB_CAP;
1060167514Skmacy	ifp->if_capenable |= CXGB_CAP_ENABLE;
1061167514Skmacy	ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
1062171471Skmacy	/*
1063171471Skmacy	 * disable TSO on 4-port - it isn't supported by the firmware yet
1064171471Skmacy	 */
1065171471Skmacy	if (p->adapter->params.nports > 2) {
1066171471Skmacy		ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
1067171471Skmacy		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
1068171471Skmacy		ifp->if_hwassist &= ~CSUM_TSO;
1069171471Skmacy	}
1070171471Skmacy
1071167514Skmacy	ether_ifattach(ifp, p->hw_addr);
1072194521Skmacy	ifp->if_transmit = cxgb_transmit;
1073194521Skmacy	ifp->if_qflush = cxgb_qflush;
1074192537Sgnn
1075171471Skmacy	/*
1076171471Skmacy	 * Only default to jumbo frames on 10GigE
1077171471Skmacy	 */
1078171471Skmacy	if (p->adapter->params.nports <= 2)
1079180583Skmacy		ifp->if_mtu = ETHERMTU_JUMBO;
1080167514Skmacy	if ((err = cxgb_makedev(p)) != 0) {
1081167514Skmacy		printf("makedev failed %d\n", err);
1082167514Skmacy		return (err);
1083167514Skmacy	}
1084194921Snp
1085194921Snp	/* Create a list of media supported by this port */
1086167514Skmacy	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1087167514Skmacy	    cxgb_media_status);
1088194921Snp	cxgb_build_medialist(p);
1089176472Skmacy
1090170654Skmacy	t3_sge_init_port(p);
1091189643Sgnn
1092192537Sgnn	return (err);
1093167514Skmacy}
1094167514Skmacy
/*
 * cxgb_port_detach() is called via the device_detach methods when
 * cxgb_free() calls the bus_generic_detach.  It is responsible for
 * removing the device from the view of the kernel, i.e. from all
 * interfaces lists etc.  This routine is only called when the driver is
 * being unloaded, not when the link goes down.
 */
static int
cxgb_port_detach(device_t dev)
{
	struct port_info *p;
	struct adapter *sc;
	int i;

	p = device_get_softc(dev);
	sc = p->adapter;

	cxgb_begin_detach(p);

	if (p->port_cdev != NULL)
		destroy_dev(p->port_cdev);

	/* Quiesce the port before unhooking the ifnet from the stack. */
	cxgb_uninit_synchronized(p);
	ether_ifdetach(p->ifp);

	/* Drain the Tx watchdog and timer callouts of every qset we own. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_drain(&txq->txq_watchdog);
		callout_drain(&txq->txq_timer);
	}

	PORT_LOCK_DEINIT(p);
	if_free(p->ifp);
	p->ifp = NULL;	/* cxgb_free() asserts this is NULL after detach */

	cxgb_end_op(p);
	return (0);
}
1135167514Skmacy
/*
 * Handle a fatal adapter error: stop the SGE, zero both MACs' Tx/Rx
 * control registers, disable further interrupts, then log the
 * firmware status words read from the CIM control block.
 */
void
t3_fatal_err(struct adapter *sc)
{
	u_int fw_status[4];

	if (sc->flags & FULL_INIT_DONE) {
		t3_sge_stop(sc);
		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(sc);
	}
	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}
1154167514Skmacy
1155167514Skmacyint
1156167514Skmacyt3_os_find_pci_capability(adapter_t *sc, int cap)
1157167514Skmacy{
1158167514Skmacy	device_t dev;
1159167514Skmacy	struct pci_devinfo *dinfo;
1160167514Skmacy	pcicfgregs *cfg;
1161167514Skmacy	uint32_t status;
1162167514Skmacy	uint8_t ptr;
1163167514Skmacy
1164167514Skmacy	dev = sc->dev;
1165167514Skmacy	dinfo = device_get_ivars(dev);
1166167514Skmacy	cfg = &dinfo->cfg;
1167167514Skmacy
1168167514Skmacy	status = pci_read_config(dev, PCIR_STATUS, 2);
1169167514Skmacy	if (!(status & PCIM_STATUS_CAPPRESENT))
1170167514Skmacy		return (0);
1171167514Skmacy
1172167514Skmacy	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1173167514Skmacy	case 0:
1174167514Skmacy	case 1:
1175167514Skmacy		ptr = PCIR_CAP_PTR;
1176167514Skmacy		break;
1177167514Skmacy	case 2:
1178167514Skmacy		ptr = PCIR_CAP_PTR_2;
1179167514Skmacy		break;
1180167514Skmacy	default:
1181167514Skmacy		return (0);
1182167514Skmacy		break;
1183167514Skmacy	}
1184167514Skmacy	ptr = pci_read_config(dev, ptr, 1);
1185167514Skmacy
1186167514Skmacy	while (ptr != 0) {
1187167514Skmacy		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1188167514Skmacy			return (ptr);
1189167514Skmacy		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1190167514Skmacy	}
1191167514Skmacy
1192167514Skmacy	return (0);
1193167514Skmacy}
1194167514Skmacy
/*
 * Save the adapter's PCI configuration space via the bus layer.
 * Always returns 0.
 */
int
t3_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}
1207167514Skmacy
/*
 * Restore the adapter's previously saved PCI configuration space.
 * Always returns 0.
 */
int
t3_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}
1220167514Skmacy
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_status: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *	@mac_was_reset: nonzero if the MAC was reset and its settings must
 *	be reprogrammed
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
     int duplex, int fc, int mac_was_reset)
{
	struct port_info *pi = &adapter->port[port_id];
	struct ifnet *ifp = pi->ifp;

	/* no race with detach, so ifp should always be good */
	KASSERT(ifp, ("%s: if detached.", __func__));

	/* Reapply mac settings if they were lost due to a reset */
	if (mac_was_reset) {
		PORT_LOCK(pi);
		cxgb_update_mac_settings(pi);
		PORT_UNLOCK(pi);
	}

	if (link_status) {
		ifp->if_baudrate = IF_Mbps(speed);
		if_link_state_change(ifp, LINK_STATE_UP);
	} else
		if_link_state_change(ifp, LINK_STATE_DOWN);
}
1257167514Skmacy
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the index of the port whose module changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing: the port's media list is rebuilt if the module type no
 *	longer matches the current media, and the event is logged.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	/* Indexed by phy module type; last entry is the catch-all. */
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};
	struct port_info *pi = &adap->port[port_id];
	int mod = pi->phy.modtype;

	if (mod != pi->media.ifm_cur->ifm_data)
		cxgb_build_medialist(pi);

	if (mod == phy_modtype_none)
		if_printf(pi->ifp, "PHY module unplugged\n");
	else {
		KASSERT(mod < ARRAY_SIZE(mod_str),
			("invalid PHY module type %d", mod));
		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
	}
}
1286181614Skmacy
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void
t3_os_ext_intr_handler(adapter_t *sc)
{
	if (cxgb_debug)
		printf("t3_os_ext_intr_handler\n");
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	if (sc->slow_intr_mask) {
		ADAPTER_LOCK(sc);
		/* Mask F_T3DBG until ext_intr_task re-enables it. */
		sc->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
		taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
		ADAPTER_UNLOCK(sc);
	}
}
1309167514Skmacy
/*
 * Record the Ethernet hardware address for a port in its port_info.
 */
void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called,
	 * as this is called early on in attach by t3_prep_adapter
	 * save the address off in the port structure
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}
1323167514Skmacy
/*
 * Programs the XGMAC based on the settings in the ifnet.  These settings
 * include MTU, MAC address, mcast addresses, etc.  The caller must hold
 * the port lock (asserted below).
 */
static void
cxgb_update_mac_settings(struct port_info *p)
{
	struct ifnet *ifp = p->ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;
	int mtu, hwtagging;

	PORT_LOCK_ASSERT_OWNED(p);

	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);

	/* Leave headroom for a VLAN tag if the capability is enabled. */
	mtu = ifp->if_mtu;
	if (ifp->if_capenable & IFCAP_VLAN_MTU)
		mtu += ETHER_VLAN_ENCAP_LEN;

	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;

	t3_mac_set_mtu(mac, mtu);
	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}
1352167514Skmacy
1353176472Skmacy
1354176472Skmacystatic int
1355176472Skmacyawait_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1356176472Skmacy			      unsigned long n)
1357176472Skmacy{
1358176472Skmacy	int attempts = 5;
1359176472Skmacy
1360176472Skmacy	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1361176472Skmacy		if (!--attempts)
1362176472Skmacy			return (ETIMEDOUT);
1363176472Skmacy		t3_os_sleep(10);
1364176472Skmacy	}
1365176472Skmacy	return 0;
1366176472Skmacy}
1367176472Skmacy
/*
 * Initialize TP memory parity by writing every entry of the SMT (16),
 * L2T (2048) and routing (2048) tables plus one TCB field via
 * management CPLs, then waiting for all 16 + 2048 + 2048 + 1 firmware
 * replies.  Offload mode is enabled for the duration and restored
 * before returning.  Returns 0 on success or ETIMEDOUT from
 * await_mgmt_replies().
 */
static int
init_tp_parity(struct adapter *adap)
{
	int i;
	struct mbuf *m;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* Touch every SMT entry. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_smt_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, m);
	}

	/* Touch every L2T entry. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_l2t_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* Touch every routing-table entry. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_rte_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* One TCB field write rounds out the sequence. */
	m = m_gethdr(M_WAITOK, MT_DATA);
	greq = mtod(m, struct cpl_set_tcb_field *);
	m->m_len = m->m_pkthdr.len = sizeof(*greq);
	memset(greq, 0, sizeof(*greq));
	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = htobe64(1);
	t3_mgmt_tx(adap, m);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return (i);
}
1430176472Skmacy
/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq[2];
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator for the CPU table */

	/* Count the queue sets on each of the two Tx channels. */
	nq[0] = nq[1] = 0;
	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		nq[pi->tx_chan] += pi->nqsets;
	}
	/* First half of the table maps to channel 0, second half to 1. */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = nq[0] ? i % nq[0] : 0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
	}

	/* Calculate the reverse RSS map table */
	for (i = 0; i < SGE_QSETS; ++i)
		adap->rrss_map[i] = 0xff;
	for (i = 0; i < RSS_TABLE_SIZE; ++i)
		if (adap->rrss_map[rspq_map[i]] == 0xff)
			adap->rrss_map[rspq_map[i]] = i;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
	              cpus, rspq_map);

}
1478167514Skmacy
/*
 * Hand an mbuf to the offload queue driver; returns whatever
 * t3_offload_tx() reports.
 */
static inline int
offload_tx(struct t3cdev *tdev, struct mbuf *m)
{

	return (t3_offload_tx(tdev, m));
}
1491169978Skmacy
/*
 * Build and transmit a CPL_SMT_WRITE_REQ for port `idx', programming
 * the port's MAC address into the source MAC table via the offload
 * queue.  Returns ENOMEM if no mbuf could be allocated, 0 otherwise.
 */
static int
write_smt_entry(struct adapter *adapter, int idx)
{
	struct port_info *pi = &adapter->port[idx];
	struct cpl_smt_write_req *req;
	struct mbuf *m;

	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return (ENOMEM);

	req = mtod(m, struct cpl_smt_write_req *);
	m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);

	req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);

	m_set_priority(m, 1);

	offload_tx(&adapter->tdev, m);

	return (0);
}
1518169978Skmacy
/*
 * Write an SMT entry for every port on the adapter.  Always returns 0;
 * per-port write_smt_entry() failures are ignored (best effort).
 */
static int
init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
1528169978Skmacy
1529167514Skmacystatic void
1530169978Skmacyinit_port_mtus(adapter_t *adapter)
1531169978Skmacy{
1532194521Skmacy	unsigned int mtus = ETHERMTU | (ETHERMTU << 16);
1533169978Skmacy
1534169978Skmacy	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1535169978Skmacy}
1536169978Skmacy
1537169978Skmacystatic void
1538167514Skmacysend_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1539167514Skmacy			      int hi, int port)
1540167514Skmacy{
1541167514Skmacy	struct mbuf *m;
1542167514Skmacy	struct mngt_pktsched_wr *req;
1543167514Skmacy
1544171471Skmacy	m = m_gethdr(M_DONTWAIT, MT_DATA);
1545167848Skmacy	if (m) {
1546169978Skmacy		req = mtod(m, struct mngt_pktsched_wr *);
1547194521Skmacy		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1548167848Skmacy		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1549167848Skmacy		req->sched = sched;
1550167848Skmacy		req->idx = qidx;
1551167848Skmacy		req->min = lo;
1552167848Skmacy		req->max = hi;
1553167848Skmacy		req->binding = port;
1554167848Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1555167848Skmacy		t3_mgmt_tx(adap, m);
1556167848Skmacy	}
1557167514Skmacy}
1558167514Skmacy
1559167514Skmacystatic void
1560167514Skmacybind_qsets(adapter_t *sc)
1561167514Skmacy{
1562167514Skmacy	int i, j;
1563167514Skmacy
1564167514Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
1565167514Skmacy		const struct port_info *pi = adap2pinfo(sc, i);
1566167514Skmacy
1567172096Skmacy		for (j = 0; j < pi->nqsets; ++j) {
1568167514Skmacy			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1569172096Skmacy					  -1, pi->tx_chan);
1570172096Skmacy
1571172096Skmacy		}
1572167514Skmacy	}
1573167514Skmacy}
1574167514Skmacy
1575171471Skmacystatic void
1576171471Skmacyupdate_tpeeprom(struct adapter *adap)
1577171471Skmacy{
1578172109Skmacy#ifdef FIRMWARE_LATEST
1579171471Skmacy	const struct firmware *tpeeprom;
1580172109Skmacy#else
1581172109Skmacy	struct firmware *tpeeprom;
1582172109Skmacy#endif
1583172109Skmacy
1584171471Skmacy	uint32_t version;
1585171471Skmacy	unsigned int major, minor;
1586171471Skmacy	int ret, len;
1587189643Sgnn	char rev, name[32];
1588171471Skmacy
1589171471Skmacy	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1590171471Skmacy
1591171471Skmacy	major = G_TP_VERSION_MAJOR(version);
1592171471Skmacy	minor = G_TP_VERSION_MINOR(version);
1593171471Skmacy	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1594171471Skmacy		return;
1595171471Skmacy
1596171471Skmacy	rev = t3rev2char(adap);
1597189643Sgnn	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1598171471Skmacy
1599189643Sgnn	tpeeprom = firmware_get(name);
1600171471Skmacy	if (tpeeprom == NULL) {
1601190330Sgnn		device_printf(adap->dev,
1602190330Sgnn			      "could not load TP EEPROM: unable to load %s\n",
1603190330Sgnn			      name);
1604171471Skmacy		return;
1605171471Skmacy	}
1606171471Skmacy
1607171471Skmacy	len = tpeeprom->datasize - 4;
1608171471Skmacy
1609171471Skmacy	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1610171471Skmacy	if (ret)
1611171471Skmacy		goto release_tpeeprom;
1612171471Skmacy
1613171471Skmacy	if (len != TP_SRAM_LEN) {
1614190330Sgnn		device_printf(adap->dev,
1615190330Sgnn			      "%s length is wrong len=%d expected=%d\n", name,
1616190330Sgnn			      len, TP_SRAM_LEN);
1617171471Skmacy		return;
1618171471Skmacy	}
1619171471Skmacy
1620171471Skmacy	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1621171471Skmacy	    TP_SRAM_OFFSET);
1622171471Skmacy
1623171471Skmacy	if (!ret) {
1624171471Skmacy		device_printf(adap->dev,
1625171471Skmacy			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1626171471Skmacy			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1627171471Skmacy	} else
1628190330Sgnn		device_printf(adap->dev,
1629190330Sgnn			      "Protocol SRAM image update in EEPROM failed\n");
1630171471Skmacy
1631171471Skmacyrelease_tpeeprom:
1632171471Skmacy	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1633171471Skmacy
1634171471Skmacy	return;
1635171471Skmacy}
1636171471Skmacy
1637171471Skmacystatic int
1638171471Skmacyupdate_tpsram(struct adapter *adap)
1639171471Skmacy{
1640172109Skmacy#ifdef FIRMWARE_LATEST
1641171471Skmacy	const struct firmware *tpsram;
1642172109Skmacy#else
1643172109Skmacy	struct firmware *tpsram;
1644172109Skmacy#endif
1645171471Skmacy	int ret;
1646189643Sgnn	char rev, name[32];
1647171471Skmacy
1648171471Skmacy	rev = t3rev2char(adap);
1649189643Sgnn	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1650171471Skmacy
1651171471Skmacy	update_tpeeprom(adap);
1652171471Skmacy
1653189643Sgnn	tpsram = firmware_get(name);
1654171471Skmacy	if (tpsram == NULL){
1655176613Skmacy		device_printf(adap->dev, "could not load TP SRAM\n");
1656171471Skmacy		return (EINVAL);
1657171471Skmacy	} else
1658176613Skmacy		device_printf(adap->dev, "updating TP SRAM\n");
1659171471Skmacy
1660171471Skmacy	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1661171471Skmacy	if (ret)
1662171471Skmacy		goto release_tpsram;
1663171471Skmacy
1664171471Skmacy	ret = t3_set_proto_sram(adap, tpsram->data);
1665171471Skmacy	if (ret)
1666171471Skmacy		device_printf(adap->dev, "loading protocol SRAM failed\n");
1667171471Skmacy
1668171471Skmacyrelease_tpsram:
1669171471Skmacy	firmware_put(tpsram, FIRMWARE_UNLOAD);
1670171471Skmacy
1671171471Skmacy	return ret;
1672171471Skmacy}
1673171471Skmacy
/**
 *	cxgb_up - enable the adapter
 *	@sc: adapter being enabled
 *
 *	Called when the first port is enabled.  Performs the one-time
 *	initialization of HW modules (firmware/TP SRAM upgrade, HW init,
 *	queue sets, RSS, interrupts) guarded by FULL_INIT_DONE, then the
 *	per-open work: starting the SGE and enabling interrupts.
 *
 *	Returns 0 on success or an errno.
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
					   __func__, sc->open_device_map));

	if ((sc->flags & FULL_INIT_DONE) == 0) {

		/* Bring firmware up to date before touching the hardware. */
		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;

		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;

		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		setup_rss(sc);

		t3_intr_clear(sc);
		err = cxgb_setup_interrupts(sc);
		if (err)
			goto out;

		t3_add_configured_sysctls(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);
	t3_sge_start(sc);
	t3_intr_enable(sc);

	/* T3 rev C parts need TP parity initialized once for offload. */
	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
	    is_offload(sc) && init_tp_parity(sc) == 0)
		sc->flags |= TP_PARITY_INIT;

	if (sc->flags & TP_PARITY_INIT) {
		/* Ack any stale parity errors before unmasking them. */
		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(sc->flags & QUEUES_BOUND)) {
		bind_qsets(sc);
		sc->flags |= QUEUES_BOUND;
	}

	t3_sge_reset_adapter(sc);
out:
	return (err);
}
1745169978Skmacy
/*
 * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 * during controller_detach, not here.
 */
static void
cxgb_down(struct adapter *sc)
{
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Quiesce the SGE, then mask all adapter interrupts. */
	t3_sge_stop(sc);
	t3_intr_disable(sc);
}
1759169978Skmacy
/*
 * Enable TOE (offload) operation for the adapter on behalf of port 'pi':
 * mark the offload device open, switch TP into offload mode, program the
 * MTU tables and SMT, and notify registered offload clients.
 *
 * Always returns 0.
 */
static int
offload_open(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct t3cdev *tdev = &sc->tdev;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	setbit(&sc->open_device_map, OFFLOAD_DEVMAP_BIT);

	t3_tp_set_offload_mode(sc, 1);
	tdev->lldev = pi->ifp;
	init_port_mtus(sc);
	/* rev 0 parts are limited to the first port's MTU. */
	t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
		     sc->params.rev == 0 ?  sc->port[0].ifp->if_mtu : 0xffff);
	init_smt(sc);
	cxgb_add_clients(tdev);

	return (0);
}
1780174708Skmacy
/*
 * Disable TOE operation: notify clients, detach the lldev, restore
 * dummy ops, take TP out of offload mode and clear the offload bit in
 * open_device_map.  No-op if offload was not open.  Always returns 0.
 */
static int
offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
		return (0);

	/* Call back all registered clients */
	cxgb_remove_clients(tdev);

	tdev->lldev = NULL;
	cxgb_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);

	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);

	return (0);
}
1800192537Sgnn
/*
 * Begin a synchronized operation.  If this call succeeds, it is guaranteed that
 * no one will remove the port or its ifp from underneath the caller.  Caller is
 * also granted exclusive access to open_device_map.
 *
 * operation here means init, uninit, detach, and ioctl service.
 *
 * May fail.
 * EINTR (ctrl-c pressed during ifconfig for example).
 * ENXIO (port is about to detach - due to kldunload for example).
 */
int
cxgb_begin_op(struct port_info *p, const char *wmsg)
{
	int rc = 0;
	struct adapter *sc = p->adapter;

	ADAPTER_LOCK(sc);

	/* Wait (interruptibly) while another operation holds the adapter. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, wmsg, 0)) {
			rc = EINTR;
			goto done;
		}
	}

	if (IS_DOOMED(p))
		rc = ENXIO;	/* port is being detached */
	else if (!IS_BUSY(sc))
		SET_BUSY(sc);	/* claim the adapter for this operation */
	else {
		/* Can't happen: we just waited for !IS_BUSY under the lock. */
		KASSERT(0, ("%s: port %d, p->flags = %x , sc->flags = %x",
			    __func__, p->port_id, p->flags, sc->flags));
		rc = EDOOFUS;
	}

done:
	ADAPTER_UNLOCK(sc);
	return (rc);
}
1841194521Skmacy
/*
 * End a synchronized operation.  Read comment block above cxgb_begin_op.
 * Clears the busy flag and wakes exactly one waiter.  Always returns 0.
 */
int
cxgb_end_op(struct port_info *p)
{
	struct adapter *sc = p->adapter;

	ADAPTER_LOCK(sc);
	KASSERT(IS_BUSY(sc), ("%s: not busy.", __func__));
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1858169978Skmacy
/*
 * Prepare for port detachment.  Detach is a special kind of synchronized
 * operation.  Also read comment before cxgb_begin_op.
 *
 * Unlike cxgb_begin_op this cannot fail: it marks the port doomed (so
 * pending begin_op callers return ENXIO), then waits uninterruptibly for
 * any in-progress operation to finish before claiming the adapter.
 */
static int
cxgb_begin_detach(struct port_info *p)
{
	struct adapter *sc = p->adapter;

	/*
	 * Inform those waiting for this port that it is going to be destroyed
	 * and they should not continue further.  (They'll return with ENXIO).
	 */
	ADAPTER_LOCK(sc);
	SET_DOOMED(p);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	/*
	 * Wait for in-progress operations.
	 */
	ADAPTER_LOCK(sc);
	while (IS_BUSY(sc)) {
		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
	}
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1889194521Skmacy
/*
 * if_init for cxgb ports: run the synchronized init inside a
 * begin-op/end-op pair, bailing out if the port is doomed or the
 * wait is interrupted.
 */
static void
cxgb_init(void *arg)
{
	struct port_info *pi = arg;

	if (cxgb_begin_op(pi, "cxgbinit") == 0) {
		cxgb_init_synchronized(pi);
		cxgb_end_op(pi);
	}
}
1904167514Skmacy
/*
 * Bring port 'p' up.  Must be called within a begin-op/end-op block.
 * Brings the whole adapter up (and opens offload) if this is the first
 * open port, then initializes the MAC, starts the link, marks the ifp
 * running, and arms the per-queue TX watchdogs.
 *
 * Returns 0 on success or the error from cxgb_up().
 */
static int
cxgb_init_synchronized(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	struct ifnet *ifp = p->ifp;
	struct cmac *mac = &p->mac;
	int i, rc;

	if (sc->open_device_map == 0) {
		/* First port to open: bring the adapter itself up. */
		if ((rc = cxgb_up(sc)) != 0)
			return (rc);

		if (is_offload(sc) && !ofld_disable && offload_open(p))
			log(LOG_WARNING,
			    "Could not initialize offload capabilities\n");
	}

	PORT_LOCK(p);
	t3_port_intr_enable(sc, p->port_id);
	if (!mac->multiport)
		t3_mac_init(mac);
	cxgb_update_mac_settings(p);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	PORT_UNLOCK(p);

	t3_link_changed(sc, p->port_id);

	/* Arm the TX watchdog on each of this port's queue sets. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
				 txq->txq_watchdog.c_cpu);
	}

	/* all ok */
	setbit(&sc->open_device_map, p->port_id);

	return (0);
}
1948167514Skmacy
/*
 * Called on "ifconfig down", and from port_detach
 *
 * Quiesces the port: removes it from open_device_map, disables its
 * interrupts, drains the adapter tasks, shuts down the MAC/PHY, and —
 * if this was the last open port — closes offload and brings the
 * adapter down.  Always returns 0.
 */
static int
cxgb_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/*
	 * Clear this port's bit from the open device map, and then drain all
	 * the tasks that can access/manipulate this port's port_info or ifp.
	 * We disable this port's interrupts here and so the the slow/ext
	 * interrupt tasks won't be enqueued.  The tick task will continue to
	 * be enqueued every second but the runs after this drain will not see
	 * this port in the open device map.
	 *
	 * A well behaved task must take open_device_map into account and ignore
	 * ports that are not open.
	 */
	clrbit(&sc->open_device_map, pi->port_id);
	t3_port_intr_disable(sc, pi->port_id);
	taskqueue_drain(sc->tq, &sc->slow_intr_task);
	taskqueue_drain(sc->tq, &sc->ext_intr_task);
	taskqueue_drain(sc->tq, &sc->tick_task);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* disable pause frames */
	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);

	/* Reset RX FIFO HWM */
	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);

	DELAY(100);

	/* Wait for TXFIFO empty */
	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
			F_TXFIFO_EMPTY, 1, 20, 5);

	DELAY(100);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);


	pi->phy.ops->power_down(&pi->phy, 1);

	PORT_UNLOCK(pi);

	/* Report the link as down. */
	pi->link_config.link_ok = 0;
	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);

	/* Close offload once no Ethernet port remains open. */
	if ((sc->open_device_map & PORT_MASK) == 0)
		offload_close(&sc->tdev);

	if (sc->open_device_map == 0)
		cxgb_down(pi->adapter);

	return (0);
}
2010170654Skmacy
#ifdef LRO_SUPPORTED
/*
 * Mark lro enabled or disabled in all qsets for this port
 */
static int
cxgb_set_lro(struct port_info *p, int enabled)
{
	struct adapter *sc = p->adapter;
	int i;

	PORT_LOCK_ASSERT_OWNED(p);

	for (i = 0; i < p->nqsets; i++)
		sc->sge.qs[p->first_qset + i].lro.enabled = (enabled != 0);

	return (0);
}
#endif
2030181616Skmacy
/*
 * if_ioctl for cxgb ports.  Commands that touch port/adapter state are
 * serviced inside a begin-op/end-op block; anything else (media and the
 * generic ether ioctls) is handled after end-op to avoid deadlocking on
 * re-entry (ether_ioctl may call cxgb_init).
 */
static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct port_info *p = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0, mtu, handle_unsynchronized = 0;
	uint32_t mask;

	if ((error = cxgb_begin_op(p, "cxgbioct")) != 0)
		return (error);

	/*
	 * Only commands that should be handled within begin-op/end-op are
	 * serviced in this switch statement.  See handle_unsynchronized.
	 */
	switch (command) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			error = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}

		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = p->if_flags;
				/* Reprogram MAC only if PROMISC/ALLMULTI changed. */
				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
					PORT_LOCK(p);
					cxgb_update_mac_settings(p);
					PORT_UNLOCK(p);
				}
			} else
				error = cxgb_init_synchronized(p);
			p->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			error = cxgb_uninit_synchronized(p);

		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}

		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				/* TSO requires TX csum; disable both together. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_IP | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
				    | CSUM_IP);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else
				error = EINVAL;
		}
#ifdef LRO_SUPPORTED
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;

			/* Safe to do this even if cxgb_up not called yet */
			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_HWCSUM) {
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		}

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		handle_unsynchronized = 1;
		break;
	}

	/*
	 * We don't want to call anything outside the driver while inside a
	 * begin-op/end-op block.  If it calls us back (eg.  ether_ioctl may
	 * call cxgb_init) we may deadlock if the state is already marked busy.
	 *
	 * XXX: this probably opens a small race window with kldunload...
	 */
	cxgb_end_op(p);

	/* The IS_DOOMED check is racy, we're clutching at straws here */
	if (handle_unsynchronized && !IS_DOOMED(p)) {
		if (command == SIOCSIFMEDIA || command == SIOCGIFMEDIA)
			error = ifmedia_ioctl(ifp, ifr, &p->media, command);
		else
			error = ether_ioctl(ifp, command, data);
	}

	return (error);
}
2167167514Skmacy
/*
 * if_media change handler.  Manual media selection is not supported;
 * media follows the installed PHY/module (see cxgb_build_medialist).
 */
static int
cxgb_media_change(struct ifnet *ifp)
{
	return (EOPNOTSUPP);
}
2173167514Skmacy
2174186282Sgnn/*
2175194921Snp * Translates phy->modtype to the correct Ethernet media subtype.
2176186282Sgnn */
2177186282Sgnnstatic int
2178194921Snpcxgb_ifm_type(int mod)
2179186282Sgnn{
2180194921Snp	switch (mod) {
2181186282Sgnn	case phy_modtype_sr:
2182194921Snp		return (IFM_10G_SR);
2183186282Sgnn	case phy_modtype_lr:
2184194921Snp		return (IFM_10G_LR);
2185186282Sgnn	case phy_modtype_lrm:
2186194921Snp		return (IFM_10G_LRM);
2187186282Sgnn	case phy_modtype_twinax:
2188194921Snp		return (IFM_10G_TWINAX);
2189186282Sgnn	case phy_modtype_twinax_long:
2190194921Snp		return (IFM_10G_TWINAX_LONG);
2191186282Sgnn	case phy_modtype_none:
2192194921Snp		return (IFM_NONE);
2193186282Sgnn	case phy_modtype_unknown:
2194194921Snp		return (IFM_UNKNOWN);
2195186282Sgnn	}
2196186282Sgnn
2197194921Snp	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2198194921Snp	return (IFM_UNKNOWN);
2199186282Sgnn}
2200186282Sgnn
/*
 * Rebuilds the ifmedia list for this port, and sets the current media.
 * The list depends on the PHY capabilities (copper RJ45/CX4, 10G or 1G
 * optical); the installed module type is stashed in each entry's
 * ifm_data so cxgb_media_status can detect module changes.
 */
static void
cxgb_build_medialist(struct port_info *p)
{
	struct cphy *phy = &p->phy;
	struct ifmedia *media = &p->media;
	int mod = phy->modtype;
	int m = IFM_ETHER | IFM_FDX;

	PORT_LOCK(p);

	ifmedia_removeall(media);
	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
		/* Copper (RJ45) */

		if (phy->caps & SUPPORTED_10000baseT_Full)
			ifmedia_add(media, m | IFM_10G_T, mod, NULL);

		if (phy->caps & SUPPORTED_1000baseT_Full)
			ifmedia_add(media, m | IFM_1000_T, mod, NULL);

		if (phy->caps & SUPPORTED_100baseT_Full)
			ifmedia_add(media, m | IFM_100_TX, mod, NULL);

		if (phy->caps & SUPPORTED_10baseT_Full)
			ifmedia_add(media, m | IFM_10_T, mod, NULL);

		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);

	} else if (phy->caps & SUPPORTED_TP) {
		/* Copper (CX4) */

		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
			("%s: unexpected cap 0x%x", __func__, phy->caps));

		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);

	} else if (phy->caps & SUPPORTED_FIBRE &&
		   phy->caps & SUPPORTED_10000baseT_Full) {
		/* 10G optical (but includes SFP+ twinax) */

		m |= cxgb_ifm_type(mod);
		if (IFM_SUBTYPE(m) == IFM_NONE)
			m &= ~IFM_FDX;

		ifmedia_add(media, m, mod, NULL);
		ifmedia_set(media, m);

	} else if (phy->caps & SUPPORTED_FIBRE &&
		   phy->caps & SUPPORTED_1000baseT_Full) {
		/* 1G optical */

		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
		ifmedia_set(media, m | IFM_1000_SX);

	} else {
		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
			    phy->caps));
	}

	PORT_UNLOCK(p);
}
2268194921Snp
/*
 * if_media status handler.  Rebuilds the media list if the installed
 * module changed, reports link validity/activity, and for autoselect
 * (copper RJ45 only) fills in the active subtype from the link speed.
 * NOTE(review): for non-auto media, ifm_active is presumably pre-filled
 * by the ifmedia framework before this callback runs — confirm.
 */
static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = ifp->if_softc;
	struct ifmedia_entry *cur = p->media.ifm_cur;
	int speed = p->link_config.speed;

	/* ifm_data holds the module type the list was built for. */
	if (cur->ifm_data != p->phy.modtype) {
		cxgb_build_medialist(p);
		cur = p->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * active and current will differ iff current media is autoselect.  That
	 * can happen only for copper RJ45.
	 */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;
	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}
2309167514Skmacy
2310167514Skmacystatic void
2311167514Skmacycxgb_async_intr(void *data)
2312167514Skmacy{
2313167760Skmacy	adapter_t *sc = data;
2314167760Skmacy
2315167514Skmacy	if (cxgb_debug)
2316167760Skmacy		device_printf(sc->dev, "cxgb_async_intr\n");
2317170869Skmacy	/*
2318170869Skmacy	 * May need to sleep - defer to taskqueue
2319170869Skmacy	 */
2320170869Skmacy	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2321167514Skmacy}
2322167514Skmacy
/*
 * Taskqueue handler for external (PHY) interrupts: service the PHY,
 * then re-enable the T3DBG external interrupt source if slow
 * interrupts are still enabled for this adapter.
 */
static void
cxgb_ext_intr_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;

	if (cxgb_debug)
		printf("cxgb_ext_intr_handler\n");

	t3_phy_intr_handler(sc);

	/* Now reenable external interrupts */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask |= F_T3DBG;
		/* Ack any pending T3DBG before unmasking it again. */
		t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
	}
	ADAPTER_UNLOCK(sc);
}
2342167514Skmacy
2343197791Snpstatic inline int
2344197791Snplink_poll_needed(struct port_info *p)
2345197791Snp{
2346197791Snp	struct cphy *phy = &p->phy;
2347197791Snp
2348197791Snp	if (phy->caps & POLL_LINK_1ST_TIME) {
2349197791Snp		p->phy.caps &= ~POLL_LINK_1ST_TIME;
2350197791Snp		return (1);
2351197791Snp	}
2352197791Snp
2353197791Snp	return (p->link_fault || !(phy->caps & SUPPORTED_LINK_IRQ));
2354197791Snp}
2355197791Snp
2356167514Skmacystatic void
2357167746Skmacycheck_link_status(adapter_t *sc)
2358167514Skmacy{
2359167746Skmacy	int i;
2360167514Skmacy
2361167746Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
2362167746Skmacy		struct port_info *p = &sc->port[i];
2363167514Skmacy
2364194521Skmacy		if (!isset(&sc->open_device_map, p->port_id))
2365194521Skmacy			continue;
2366194521Skmacy
2367197791Snp		if (link_poll_needed(p))
2368167746Skmacy			t3_link_changed(sc, i);
2369167746Skmacy	}
2370167514Skmacy}
2371167514Skmacy
2372167514Skmacystatic void
2373194521Skmacycheck_t3b2_mac(struct adapter *sc)
2374167514Skmacy{
2375167514Skmacy	int i;
2376167514Skmacy
2377194521Skmacy	if (sc->flags & CXGB_SHUTDOWN)
2378176472Skmacy		return;
2379194521Skmacy
2380194521Skmacy	for_each_port(sc, i) {
2381194521Skmacy		struct port_info *p = &sc->port[i];
2382194521Skmacy		int status;
2383194521Skmacy#ifdef INVARIANTS
2384167746Skmacy		struct ifnet *ifp = p->ifp;
2385194521Skmacy#endif
2386194521Skmacy
2387197791Snp		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2388197791Snp		    !p->link_config.link_ok)
2389167746Skmacy			continue;
2390194521Skmacy
2391194521Skmacy		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2392194521Skmacy			("%s: state mismatch (drv_flags %x, device_map %x)",
2393194521Skmacy			 __func__, ifp->if_drv_flags, sc->open_device_map));
2394194521Skmacy
2395167746Skmacy		PORT_LOCK(p);
2396194521Skmacy		status = t3b2_mac_watchdog_task(&p->mac);
2397167746Skmacy		if (status == 1)
2398167746Skmacy			p->mac.stats.num_toggled++;
2399167746Skmacy		else if (status == 2) {
2400167746Skmacy			struct cmac *mac = &p->mac;
2401167746Skmacy
2402194521Skmacy			cxgb_update_mac_settings(p);
2403167746Skmacy			t3_link_start(&p->phy, mac, &p->link_config);
2404167746Skmacy			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2405194521Skmacy			t3_port_intr_enable(sc, p->port_id);
2406167746Skmacy			p->mac.stats.num_resets++;
2407167746Skmacy		}
2408167746Skmacy		PORT_UNLOCK(p);
2409167514Skmacy	}
2410167514Skmacy}
2411167514Skmacy
2412167746Skmacystatic void
2413167746Skmacycxgb_tick(void *arg)
2414167746Skmacy{
2415167746Skmacy	adapter_t *sc = (adapter_t *)arg;
2416170869Skmacy
2417194521Skmacy	if (sc->flags & CXGB_SHUTDOWN)
2418176472Skmacy		return;
2419174708Skmacy
2420185508Skmacy	taskqueue_enqueue(sc->tq, &sc->tick_task);
2421181652Skmacy	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
2422170869Skmacy}
2423170869Skmacy
/*
 * Periodic tick task.  Polls link state, runs the T3B2 MAC watchdog where
 * applicable, accounts free-list-empty events, refreshes the MAC statistics
 * for every open port into the ifnet counters, and counts RX FIFO overflows.
 */
static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;
	int i;
	uint32_t cause, reset;

	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
		return;

	check_link_status(sc);

	/* The B2 MAC watchdog is only needed on <4-port rev B2 adapters. */
	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
		check_t3b2_mac(sc);

	cause = t3_read_reg(sc, A_SG_INT_CAUSE);
	reset = 0;
	if (cause & F_FLEMPTY) {
		struct sge_qset *qs = &sc->sge.qs[0];

		i = 0;
		reset |= F_FLEMPTY;

		/* One status bit per free list, two free lists per qset. */
		cause = (t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) >>
			 S_FL0EMPTY) & 0xffff;
		while (cause) {
			qs->fl[i].empty += (cause & 1);
			if (i)
				qs++;	/* advance after both FLs of a qset */
			i ^= 1;
			cause >>= 1;
		}
	}
	/* Acknowledge (clear) only the causes we handled. */
	t3_write_reg(sc, A_SG_INT_CAUSE, reset);

	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct ifnet *ifp = pi->ifp;
		struct cmac *mac = &pi->mac;
		struct mac_stats *mstats = &mac->stats;

		if (!isset(&sc->open_device_map, pi->port_id))
			continue;

		/* Pull the hardware counters into the softc under the lock. */
		PORT_LOCK(pi);
		t3_mac_update_stats(mac);
		PORT_UNLOCK(pi);

		/* Mirror the MAC statistics into the ifnet counters. */
		ifp->if_opackets =
		    mstats->tx_frames_64 +
		    mstats->tx_frames_65_127 +
		    mstats->tx_frames_128_255 +
		    mstats->tx_frames_256_511 +
		    mstats->tx_frames_512_1023 +
		    mstats->tx_frames_1024_1518 +
		    mstats->tx_frames_1519_max;

		ifp->if_ipackets =
		    mstats->rx_frames_64 +
		    mstats->rx_frames_65_127 +
		    mstats->rx_frames_128_255 +
		    mstats->rx_frames_256_511 +
		    mstats->rx_frames_512_1023 +
		    mstats->rx_frames_1024_1518 +
		    mstats->rx_frames_1519_max;

		ifp->if_obytes = mstats->tx_octets;
		ifp->if_ibytes = mstats->rx_octets;
		ifp->if_omcasts = mstats->tx_mcast_frames;
		ifp->if_imcasts = mstats->rx_mcast_frames;

		ifp->if_collisions =
		    mstats->tx_total_collisions;

		ifp->if_iqdrops = mstats->rx_cong_drops;

		ifp->if_oerrors =
		    mstats->tx_excess_collisions +
		    mstats->tx_underrun +
		    mstats->tx_len_errs +
		    mstats->tx_mac_internal_errs +
		    mstats->tx_excess_deferral +
		    mstats->tx_fcs_errs;
		ifp->if_ierrors =
		    mstats->rx_jabber +
		    mstats->rx_data_errs +
		    mstats->rx_sequence_errs +
		    mstats->rx_runt +
		    mstats->rx_too_long +
		    mstats->rx_mac_internal_errs +
		    mstats->rx_short +
		    mstats->rx_fcs_errs;

		/* Multiport MACs don't have per-port overflow registers. */
		if (mac->multiport)
			continue;

		/* Count rx fifo overflows, once per second */
		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}
		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
	}
}
2531167746Skmacy
/*
 * Placeholder for rewriting the 64-bit BARs on 32-bit systems.  The body
 * is intentionally compiled out (the `&& 0`) and the function is currently
 * a no-op.
 */
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}
2549171978Skmacy
2550167514Skmacystatic int
2551171471Skmacyset_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2552171471Skmacy{
2553171471Skmacy	uint8_t *buf;
2554171471Skmacy	int err = 0;
2555171471Skmacy	u32 aligned_offset, aligned_len, *p;
2556171471Skmacy	struct adapter *adapter = pi->adapter;
2557171471Skmacy
2558171471Skmacy
2559171471Skmacy	aligned_offset = offset & ~3;
2560171471Skmacy	aligned_len = (len + (offset & 3) + 3) & ~3;
2561171471Skmacy
2562171471Skmacy	if (aligned_offset != offset || aligned_len != len) {
2563171471Skmacy		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2564171471Skmacy		if (!buf)
2565171471Skmacy			return (ENOMEM);
2566171471Skmacy		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2567171471Skmacy		if (!err && aligned_len > 4)
2568171471Skmacy			err = t3_seeprom_read(adapter,
2569171471Skmacy					      aligned_offset + aligned_len - 4,
2570171471Skmacy					      (u32 *)&buf[aligned_len - 4]);
2571171471Skmacy		if (err)
2572171471Skmacy			goto out;
2573171471Skmacy		memcpy(buf + (offset & 3), data, len);
2574171471Skmacy	} else
2575171471Skmacy		buf = (uint8_t *)(uintptr_t)data;
2576171471Skmacy
2577171471Skmacy	err = t3_seeprom_wp(adapter, 0);
2578171471Skmacy	if (err)
2579171471Skmacy		goto out;
2580171471Skmacy
2581171471Skmacy	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2582171471Skmacy		err = t3_seeprom_write(adapter, aligned_offset, *p);
2583171471Skmacy		aligned_offset += 4;
2584171471Skmacy	}
2585171471Skmacy
2586171471Skmacy	if (!err)
2587171471Skmacy		err = t3_seeprom_wp(adapter, 1);
2588171471Skmacyout:
2589171471Skmacy	if (buf != data)
2590171471Skmacy		free(buf, M_DEVBUF);
2591171471Skmacy	return err;
2592171471Skmacy}
2593171471Skmacy
2594171471Skmacy
/*
 * Range check for ioctl parameters: a negative value means "leave
 * unchanged" and is always accepted, otherwise val must lie in [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{
	if (val < 0)
		return (1);
	return (val >= lo && val <= hi);
}
2600167514Skmacy
/* Open handler for the control device; it is stateless, nothing to do. */
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
{
	return (0);
}
2606170654Skmacy
/* Close handler for the control device; nothing to tear down. */
static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}
2612170654Skmacy
2613170654Skmacystatic int
2614167514Skmacycxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2615167514Skmacy    int fflag, struct thread *td)
2616167514Skmacy{
2617167514Skmacy	int mmd, error = 0;
2618167514Skmacy	struct port_info *pi = dev->si_drv1;
2619167514Skmacy	adapter_t *sc = pi->adapter;
2620167514Skmacy
2621167514Skmacy#ifdef PRIV_SUPPORTED
2622167514Skmacy	if (priv_check(td, PRIV_DRIVER)) {
2623167514Skmacy		if (cxgb_debug)
2624167514Skmacy			printf("user does not have access to privileged ioctls\n");
2625167514Skmacy		return (EPERM);
2626167514Skmacy	}
2627167514Skmacy#else
2628167514Skmacy	if (suser(td)) {
2629167514Skmacy		if (cxgb_debug)
2630167514Skmacy			printf("user does not have access to privileged ioctls\n");
2631167514Skmacy		return (EPERM);
2632167514Skmacy	}
2633167514Skmacy#endif
2634167514Skmacy
2635167514Skmacy	switch (cmd) {
2636182679Skmacy	case CHELSIO_GET_MIIREG: {
2637167514Skmacy		uint32_t val;
2638167514Skmacy		struct cphy *phy = &pi->phy;
2639182679Skmacy		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2640167514Skmacy
2641167514Skmacy		if (!phy->mdio_read)
2642167514Skmacy			return (EOPNOTSUPP);
2643167514Skmacy		if (is_10G(sc)) {
2644167514Skmacy			mmd = mid->phy_id >> 8;
2645167514Skmacy			if (!mmd)
2646167514Skmacy				mmd = MDIO_DEV_PCS;
2647190330Sgnn			else if (mmd > MDIO_DEV_VEND2)
2648171471Skmacy				return (EINVAL);
2649167514Skmacy
2650167514Skmacy			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2651167514Skmacy					     mid->reg_num, &val);
2652167514Skmacy		} else
2653167514Skmacy		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2654167514Skmacy					     mid->reg_num & 0x1f, &val);
2655167514Skmacy		if (error == 0)
2656167514Skmacy			mid->val_out = val;
2657167514Skmacy		break;
2658167514Skmacy	}
2659182679Skmacy	case CHELSIO_SET_MIIREG: {
2660167514Skmacy		struct cphy *phy = &pi->phy;
2661182679Skmacy		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2662167514Skmacy
2663167514Skmacy		if (!phy->mdio_write)
2664167514Skmacy			return (EOPNOTSUPP);
2665167514Skmacy		if (is_10G(sc)) {
2666167514Skmacy			mmd = mid->phy_id >> 8;
2667167514Skmacy			if (!mmd)
2668167514Skmacy				mmd = MDIO_DEV_PCS;
2669190330Sgnn			else if (mmd > MDIO_DEV_VEND2)
2670167514Skmacy				return (EINVAL);
2671167514Skmacy
2672167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2673167514Skmacy					      mmd, mid->reg_num, mid->val_in);
2674167514Skmacy		} else
2675167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2676167514Skmacy					      mid->reg_num & 0x1f,
2677167514Skmacy					      mid->val_in);
2678167514Skmacy		break;
2679167514Skmacy	}
2680167514Skmacy	case CHELSIO_SETREG: {
2681167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2682167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2683167514Skmacy			return (EFAULT);
2684167514Skmacy		t3_write_reg(sc, edata->addr, edata->val);
2685167514Skmacy		break;
2686167514Skmacy	}
2687167514Skmacy	case CHELSIO_GETREG: {
2688167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2689167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2690167514Skmacy			return (EFAULT);
2691167514Skmacy		edata->val = t3_read_reg(sc, edata->addr);
2692167514Skmacy		break;
2693167514Skmacy	}
2694167514Skmacy	case CHELSIO_GET_SGE_CONTEXT: {
2695167514Skmacy		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2696176472Skmacy		mtx_lock_spin(&sc->sge.reg_lock);
2697167514Skmacy		switch (ecntxt->cntxt_type) {
2698167514Skmacy		case CNTXT_TYPE_EGRESS:
2699182679Skmacy			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2700167514Skmacy			    ecntxt->data);
2701167514Skmacy			break;
2702167514Skmacy		case CNTXT_TYPE_FL:
2703182679Skmacy			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2704167514Skmacy			    ecntxt->data);
2705167514Skmacy			break;
2706167514Skmacy		case CNTXT_TYPE_RSP:
2707182679Skmacy			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2708167514Skmacy			    ecntxt->data);
2709167514Skmacy			break;
2710167514Skmacy		case CNTXT_TYPE_CQ:
2711182679Skmacy			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2712167514Skmacy			    ecntxt->data);
2713167514Skmacy			break;
2714167514Skmacy		default:
2715167514Skmacy			error = EINVAL;
2716167514Skmacy			break;
2717167514Skmacy		}
2718176472Skmacy		mtx_unlock_spin(&sc->sge.reg_lock);
2719167514Skmacy		break;
2720167514Skmacy	}
2721167514Skmacy	case CHELSIO_GET_SGE_DESC: {
2722167514Skmacy		struct ch_desc *edesc = (struct ch_desc *)data;
2723167514Skmacy		int ret;
2724167514Skmacy		if (edesc->queue_num >= SGE_QSETS * 6)
2725167514Skmacy			return (EINVAL);
2726167514Skmacy		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2727167514Skmacy		    edesc->queue_num % 6, edesc->idx, edesc->data);
2728167514Skmacy		if (ret < 0)
2729167514Skmacy			return (EINVAL);
2730167514Skmacy		edesc->size = ret;
2731167514Skmacy		break;
2732167514Skmacy	}
2733182679Skmacy	case CHELSIO_GET_QSET_PARAMS: {
2734167514Skmacy		struct qset_params *q;
2735167514Skmacy		struct ch_qset_params *t = (struct ch_qset_params *)data;
2736182679Skmacy		int q1 = pi->first_qset;
2737182679Skmacy		int nqsets = pi->nqsets;
2738176472Skmacy		int i;
2739176472Skmacy
2740182679Skmacy		if (t->qset_idx >= nqsets)
2741182679Skmacy			return EINVAL;
2742167514Skmacy
2743182679Skmacy		i = q1 + t->qset_idx;
2744182679Skmacy		q = &sc->params.sge.qset[i];
2745167514Skmacy		t->rspq_size   = q->rspq_size;
2746167514Skmacy		t->txq_size[0] = q->txq_size[0];
2747167514Skmacy		t->txq_size[1] = q->txq_size[1];
2748167514Skmacy		t->txq_size[2] = q->txq_size[2];
2749167514Skmacy		t->fl_size[0]  = q->fl_size;
2750167514Skmacy		t->fl_size[1]  = q->jumbo_size;
2751167514Skmacy		t->polling     = q->polling;
2752182679Skmacy		t->lro         = q->lro;
2753180583Skmacy		t->intr_lat    = q->coalesce_usecs;
2754167514Skmacy		t->cong_thres  = q->cong_thres;
2755182679Skmacy		t->qnum        = i;
2756182679Skmacy
2757182679Skmacy		if (sc->flags & USING_MSIX)
2758182679Skmacy			t->vector = rman_get_start(sc->msix_irq_res[i]);
2759182679Skmacy		else
2760182679Skmacy			t->vector = rman_get_start(sc->irq_res);
2761182679Skmacy
2762167514Skmacy		break;
2763167514Skmacy	}
2764182679Skmacy	case CHELSIO_GET_QSET_NUM: {
2765167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2766182679Skmacy		edata->val = pi->nqsets;
2767182679Skmacy		break;
2768182679Skmacy	}
2769182679Skmacy	case CHELSIO_LOAD_FW: {
2770182679Skmacy		uint8_t *fw_data;
2771182679Skmacy		uint32_t vers;
2772182679Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2773182679Skmacy
2774167514Skmacy		/*
2775182679Skmacy		 * You're allowed to load a firmware only before FULL_INIT_DONE
2776182679Skmacy		 *
2777182679Skmacy		 * FW_UPTODATE is also set so the rest of the initialization
2778182679Skmacy		 * will not overwrite what was loaded here.  This gives you the
2779182679Skmacy		 * flexibility to load any firmware (and maybe shoot yourself in
2780182679Skmacy		 * the foot).
2781167514Skmacy		 */
2782182679Skmacy
2783182679Skmacy		ADAPTER_LOCK(sc);
2784182679Skmacy		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2785182679Skmacy			ADAPTER_UNLOCK(sc);
2786182679Skmacy			return (EBUSY);
2787182679Skmacy		}
2788182679Skmacy
2789182679Skmacy		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2790182679Skmacy		if (!fw_data)
2791182679Skmacy			error = ENOMEM;
2792182679Skmacy		else
2793182679Skmacy			error = copyin(t->buf, fw_data, t->len);
2794182679Skmacy
2795182679Skmacy		if (!error)
2796182679Skmacy			error = -t3_load_fw(sc, fw_data, t->len);
2797182679Skmacy
2798182679Skmacy		if (t3_get_fw_version(sc, &vers) == 0) {
2799182679Skmacy			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2800182679Skmacy			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2801182679Skmacy			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2802182679Skmacy		}
2803182679Skmacy
2804182679Skmacy		if (!error)
2805182679Skmacy			sc->flags |= FW_UPTODATE;
2806182679Skmacy
2807182679Skmacy		free(fw_data, M_DEVBUF);
2808182679Skmacy		ADAPTER_UNLOCK(sc);
2809167514Skmacy		break;
2810167514Skmacy	}
2811182679Skmacy	case CHELSIO_LOAD_BOOT: {
2812182679Skmacy		uint8_t *boot_data;
2813182679Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2814182679Skmacy
2815182679Skmacy		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2816182679Skmacy		if (!boot_data)
2817182679Skmacy			return ENOMEM;
2818182679Skmacy
2819182679Skmacy		error = copyin(t->buf, boot_data, t->len);
2820182679Skmacy		if (!error)
2821182679Skmacy			error = -t3_load_boot(sc, boot_data, t->len);
2822182679Skmacy
2823182679Skmacy		free(boot_data, M_DEVBUF);
2824167514Skmacy		break;
2825167514Skmacy	}
2826182679Skmacy	case CHELSIO_GET_PM: {
2827182679Skmacy		struct ch_pm *m = (struct ch_pm *)data;
2828182679Skmacy		struct tp_params *p = &sc->params.tp;
2829182679Skmacy
2830182679Skmacy		if (!is_offload(sc))
2831182679Skmacy			return (EOPNOTSUPP);
2832182679Skmacy
2833182679Skmacy		m->tx_pg_sz = p->tx_pg_size;
2834182679Skmacy		m->tx_num_pg = p->tx_num_pgs;
2835182679Skmacy		m->rx_pg_sz  = p->rx_pg_size;
2836182679Skmacy		m->rx_num_pg = p->rx_num_pgs;
2837182679Skmacy		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
2838182679Skmacy
2839167514Skmacy		break;
2840182679Skmacy	}
2841182679Skmacy	case CHELSIO_SET_PM: {
2842182679Skmacy		struct ch_pm *m = (struct ch_pm *)data;
2843182679Skmacy		struct tp_params *p = &sc->params.tp;
2844182679Skmacy
2845182679Skmacy		if (!is_offload(sc))
2846182679Skmacy			return (EOPNOTSUPP);
2847182679Skmacy		if (sc->flags & FULL_INIT_DONE)
2848182679Skmacy			return (EBUSY);
2849182679Skmacy
2850182679Skmacy		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2851182679Skmacy		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2852182679Skmacy			return (EINVAL);	/* not power of 2 */
2853182679Skmacy		if (!(m->rx_pg_sz & 0x14000))
2854182679Skmacy			return (EINVAL);	/* not 16KB or 64KB */
2855182679Skmacy		if (!(m->tx_pg_sz & 0x1554000))
2856182679Skmacy			return (EINVAL);
2857182679Skmacy		if (m->tx_num_pg == -1)
2858182679Skmacy			m->tx_num_pg = p->tx_num_pgs;
2859182679Skmacy		if (m->rx_num_pg == -1)
2860182679Skmacy			m->rx_num_pg = p->rx_num_pgs;
2861182679Skmacy		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2862182679Skmacy			return (EINVAL);
2863182679Skmacy		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2864182679Skmacy		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2865182679Skmacy			return (EINVAL);
2866182679Skmacy
2867182679Skmacy		p->rx_pg_size = m->rx_pg_sz;
2868182679Skmacy		p->tx_pg_size = m->tx_pg_sz;
2869182679Skmacy		p->rx_num_pgs = m->rx_num_pg;
2870182679Skmacy		p->tx_num_pgs = m->tx_num_pg;
2871182679Skmacy		break;
2872182679Skmacy	}
2873169978Skmacy	case CHELSIO_SETMTUTAB: {
2874169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2875169978Skmacy		int i;
2876169978Skmacy
2877169978Skmacy		if (!is_offload(sc))
2878169978Skmacy			return (EOPNOTSUPP);
2879169978Skmacy		if (offload_running(sc))
2880169978Skmacy			return (EBUSY);
2881169978Skmacy		if (m->nmtus != NMTUS)
2882169978Skmacy			return (EINVAL);
2883169978Skmacy		if (m->mtus[0] < 81)         /* accommodate SACK */
2884169978Skmacy			return (EINVAL);
2885169978Skmacy
2886169978Skmacy		/*
2887169978Skmacy		 * MTUs must be in ascending order
2888169978Skmacy		 */
2889169978Skmacy		for (i = 1; i < NMTUS; ++i)
2890169978Skmacy			if (m->mtus[i] < m->mtus[i - 1])
2891169978Skmacy				return (EINVAL);
2892169978Skmacy
2893182679Skmacy		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2894169978Skmacy		break;
2895169978Skmacy	}
2896169978Skmacy	case CHELSIO_GETMTUTAB: {
2897169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2898169978Skmacy
2899169978Skmacy		if (!is_offload(sc))
2900169978Skmacy			return (EOPNOTSUPP);
2901169978Skmacy
2902169978Skmacy		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2903169978Skmacy		m->nmtus = NMTUS;
2904169978Skmacy		break;
2905171471Skmacy	}
2906167514Skmacy	case CHELSIO_GET_MEM: {
2907167514Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2908167514Skmacy		struct mc7 *mem;
2909167514Skmacy		uint8_t *useraddr;
2910167514Skmacy		u64 buf[32];
2911182679Skmacy
2912182679Skmacy		/*
2913182679Skmacy		 * Use these to avoid modifying len/addr in the the return
2914182679Skmacy		 * struct
2915182679Skmacy		 */
2916182679Skmacy		uint32_t len = t->len, addr = t->addr;
2917182679Skmacy
2918167514Skmacy		if (!is_offload(sc))
2919167514Skmacy			return (EOPNOTSUPP);
2920167514Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2921167514Skmacy			return (EIO);         /* need the memory controllers */
2922182679Skmacy		if ((addr & 0x7) || (len & 0x7))
2923167514Skmacy			return (EINVAL);
2924167514Skmacy		if (t->mem_id == MEM_CM)
2925167514Skmacy			mem = &sc->cm;
2926167514Skmacy		else if (t->mem_id == MEM_PMRX)
2927167514Skmacy			mem = &sc->pmrx;
2928167514Skmacy		else if (t->mem_id == MEM_PMTX)
2929167514Skmacy			mem = &sc->pmtx;
2930167514Skmacy		else
2931167514Skmacy			return (EINVAL);
2932167514Skmacy
2933167514Skmacy		/*
2934167514Skmacy		 * Version scheme:
2935167514Skmacy		 * bits 0..9: chip version
2936167514Skmacy		 * bits 10..15: chip revision
2937167514Skmacy		 */
2938167514Skmacy		t->version = 3 | (sc->params.rev << 10);
2939167514Skmacy
2940167514Skmacy		/*
2941167514Skmacy		 * Read 256 bytes at a time as len can be large and we don't
2942167514Skmacy		 * want to use huge intermediate buffers.
2943167514Skmacy		 */
2944174708Skmacy		useraddr = (uint8_t *)t->buf;
2945182679Skmacy		while (len) {
2946182679Skmacy			unsigned int chunk = min(len, sizeof(buf));
2947167514Skmacy
2948182679Skmacy			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2949167514Skmacy			if (error)
2950167514Skmacy				return (-error);
2951167514Skmacy			if (copyout(buf, useraddr, chunk))
2952167514Skmacy				return (EFAULT);
2953167514Skmacy			useraddr += chunk;
2954182679Skmacy			addr += chunk;
2955182679Skmacy			len -= chunk;
2956167514Skmacy		}
2957167514Skmacy		break;
2958167514Skmacy	}
2959169978Skmacy	case CHELSIO_READ_TCAM_WORD: {
2960169978Skmacy		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2961169978Skmacy
2962169978Skmacy		if (!is_offload(sc))
2963169978Skmacy			return (EOPNOTSUPP);
2964171471Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2965171471Skmacy			return (EIO);         /* need MC5 */
2966169978Skmacy		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2967169978Skmacy		break;
2968169978Skmacy	}
2969167514Skmacy	case CHELSIO_SET_TRACE_FILTER: {
2970167514Skmacy		struct ch_trace *t = (struct ch_trace *)data;
2971167514Skmacy		const struct trace_params *tp;
2972167514Skmacy
2973167514Skmacy		tp = (const struct trace_params *)&t->sip;
2974167514Skmacy		if (t->config_tx)
2975167514Skmacy			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2976167514Skmacy					       t->trace_tx);
2977167514Skmacy		if (t->config_rx)
2978167514Skmacy			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2979167514Skmacy					       t->trace_rx);
2980167514Skmacy		break;
2981167514Skmacy	}
2982167514Skmacy	case CHELSIO_SET_PKTSCHED: {
2983167514Skmacy		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2984167514Skmacy		if (sc->open_device_map == 0)
2985167514Skmacy			return (EAGAIN);
2986167514Skmacy		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2987167514Skmacy		    p->binding);
2988167514Skmacy		break;
2989167514Skmacy	}
2990167514Skmacy	case CHELSIO_IFCONF_GETREGS: {
2991182679Skmacy		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2992167514Skmacy		int reglen = cxgb_get_regs_len();
2993182679Skmacy		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
2994167514Skmacy		if (buf == NULL) {
2995167514Skmacy			return (ENOMEM);
2996182679Skmacy		}
2997182679Skmacy		if (regs->len > reglen)
2998167514Skmacy			regs->len = reglen;
2999182679Skmacy		else if (regs->len < reglen)
3000189643Sgnn			error = ENOBUFS;
3001182679Skmacy
3002182679Skmacy		if (!error) {
3003182679Skmacy			cxgb_get_regs(sc, regs, buf);
3004182679Skmacy			error = copyout(buf, regs->data, reglen);
3005167514Skmacy		}
3006167514Skmacy		free(buf, M_DEVBUF);
3007167514Skmacy
3008167514Skmacy		break;
3009167514Skmacy	}
3010169978Skmacy	case CHELSIO_SET_HW_SCHED: {
3011169978Skmacy		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
3012169978Skmacy		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
3013169978Skmacy
3014169978Skmacy		if ((sc->flags & FULL_INIT_DONE) == 0)
3015169978Skmacy			return (EAGAIN);       /* need TP to be initialized */
3016169978Skmacy		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
3017169978Skmacy		    !in_range(t->channel, 0, 1) ||
3018169978Skmacy		    !in_range(t->kbps, 0, 10000000) ||
3019169978Skmacy		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
3020169978Skmacy		    !in_range(t->flow_ipg, 0,
3021169978Skmacy			      dack_ticks_to_usec(sc, 0x7ff)))
3022169978Skmacy			return (EINVAL);
3023169978Skmacy
3024169978Skmacy		if (t->kbps >= 0) {
3025169978Skmacy			error = t3_config_sched(sc, t->kbps, t->sched);
3026169978Skmacy			if (error < 0)
3027169978Skmacy				return (-error);
3028169978Skmacy		}
3029169978Skmacy		if (t->class_ipg >= 0)
3030169978Skmacy			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
3031169978Skmacy		if (t->flow_ipg >= 0) {
3032169978Skmacy			t->flow_ipg *= 1000;     /* us -> ns */
3033169978Skmacy			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
3034169978Skmacy		}
3035169978Skmacy		if (t->mode >= 0) {
3036169978Skmacy			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
3037169978Skmacy
3038169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
3039169978Skmacy					 bit, t->mode ? bit : 0);
3040169978Skmacy		}
3041169978Skmacy		if (t->channel >= 0)
3042169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
3043169978Skmacy					 1 << t->sched, t->channel << t->sched);
3044169978Skmacy		break;
3045182679Skmacy	}
3046182679Skmacy	case CHELSIO_GET_EEPROM: {
3047182679Skmacy		int i;
3048182679Skmacy		struct ch_eeprom *e = (struct ch_eeprom *)data;
3049182679Skmacy		uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
3050182679Skmacy
3051182679Skmacy		if (buf == NULL) {
3052182679Skmacy			return (ENOMEM);
3053182679Skmacy		}
3054182679Skmacy		e->magic = EEPROM_MAGIC;
3055182679Skmacy		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
3056182679Skmacy			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
3057182679Skmacy
3058182679Skmacy		if (!error)
3059182679Skmacy			error = copyout(buf + e->offset, e->data, e->len);
3060182679Skmacy
3061182679Skmacy		free(buf, M_DEVBUF);
3062182679Skmacy		break;
3063182679Skmacy	}
3064182679Skmacy	case CHELSIO_CLEAR_STATS: {
3065182679Skmacy		if (!(sc->flags & FULL_INIT_DONE))
3066182679Skmacy			return EAGAIN;
3067182679Skmacy
3068182679Skmacy		PORT_LOCK(pi);
3069182679Skmacy		t3_mac_update_stats(&pi->mac);
3070182679Skmacy		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
3071182679Skmacy		PORT_UNLOCK(pi);
3072182679Skmacy		break;
3073182679Skmacy	}
3074189643Sgnn	case CHELSIO_GET_UP_LA: {
3075189643Sgnn		struct ch_up_la *la = (struct ch_up_la *)data;
3076189643Sgnn		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
3077189643Sgnn		if (buf == NULL) {
3078189643Sgnn			return (ENOMEM);
3079189643Sgnn		}
3080189643Sgnn		if (la->bufsize < LA_BUFSIZE)
3081189643Sgnn			error = ENOBUFS;
3082189643Sgnn
3083189643Sgnn		if (!error)
3084189643Sgnn			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
3085189643Sgnn					      &la->bufsize, buf);
3086189643Sgnn		if (!error)
3087189643Sgnn			error = copyout(buf, la->data, la->bufsize);
3088189643Sgnn
3089189643Sgnn		free(buf, M_DEVBUF);
3090189643Sgnn		break;
3091189643Sgnn	}
3092189643Sgnn	case CHELSIO_GET_UP_IOQS: {
3093189643Sgnn		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
3094189643Sgnn		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
3095189643Sgnn		uint32_t *v;
3096189643Sgnn
3097189643Sgnn		if (buf == NULL) {
3098189643Sgnn			return (ENOMEM);
3099189643Sgnn		}
3100189643Sgnn		if (ioqs->bufsize < IOQS_BUFSIZE)
3101189643Sgnn			error = ENOBUFS;
3102189643Sgnn
3103189643Sgnn		if (!error)
3104189643Sgnn			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3105189643Sgnn
3106189643Sgnn		if (!error) {
3107189643Sgnn			v = (uint32_t *)buf;
3108189643Sgnn
3109189643Sgnn			ioqs->bufsize -= 4 * sizeof(uint32_t);
3110189643Sgnn			ioqs->ioq_rx_enable = *v++;
3111189643Sgnn			ioqs->ioq_tx_enable = *v++;
3112189643Sgnn			ioqs->ioq_rx_status = *v++;
3113189643Sgnn			ioqs->ioq_tx_status = *v++;
3114189643Sgnn
3115189643Sgnn			error = copyout(v, ioqs->data, ioqs->bufsize);
3116189643Sgnn		}
3117189643Sgnn
3118189643Sgnn		free(buf, M_DEVBUF);
3119189643Sgnn		break;
3120189643Sgnn	}
3121167514Skmacy	default:
3122167514Skmacy		return (EOPNOTSUPP);
3123167514Skmacy		break;
3124167514Skmacy	}
3125167514Skmacy
3126167514Skmacy	return (error);
3127167514Skmacy}
3128167514Skmacy
3129167514Skmacystatic __inline void
3130167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3131167514Skmacy    unsigned int end)
3132167514Skmacy{
3133182679Skmacy	uint32_t *p = (uint32_t *)(buf + start);
3134167514Skmacy
3135167514Skmacy	for ( ; start <= end; start += sizeof(uint32_t))
3136167514Skmacy		*p++ = t3_read_reg(ap, start);
3137167514Skmacy}
3138167514Skmacy
#define T3_REGMAP_SIZE (3 * 1024)

/* Size in bytes of the register snapshot produced by cxgb_get_regs(). */
static int
cxgb_get_regs_len(void)
{
	return (T3_REGMAP_SIZE);
}
3145167514Skmacy
/*
 * Fill buf with a snapshot of the adapter's register space for the
 * CHELSIO_IFCONF_GETREGS ioctl.  buf must be at least cxgb_get_regs_len()
 * bytes; ranges that are not dumped are left zeroed.
 */
static void
cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
{

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, cxgb_get_regs_len());
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
3174176572Skmacy
3175176572Skmacy
3176176572SkmacyMODULE_DEPEND(if_cxgb, cxgb_t3fw, 1, 1, 1);
3177