cxgb_main.c revision 231116
1167514Skmacy/**************************************************************************
2167514Skmacy
3189643SgnnCopyright (c) 2007-2009, Chelsio Inc.
4167514SkmacyAll rights reserved.
5167514Skmacy
6167514SkmacyRedistribution and use in source and binary forms, with or without
7167514Skmacymodification, are permitted provided that the following conditions are met:
8167514Skmacy
9167514Skmacy 1. Redistributions of source code must retain the above copyright notice,
10167514Skmacy    this list of conditions and the following disclaimer.
11167514Skmacy
12178302Skmacy 2. Neither the name of the Chelsio Corporation nor the names of its
13167514Skmacy    contributors may be used to endorse or promote products derived from
14167514Skmacy    this software without specific prior written permission.
15167514Skmacy
16167514SkmacyTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17167514SkmacyAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18167514SkmacyIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19167514SkmacyARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20167514SkmacyLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21167514SkmacyCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22167514SkmacySUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23167514SkmacyINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24167514SkmacyCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25167514SkmacyARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26167514SkmacyPOSSIBILITY OF SUCH DAMAGE.
27167514Skmacy
28167514Skmacy***************************************************************************/
29167514Skmacy
30167514Skmacy#include <sys/cdefs.h>
31167514Skmacy__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 231116 2012-02-07 07:32:39Z np $");
32167514Skmacy
33167514Skmacy#include <sys/param.h>
34167514Skmacy#include <sys/systm.h>
35167514Skmacy#include <sys/kernel.h>
36167514Skmacy#include <sys/bus.h>
37167514Skmacy#include <sys/module.h>
38167514Skmacy#include <sys/pciio.h>
39167514Skmacy#include <sys/conf.h>
40167514Skmacy#include <machine/bus.h>
41167514Skmacy#include <machine/resource.h>
42167514Skmacy#include <sys/bus_dma.h>
43176472Skmacy#include <sys/ktr.h>
44167514Skmacy#include <sys/rman.h>
45167514Skmacy#include <sys/ioccom.h>
46167514Skmacy#include <sys/mbuf.h>
47167514Skmacy#include <sys/linker.h>
48167514Skmacy#include <sys/firmware.h>
49167514Skmacy#include <sys/socket.h>
50167514Skmacy#include <sys/sockio.h>
51167514Skmacy#include <sys/smp.h>
52167514Skmacy#include <sys/sysctl.h>
53174708Skmacy#include <sys/syslog.h>
54167514Skmacy#include <sys/queue.h>
55167514Skmacy#include <sys/taskqueue.h>
56174708Skmacy#include <sys/proc.h>
57167514Skmacy
58167514Skmacy#include <net/bpf.h>
59167514Skmacy#include <net/ethernet.h>
60167514Skmacy#include <net/if.h>
61167514Skmacy#include <net/if_arp.h>
62167514Skmacy#include <net/if_dl.h>
63167514Skmacy#include <net/if_media.h>
64167514Skmacy#include <net/if_types.h>
65180583Skmacy#include <net/if_vlan_var.h>
66167514Skmacy
67167514Skmacy#include <netinet/in_systm.h>
68167514Skmacy#include <netinet/in.h>
69167514Skmacy#include <netinet/if_ether.h>
70167514Skmacy#include <netinet/ip.h>
71167514Skmacy#include <netinet/ip.h>
72167514Skmacy#include <netinet/tcp.h>
73167514Skmacy#include <netinet/udp.h>
74167514Skmacy
75167514Skmacy#include <dev/pci/pcireg.h>
76167514Skmacy#include <dev/pci/pcivar.h>
77167514Skmacy#include <dev/pci/pci_private.h>
78167514Skmacy
79170076Skmacy#include <cxgb_include.h>
80167514Skmacy
81167514Skmacy#ifdef PRIV_SUPPORTED
82167514Skmacy#include <sys/priv.h>
83167514Skmacy#endif
84167514Skmacy
85192933Sgnnstatic int cxgb_setup_interrupts(adapter_t *);
86192933Sgnnstatic void cxgb_teardown_interrupts(adapter_t *);
87167514Skmacystatic void cxgb_init(void *);
88202671Snpstatic int cxgb_init_locked(struct port_info *);
89202671Snpstatic int cxgb_uninit_locked(struct port_info *);
90194521Skmacystatic int cxgb_uninit_synchronized(struct port_info *);
91167514Skmacystatic int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
92167514Skmacystatic int cxgb_media_change(struct ifnet *);
93186282Sgnnstatic int cxgb_ifm_type(int);
94194921Snpstatic void cxgb_build_medialist(struct port_info *);
95167514Skmacystatic void cxgb_media_status(struct ifnet *, struct ifmediareq *);
96167514Skmacystatic int setup_sge_qsets(adapter_t *);
97167514Skmacystatic void cxgb_async_intr(void *);
98170869Skmacystatic void cxgb_tick_handler(void *, int);
99167514Skmacystatic void cxgb_tick(void *);
100209841Snpstatic void link_check_callout(void *);
101209841Snpstatic void check_link_status(void *, int);
102167514Skmacystatic void setup_rss(adapter_t *sc);
103207643Snpstatic int alloc_filters(struct adapter *);
104207643Snpstatic int setup_hw_filters(struct adapter *);
105207643Snpstatic int set_filter(struct adapter *, int, const struct filter_info *);
106207643Snpstatic inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
107207643Snp    unsigned int, u64, u64);
108207643Snpstatic inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
109207643Snp    unsigned int, u64, u64);
110167514Skmacy
111167514Skmacy/* Attachment glue for the PCI controller end of the device.  Each port of
112167514Skmacy * the device is attached separately, as defined later.
113167514Skmacy */
114167514Skmacystatic int cxgb_controller_probe(device_t);
115167514Skmacystatic int cxgb_controller_attach(device_t);
116167514Skmacystatic int cxgb_controller_detach(device_t);
117167514Skmacystatic void cxgb_free(struct adapter *);
118167514Skmacystatic __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
119167514Skmacy    unsigned int end);
120182679Skmacystatic void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
121167514Skmacystatic int cxgb_get_regs_len(void);
122169978Skmacystatic int offload_open(struct port_info *pi);
123171978Skmacystatic void touch_bars(device_t dev);
124174626Skmacystatic int offload_close(struct t3cdev *tdev);
125197791Snpstatic void cxgb_update_mac_settings(struct port_info *p);
126167514Skmacy
/*
 * Newbus glue for the controller (PCI function) device, "cxgbc".
 * Only probe/attach/detach are needed; everything else falls through
 * to the bus defaults.
 */
static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe,		cxgb_controller_probe),
	DEVMETHOD(device_attach,	cxgb_controller_attach),
	DEVMETHOD(device_detach,	cxgb_controller_detach),

	DEVMETHOD_END
};

static driver_t cxgb_controller_driver = {
	"cxgbc",			/* device name prefix */
	cxgb_controller_methods,
	sizeof(struct adapter)		/* softc is the whole adapter state */
};

static devclass_t	cxgb_controller_devclass;
/* Attach the controller driver to the PCI bus. */
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
143167514Skmacy
144167514Skmacy/*
145167514Skmacy * Attachment glue for the ports.  Attachment is done directly to the
146167514Skmacy * controller device.
147167514Skmacy */
148167514Skmacystatic int cxgb_port_probe(device_t);
149167514Skmacystatic int cxgb_port_attach(device_t);
150167514Skmacystatic int cxgb_port_detach(device_t);
151167514Skmacy
/*
 * Newbus glue for the per-port devices, "cxgb".  Ports are children of
 * the controller device (created in cxgb_controller_attach) rather than
 * of the PCI bus itself.
 */
static device_method_t cxgb_port_methods[] = {
	DEVMETHOD(device_probe,		cxgb_port_probe),
	DEVMETHOD(device_attach,	cxgb_port_attach),
	DEVMETHOD(device_detach,	cxgb_port_detach),
	{ 0, 0 }
};

static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0	/* softc is supplied by the parent via device_set_softc() */
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

/*
 * Character-device entry points for the management/"extension" ioctl
 * interface (register dumps, eeprom access, filters, etc.).
 */
static struct cdevsw cxgb_cdevsw = {
       .d_version =    D_VERSION,
       .d_flags =      0,
       .d_open =       cxgb_extension_open,
       .d_close =      cxgb_extension_close,
       .d_ioctl =      cxgb_extension_ioctl,
       .d_name =       "cxgb",
};

static devclass_t	cxgb_port_devclass;
/* Port devices hang off the "cxgbc" controller, not off pci directly. */
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
180167514Skmacy
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;
TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
SYSCTL_INT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
    "disable ULP offload");

/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use multiq = 0
 */
static int multiq = 1;
TUNABLE_INT("hw.cxgb.multiq", &multiq);
SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
    "use min(ncpus/ports, 8) queue-sets per port");

/*
 * By default the driver will not update the firmware unless
 * it was compiled against a newer version
 */
static int force_fw_update = 0;
TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
    "update firmware even if up to date");

/*
 * Non-static: consumed by the SGE code.  -1 apparently means "let the
 * driver decide" -- the actual policy lives elsewhere (verify in cxgb_sge.c).
 */
int cxgb_use_16k_clusters = -1;
TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");

/* -1 presumably selects a hardware-derived default; confirm in alloc_filters(). */
static int nfilters = -1;
TUNABLE_INT("hw.cxgb.nfilters", &nfilters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
    &nfilters, 0, "max number of entries in the filter table");
234194039Sgnn
/*
 * Bounds used when validating user-requested SGE ring sizes
 * (entries per ring, not bytes).
 */
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32,	/* free-list minimum */
	MIN_FL_JUMBO_ENTRIES = 32	/* jumbo free-list minimum */
};
247167514Skmacy
/*
 * Software shadow of one hardware packet-filter entry.  Field semantics
 * mirror the T3 filter TCB programmed by set_filter(); byte/endian
 * conventions are established there, not here.
 */
struct filter_info {
	u32 sip;		/* source IP to match */
	u32 sip_mask;		/* mask applied to sip */
	u32 dip;		/* destination IP to match */
	u16 sport;		/* source port */
	u16 dport;		/* destination port */
	u32 vlan:12;		/* VLAN id */
	u32 vlan_prio:3;	/* VLAN priority (FILTER_NO_VLAN_PRI = wildcard) */
	u32 mac_hit:1;
	u32 mac_idx:4;
	u32 mac_vld:1;
	u32 pkt_type:2;
	u32 report_filter_id:1;
	u32 pass:1;		/* pass (1) or drop (0) matching packets */
	u32 rss:1;		/* let RSS pick the queue instead of qset */
	u32 qset:3;		/* target queue set when rss == 0 */
	u32 locked:1;		/* entry may not be overwritten */
	u32 valid:1;		/* entry is in use */
};

/* Sentinel meaning "don't match on VLAN priority". */
enum { FILTER_NO_VLAN_PRI = 7 };

/* Magic cookie identifying a valid EEPROM image in the ioctl interface. */
#define EEPROM_MAGIC 0x38E2F10C

#define PORT_MASK ((1 << MAX_NPORTS) - 1)
273167514Skmacy
/* Table for probing the cards.  The desc field isn't actually used */
struct cxgb_ident {
	uint16_t	vendor;		/* PCI vendor id */
	uint16_t	device;		/* PCI device id */
	int		index;		/* index passed to t3_get_adapter_info() */
	char		*desc;		/* marketing name; NULL terminates the table */
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
	{0, 0, 0, NULL}
};
297167514Skmacy
298171471Skmacystatic int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
299171471Skmacy
300176472Skmacy
301174708Skmacystatic __inline char
302171471Skmacyt3rev2char(struct adapter *adapter)
303171471Skmacy{
304171471Skmacy	char rev = 'z';
305171471Skmacy
306171471Skmacy	switch(adapter->params.rev) {
307171471Skmacy	case T3_REV_A:
308171471Skmacy		rev = 'a';
309171471Skmacy		break;
310171471Skmacy	case T3_REV_B:
311171471Skmacy	case T3_REV_B2:
312171471Skmacy		rev = 'b';
313171471Skmacy		break;
314171471Skmacy	case T3_REV_C:
315171471Skmacy		rev = 'c';
316171471Skmacy		break;
317171471Skmacy	}
318171471Skmacy	return rev;
319171471Skmacy}
320171471Skmacy
321167514Skmacystatic struct cxgb_ident *
322167514Skmacycxgb_get_ident(device_t dev)
323167514Skmacy{
324167514Skmacy	struct cxgb_ident *id;
325167514Skmacy
326167514Skmacy	for (id = cxgb_identifiers; id->desc != NULL; id++) {
327167514Skmacy		if ((id->vendor == pci_get_vendor(dev)) &&
328167514Skmacy		    (id->device == pci_get_device(dev))) {
329167514Skmacy			return (id);
330167514Skmacy		}
331167514Skmacy	}
332167514Skmacy	return (NULL);
333167514Skmacy}
334167514Skmacy
335167514Skmacystatic const struct adapter_info *
336167514Skmacycxgb_get_adapter_info(device_t dev)
337167514Skmacy{
338167514Skmacy	struct cxgb_ident *id;
339167514Skmacy	const struct adapter_info *ai;
340183063Skmacy
341167514Skmacy	id = cxgb_get_ident(dev);
342167514Skmacy	if (id == NULL)
343167514Skmacy		return (NULL);
344167514Skmacy
345167514Skmacy	ai = t3_get_adapter_info(id->index);
346167514Skmacy
347167514Skmacy	return (ai);
348167514Skmacy}
349167514Skmacy
350167514Skmacystatic int
351167514Skmacycxgb_controller_probe(device_t dev)
352167514Skmacy{
353167514Skmacy	const struct adapter_info *ai;
354167514Skmacy	char *ports, buf[80];
355170654Skmacy	int nports;
356183063Skmacy
357167514Skmacy	ai = cxgb_get_adapter_info(dev);
358167514Skmacy	if (ai == NULL)
359167514Skmacy		return (ENXIO);
360167514Skmacy
361170654Skmacy	nports = ai->nports0 + ai->nports1;
362170654Skmacy	if (nports == 1)
363167514Skmacy		ports = "port";
364167514Skmacy	else
365167514Skmacy		ports = "ports";
366167514Skmacy
367199237Snp	snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
368167514Skmacy	device_set_desc_copy(dev, buf);
369167514Skmacy	return (BUS_PROBE_DEFAULT);
370167514Skmacy}
371167514Skmacy
372176572Skmacy#define FW_FNAME "cxgb_t3fw"
373190330Sgnn#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
374190330Sgnn#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
375171471Skmacy
376167514Skmacystatic int
377169978Skmacyupgrade_fw(adapter_t *sc)
378167514Skmacy{
379167514Skmacy	const struct firmware *fw;
380167514Skmacy	int status;
381205944Snp	u32 vers;
382167514Skmacy
383176572Skmacy	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
384176572Skmacy		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
385169978Skmacy		return (ENOENT);
386171471Skmacy	} else
387205944Snp		device_printf(sc->dev, "installing firmware on card\n");
388167514Skmacy	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
389167514Skmacy
390205944Snp	if (status != 0) {
391205944Snp		device_printf(sc->dev, "failed to install firmware: %d\n",
392205944Snp		    status);
393205944Snp	} else {
394205944Snp		t3_get_fw_version(sc, &vers);
395205944Snp		snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
396205944Snp		    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
397205944Snp		    G_FW_VERSION_MICRO(vers));
398205944Snp	}
399205944Snp
400167514Skmacy	firmware_put(fw, FIRMWARE_UNLOAD);
401167514Skmacy
402167514Skmacy	return (status);
403167514Skmacy}
404167514Skmacy
405192537Sgnn/*
406192537Sgnn * The cxgb_controller_attach function is responsible for the initial
407192537Sgnn * bringup of the device.  Its responsibilities include:
408192537Sgnn *
409192537Sgnn *  1. Determine if the device supports MSI or MSI-X.
410192537Sgnn *  2. Allocate bus resources so that we can access the Base Address Register
411192537Sgnn *  3. Create and initialize mutexes for the controller and its control
412192537Sgnn *     logic such as SGE and MDIO.
413192537Sgnn *  4. Call hardware specific setup routine for the adapter as a whole.
414192537Sgnn *  5. Allocate the BAR for doing MSI-X.
415192537Sgnn *  6. Setup the line interrupt iff MSI-X is not supported.
416192537Sgnn *  7. Create the driver's taskq.
417192584Sgnn *  8. Start one task queue service thread.
418192584Sgnn *  9. Check if the firmware and SRAM are up-to-date.  They will be
419192584Sgnn *     auto-updated later (before FULL_INIT_DONE), if required.
420192537Sgnn * 10. Create a child device for each MAC (port)
421192537Sgnn * 11. Initialize T3 private state.
422192537Sgnn * 12. Trigger the LED
423192537Sgnn * 13. Setup offload iff supported.
424192537Sgnn * 14. Reset/restart the tick callout.
425192537Sgnn * 15. Attach sysctls
426192537Sgnn *
427192537Sgnn * NOTE: Any modification or deviation from this list MUST be reflected in
428192537Sgnn * the above comment.  Failure to do so will result in problems on various
429192537Sgnn * error conditions including link flapping.
430192537Sgnn */
431167514Skmacystatic int
432167514Skmacycxgb_controller_attach(device_t dev)
433167514Skmacy{
434167514Skmacy	device_t child;
435167514Skmacy	const struct adapter_info *ai;
436167514Skmacy	struct adapter *sc;
437172109Skmacy	int i, error = 0;
438167514Skmacy	uint32_t vers;
439167760Skmacy	int port_qsets = 1;
440172109Skmacy	int msi_needed, reg;
441185655Sgnn	char buf[80];
442185655Sgnn
443167514Skmacy	sc = device_get_softc(dev);
444167514Skmacy	sc->dev = dev;
445169978Skmacy	sc->msi_count = 0;
446172109Skmacy	ai = cxgb_get_adapter_info(dev);
447172109Skmacy
448167840Skmacy	/* find the PCIe link width and set max read request to 4KB*/
449219902Sjhb	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
450210505Sjhb		uint16_t lnk;
451171471Skmacy
452210505Sjhb		lnk = pci_read_config(dev, reg + PCIR_EXPRESS_LINK_STA, 2);
453210505Sjhb		sc->link_width = (lnk & PCIM_LINK_STA_WIDTH) >> 4;
454210505Sjhb		if (sc->link_width < 8 &&
455210505Sjhb		    (ai->caps & SUPPORTED_10000baseT_Full)) {
456210505Sjhb			device_printf(sc->dev,
457210505Sjhb			    "PCIe x%d Link, expect reduced performance\n",
458210505Sjhb			    sc->link_width);
459210505Sjhb		}
460210505Sjhb
461210505Sjhb		pci_set_max_read_req(dev, 4096);
462167840Skmacy	}
463204274Snp
464171978Skmacy	touch_bars(dev);
465167514Skmacy	pci_enable_busmaster(dev);
466167514Skmacy	/*
467167514Skmacy	 * Allocate the registers and make them available to the driver.
468167514Skmacy	 * The registers that we care about for NIC mode are in BAR 0
469167514Skmacy	 */
470167514Skmacy	sc->regs_rid = PCIR_BAR(0);
471167514Skmacy	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
472167514Skmacy	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
473176472Skmacy		device_printf(dev, "Cannot allocate BAR region 0\n");
474167514Skmacy		return (ENXIO);
475167514Skmacy	}
476176472Skmacy	sc->udbs_rid = PCIR_BAR(2);
477185662Sgnn	sc->udbs_res = NULL;
478185662Sgnn	if (is_offload(sc) &&
479185662Sgnn	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
480185662Sgnn		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
481176472Skmacy		device_printf(dev, "Cannot allocate BAR region 1\n");
482176472Skmacy		error = ENXIO;
483176472Skmacy		goto out;
484185662Sgnn	}
485167514Skmacy
486170869Skmacy	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
487170869Skmacy	    device_get_unit(dev));
488170869Skmacy	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
489170869Skmacy
490170869Skmacy	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
491170869Skmacy	    device_get_unit(dev));
492170869Skmacy	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
493170869Skmacy	    device_get_unit(dev));
494170869Skmacy	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
495170869Skmacy	    device_get_unit(dev));
496167514Skmacy
497176472Skmacy	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
498170869Skmacy	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
499170869Skmacy	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
500170869Skmacy
501167514Skmacy	sc->bt = rman_get_bustag(sc->regs_res);
502167514Skmacy	sc->bh = rman_get_bushandle(sc->regs_res);
503167514Skmacy	sc->mmio_len = rman_get_size(sc->regs_res);
504167769Skmacy
505197791Snp	for (i = 0; i < MAX_NPORTS; i++)
506197791Snp		sc->port[i].adapter = sc;
507197791Snp
508167769Skmacy	if (t3_prep_adapter(sc, ai, 1) < 0) {
509170654Skmacy		printf("prep adapter failed\n");
510167769Skmacy		error = ENODEV;
511167769Skmacy		goto out;
512167769Skmacy	}
513177464Skmacy        /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
514167514Skmacy	 * enough messages for the queue sets.  If that fails, try falling
515167514Skmacy	 * back to MSI.  If that fails, then try falling back to the legacy
516167514Skmacy	 * interrupt pin model.
517167514Skmacy	 */
518167514Skmacy	sc->msix_regs_rid = 0x20;
519167514Skmacy	if ((msi_allowed >= 2) &&
520167514Skmacy	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
521167514Skmacy	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
522167514Skmacy
523192933Sgnn		if (multiq)
524192933Sgnn			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
525192933Sgnn		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
526167760Skmacy
527192933Sgnn		if (pci_msix_count(dev) == 0 ||
528192933Sgnn		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
529192933Sgnn		    sc->msi_count != msi_needed) {
530192933Sgnn			device_printf(dev, "alloc msix failed - "
531192933Sgnn				      "msi_count=%d, msi_needed=%d, err=%d; "
532192933Sgnn				      "will try MSI\n", sc->msi_count,
533192933Sgnn				      msi_needed, error);
534169978Skmacy			sc->msi_count = 0;
535192933Sgnn			port_qsets = 1;
536167514Skmacy			pci_release_msi(dev);
537167514Skmacy			bus_release_resource(dev, SYS_RES_MEMORY,
538167514Skmacy			    sc->msix_regs_rid, sc->msix_regs_res);
539167514Skmacy			sc->msix_regs_res = NULL;
540167514Skmacy		} else {
541167514Skmacy			sc->flags |= USING_MSIX;
542192933Sgnn			sc->cxgb_intr = cxgb_async_intr;
543192933Sgnn			device_printf(dev,
544192933Sgnn				      "using MSI-X interrupts (%u vectors)\n",
545192933Sgnn				      sc->msi_count);
546167514Skmacy		}
547167514Skmacy	}
548167514Skmacy
549169978Skmacy	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
550169978Skmacy		sc->msi_count = 1;
551192933Sgnn		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
552192933Sgnn			device_printf(dev, "alloc msi failed - "
553192933Sgnn				      "err=%d; will try INTx\n", error);
554169978Skmacy			sc->msi_count = 0;
555192933Sgnn			port_qsets = 1;
556167514Skmacy			pci_release_msi(dev);
557167514Skmacy		} else {
558167514Skmacy			sc->flags |= USING_MSI;
559170081Skmacy			sc->cxgb_intr = t3_intr_msi;
560192933Sgnn			device_printf(dev, "using MSI interrupts\n");
561167514Skmacy		}
562167514Skmacy	}
563169978Skmacy	if (sc->msi_count == 0) {
564167760Skmacy		device_printf(dev, "using line interrupts\n");
565170081Skmacy		sc->cxgb_intr = t3b_intr;
566167514Skmacy	}
567167514Skmacy
568167514Skmacy	/* Create a private taskqueue thread for handling driver events */
569167514Skmacy	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
570167514Skmacy	    taskqueue_thread_enqueue, &sc->tq);
571167514Skmacy	if (sc->tq == NULL) {
572167514Skmacy		device_printf(dev, "failed to allocate controller task queue\n");
573167514Skmacy		goto out;
574167514Skmacy	}
575171804Skmacy
576167514Skmacy	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
577167514Skmacy	    device_get_nameunit(dev));
578170869Skmacy	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
579167514Skmacy
580167514Skmacy
581167514Skmacy	/* Create a periodic callout for checking adapter status */
582170869Skmacy	callout_init(&sc->cxgb_tick_ch, TRUE);
583167514Skmacy
584189643Sgnn	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
585167514Skmacy		/*
586167514Skmacy		 * Warn user that a firmware update will be attempted in init.
587167514Skmacy		 */
588169978Skmacy		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
589169978Skmacy		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
590167514Skmacy		sc->flags &= ~FW_UPTODATE;
591167514Skmacy	} else {
592167514Skmacy		sc->flags |= FW_UPTODATE;
593167514Skmacy	}
594171471Skmacy
595189643Sgnn	if (t3_check_tpsram_version(sc) < 0) {
596171471Skmacy		/*
597171471Skmacy		 * Warn user that a firmware update will be attempted in init.
598171471Skmacy		 */
599171471Skmacy		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
600171471Skmacy		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
601171471Skmacy		sc->flags &= ~TPS_UPTODATE;
602171471Skmacy	} else {
603171471Skmacy		sc->flags |= TPS_UPTODATE;
604171471Skmacy	}
605167514Skmacy
606167514Skmacy	/*
607167514Skmacy	 * Create a child device for each MAC.  The ethernet attachment
608167514Skmacy	 * will be done in these children.
609167760Skmacy	 */
610167760Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
611171978Skmacy		struct port_info *pi;
612171978Skmacy
613167514Skmacy		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
614167514Skmacy			device_printf(dev, "failed to add child port\n");
615167514Skmacy			error = EINVAL;
616167514Skmacy			goto out;
617167514Skmacy		}
618171978Skmacy		pi = &sc->port[i];
619171978Skmacy		pi->adapter = sc;
620171978Skmacy		pi->nqsets = port_qsets;
621171978Skmacy		pi->first_qset = i*port_qsets;
622171978Skmacy		pi->port_id = i;
623171978Skmacy		pi->tx_chan = i >= ai->nports0;
624171978Skmacy		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
625171978Skmacy		sc->rxpkt_map[pi->txpkt_intf] = i;
626174708Skmacy		sc->port[i].tx_chan = i >= ai->nports0;
627171471Skmacy		sc->portdev[i] = child;
628171978Skmacy		device_set_softc(child, pi);
629167514Skmacy	}
630167514Skmacy	if ((error = bus_generic_attach(dev)) != 0)
631167514Skmacy		goto out;
632167514Skmacy
633167514Skmacy	/* initialize sge private state */
634170654Skmacy	t3_sge_init_adapter(sc);
635167514Skmacy
636167514Skmacy	t3_led_ready(sc);
637169978Skmacy
638169978Skmacy	cxgb_offload_init();
639169978Skmacy	if (is_offload(sc)) {
640169978Skmacy		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
641169978Skmacy		cxgb_adapter_ofld(sc);
642169978Skmacy        }
643167514Skmacy	error = t3_get_fw_version(sc, &vers);
644167514Skmacy	if (error)
645167514Skmacy		goto out;
646167514Skmacy
647169978Skmacy	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
648169978Skmacy	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
649169978Skmacy	    G_FW_VERSION_MICRO(vers));
650169978Skmacy
651199237Snp	snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
652199237Snp		 ai->desc, is_offload(sc) ? "R" : "",
653185655Sgnn		 sc->params.vpd.ec, sc->params.vpd.sn);
654185655Sgnn	device_set_desc_copy(dev, buf);
655185655Sgnn
656192540Sgnn	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
657192540Sgnn		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
658192540Sgnn		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
659192540Sgnn
660176472Skmacy	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
661209841Snp	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
662174708Skmacy	t3_add_attach_sysctls(sc);
663167514Skmacyout:
664167514Skmacy	if (error)
665167514Skmacy		cxgb_free(sc);
666167514Skmacy
667167514Skmacy	return (error);
668167514Skmacy}
669167514Skmacy
670192537Sgnn/*
671192584Sgnn * The cxgb_controller_detach routine is called with the device is
672192537Sgnn * unloaded from the system.
673192537Sgnn */
674192537Sgnn
675167514Skmacystatic int
676167514Skmacycxgb_controller_detach(device_t dev)
677167514Skmacy{
678167514Skmacy	struct adapter *sc;
679167514Skmacy
680167514Skmacy	sc = device_get_softc(dev);
681167514Skmacy
682167514Skmacy	cxgb_free(sc);
683167514Skmacy
684167514Skmacy	return (0);
685167514Skmacy}
686167514Skmacy
687192537Sgnn/*
688192537Sgnn * The cxgb_free() is called by the cxgb_controller_detach() routine
689192537Sgnn * to tear down the structures that were built up in
690192537Sgnn * cxgb_controller_attach(), and should be the final piece of work
691192584Sgnn * done when fully unloading the driver.
692192537Sgnn *
693192537Sgnn *
694192537Sgnn *  1. Shutting down the threads started by the cxgb_controller_attach()
695192537Sgnn *     routine.
696192537Sgnn *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
697192537Sgnn *  3. Detaching all of the port devices created during the
698192537Sgnn *     cxgb_controller_attach() routine.
699192537Sgnn *  4. Removing the device children created via cxgb_controller_attach().
700192933Sgnn *  5. Releasing PCI resources associated with the device.
701192537Sgnn *  6. Turning off the offload support, iff it was turned on.
702192537Sgnn *  7. Destroying the mutexes created in cxgb_controller_attach().
703192537Sgnn *
704192537Sgnn */
static void
cxgb_free(struct adapter *sc)
{
	int i, nqsets = 0;

	/* Flag the adapter as shutting down so other paths stop using it. */
	ADAPTER_LOCK(sc);
	sc->flags |= CXGB_SHUTDOWN;
	ADAPTER_UNLOCK(sc);

	/*
	 * Make sure all child devices are gone.
	 */
	bus_generic_detach(sc->dev);
	for (i = 0; i < (sc)->params.nports; i++) {
		if (sc->portdev[i] &&
		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
			device_printf(sc->dev, "failed to delete child port\n");
		/* Tally queue sets so SGE resources can be freed below. */
		nqsets += sc->port[i].nqsets;
	}

	/*
	 * At this point, it is as if cxgb_port_detach has run on all ports, and
	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
	 * all open devices have been closed.
	 */
	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
					   __func__, sc->open_device_map));
	for (i = 0; i < sc->params.nports; i++) {
		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
						  __func__, i));
	}

	/*
	 * Finish off the adapter's callouts.
	 */
	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	/*
	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
	 * sysctls are cleaned up by the kernel linker.
	 */
	if (sc->flags & FULL_INIT_DONE) {
		t3_free_sge_resources(sc, nqsets);
		sc->flags &= ~FULL_INIT_DONE;
	}

	/*
	 * Release all interrupt resources.
	 */
	cxgb_teardown_interrupts(sc);
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}

	/* BAR for the MSI-X table, if one was mapped. */
	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}

	/*
	 * Free the adapter's taskqueue.
	 */
	if (sc->tq != NULL) {
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}

	if (is_offload(sc)) {
		clrbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_adapter_unofld(sc);
	}

#ifdef notyet
	if (sc->flags & CXGB_OFLD_INIT)
		cxgb_offload_deactivate(sc);
#endif
	/* free(NULL, ...) is a no-op, so sc->filters needs no check. */
	free(sc->filters, M_DEVBUF);
	t3_sge_free(sc);

	cxgb_offload_exit();

	if (sc->udbs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	/* Last of all, destroy the locks created in cxgb_controller_attach. */
	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	ADAPTER_LOCK_DEINIT(sc);
}
803167514Skmacy
804167514Skmacy/**
805167514Skmacy *	setup_sge_qsets - configure SGE Tx/Rx/response queues
806167514Skmacy *	@sc: the controller softc
807167514Skmacy *
808167514Skmacy *	Determines how many sets of SGE queues to use and initializes them.
809167514Skmacy *	We support multiple queue sets per port if we have MSI-X, otherwise
810167514Skmacy *	just one queue set per port.
811167514Skmacy */
812167514Skmacystatic int
813167514Skmacysetup_sge_qsets(adapter_t *sc)
814167514Skmacy{
815172096Skmacy	int i, j, err, irq_idx = 0, qset_idx = 0;
816169978Skmacy	u_int ntxq = SGE_TXQ_PER_SET;
817167514Skmacy
818167514Skmacy	if ((err = t3_sge_alloc(sc)) != 0) {
819167760Skmacy		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
820167514Skmacy		return (err);
821167514Skmacy	}
822167514Skmacy
823167514Skmacy	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
824167514Skmacy		irq_idx = -1;
825167514Skmacy
826172096Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
827167514Skmacy		struct port_info *pi = &sc->port[i];
828167514Skmacy
829171978Skmacy		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
830167760Skmacy			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
831167514Skmacy			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
832167514Skmacy			    &sc->params.sge.qset[qset_idx], ntxq, pi);
833167514Skmacy			if (err) {
834219946Snp				t3_free_sge_resources(sc, qset_idx);
835219946Snp				device_printf(sc->dev,
836219946Snp				    "t3_sge_alloc_qset failed with %d\n", err);
837167514Skmacy				return (err);
838167514Skmacy			}
839167514Skmacy		}
840167514Skmacy	}
841167514Skmacy
842167514Skmacy	return (0);
843167514Skmacy}
844167514Skmacy
845170654Skmacystatic void
846192933Sgnncxgb_teardown_interrupts(adapter_t *sc)
847170654Skmacy{
848192933Sgnn	int i;
849170654Skmacy
850192933Sgnn	for (i = 0; i < SGE_QSETS; i++) {
851192933Sgnn		if (sc->msix_intr_tag[i] == NULL) {
852192933Sgnn
853192933Sgnn			/* Should have been setup fully or not at all */
854192933Sgnn			KASSERT(sc->msix_irq_res[i] == NULL &&
855192933Sgnn				sc->msix_irq_rid[i] == 0,
856192933Sgnn				("%s: half-done interrupt (%d).", __func__, i));
857192933Sgnn
858192933Sgnn			continue;
859170654Skmacy		}
860192933Sgnn
861192933Sgnn		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
862192933Sgnn				  sc->msix_intr_tag[i]);
863192933Sgnn		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
864192933Sgnn				     sc->msix_irq_res[i]);
865192933Sgnn
866192933Sgnn		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
867192933Sgnn		sc->msix_irq_rid[i] = 0;
868170654Skmacy	}
869192933Sgnn
870192933Sgnn	if (sc->intr_tag) {
871192933Sgnn		KASSERT(sc->irq_res != NULL,
872192933Sgnn			("%s: half-done interrupt.", __func__));
873192933Sgnn
874192933Sgnn		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
875192933Sgnn		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
876192933Sgnn				     sc->irq_res);
877192933Sgnn
878192933Sgnn		sc->irq_res = sc->intr_tag = NULL;
879192933Sgnn		sc->irq_rid = 0;
880192933Sgnn	}
881170654Skmacy}
882170654Skmacy
/*
 * Allocate and hook up the adapter's interrupt(s): first the main
 * INTx/MSI/MSI-X vector, then one additional vector per queue set when
 * MSI-X is in use.  On failure, everything set up so far is torn down.
 */
static int
cxgb_setup_interrupts(adapter_t *sc)
{
	struct resource *res;
	void *tag;
	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);

	/* rid 0 is legacy INTx; MSI/MSI-X messages start at rid 1. */
	sc->irq_rid = intr_flag ? 1 : 0;
	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
			      intr_flag, sc->irq_rid);
		err = EINVAL;
		sc->irq_rid = 0;
	} else {
		err = bus_setup_intr(sc->dev, sc->irq_res,
		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
		    sc->cxgb_intr, sc, &sc->intr_tag);

		if (err) {
			device_printf(sc->dev,
				      "Cannot set up interrupt (%x, %u, %d)\n",
				      intr_flag, sc->irq_rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
					     sc->irq_res);
			sc->irq_res = sc->intr_tag = NULL;
			sc->irq_rid = 0;
		}
	}

	/* That's all for INTx or MSI */
	if (!(intr_flag & USING_MSIX) || err)
		return (err);

	/* One extra vector per queue set; they occupy rids 2..msi_count. */
	for (i = 0; i < sc->msi_count - 1; i++) {
		rid = i + 2;
		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
					     RF_SHAREABLE | RF_ACTIVE);
		if (res == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt "
				      "for message %d\n", rid);
			err = EINVAL;
			break;
		}

		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
				     NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
		if (err) {
			device_printf(sc->dev, "Cannot set up interrupt "
				      "for message %d (%d)\n", rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
			break;
		}

		/* Record only fully-established vectors (see teardown). */
		sc->msix_irq_rid[i] = rid;
		sc->msix_irq_res[i] = res;
		sc->msix_intr_tag[i] = tag;
	}

	/* Partial success is rolled back completely. */
	if (err)
		cxgb_teardown_interrupts(sc);

	return (err);
}
948167514Skmacy
949192933Sgnn
950167514Skmacystatic int
951167514Skmacycxgb_port_probe(device_t dev)
952167514Skmacy{
953167514Skmacy	struct port_info *p;
954167514Skmacy	char buf[80];
955176472Skmacy	const char *desc;
956176472Skmacy
957167514Skmacy	p = device_get_softc(dev);
958176472Skmacy	desc = p->phy.desc;
959176472Skmacy	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
960167514Skmacy	device_set_desc_copy(dev, buf);
961167514Skmacy	return (0);
962167514Skmacy}
963167514Skmacy
964167514Skmacy
965167514Skmacystatic int
966167514Skmacycxgb_makedev(struct port_info *pi)
967167514Skmacy{
968167514Skmacy
969170654Skmacy	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
970209115Snp	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
971167514Skmacy
972167514Skmacy	if (pi->port_cdev == NULL)
973167514Skmacy		return (ENOMEM);
974167514Skmacy
975167514Skmacy	pi->port_cdev->si_drv1 = (void *)pi;
976167514Skmacy
977167514Skmacy	return (0);
978167514Skmacy}
979167514Skmacy
/*
 * Interface capabilities advertised by every cxgb port, and the subset
 * enabled by default (IFCAP_TSO6 is masked out of the enabled set).
 */
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE)
#define CXGB_CAP_ENABLE (CXGB_CAP & ~IFCAP_TSO6)
984167514Skmacy
/*
 * Attach one port: initialize its lock and link-check task, allocate and
 * configure the ifnet, create the port cdev, and build the media list.
 * Called by the bus framework for each child port device created in
 * cxgb_controller_attach().
 */
static int
cxgb_port_attach(device_t dev)
{
	struct port_info *p;
	struct ifnet *ifp;
	int err;
	struct adapter *sc;

	p = device_get_softc(dev);
	sc = p->adapter;
	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
	    device_get_unit(device_get_parent(dev)), p->port_id);
	PORT_LOCK_INIT(p, p->lockbuf);

	callout_init(&p->link_check_ch, CALLOUT_MPSAFE);
	TASK_INIT(&p->link_check_task, 0, check_link_status, p);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = cxgb_init;
	ifp->if_softc = p;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cxgb_ioctl;
	ifp->if_transmit = cxgb_transmit;
	ifp->if_qflush = cxgb_qflush;

	ifp->if_capabilities = CXGB_CAP;
	ifp->if_capenable = CXGB_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;

	/*
	 * Disable TSO on 4-port - it isn't supported by the firmware.
	 */
	if (sc->params.nports > 2) {
		ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
		ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
		ifp->if_hwassist &= ~CSUM_TSO;
	}

	ether_ifattach(ifp, p->hw_addr);

#ifdef DEFAULT_JUMBO
	if (sc->params.nports <= 2)
		ifp->if_mtu = ETHERMTU_JUMBO;
#endif
	if ((err = cxgb_makedev(p)) != 0) {
		/*
		 * NOTE(review): the ifnet is not detached/freed on this
		 * failure path — looks like a leak; confirm and fix.
		 */
		printf("makedev failed %d\n", err);
		return (err);
	}

	/* Create a list of media supported by this port */
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);
	cxgb_build_medialist(p);

	t3_sge_init_port(p);

	/* err is 0 here: cxgb_makedev succeeded above. */
	return (err);
}
1050167514Skmacy
1051192537Sgnn/*
1052192537Sgnn * cxgb_port_detach() is called via the device_detach methods when
1053192537Sgnn * cxgb_free() calls the bus_generic_detach.  It is responsible for
1054192537Sgnn * removing the device from the view of the kernel, i.e. from all
1055192537Sgnn * interfaces lists etc.  This routine is only called when the driver is
1056192537Sgnn * being unloaded, not when the link goes down.
1057192537Sgnn */
static int
cxgb_port_detach(device_t dev)
{
	struct port_info *p;
	struct adapter *sc;
	int i;

	p = device_get_softc(dev);
	sc = p->adapter;

	/* Tell cxgb_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(p);
	wakeup(&sc->flags);
	/* Wait for our turn; BUSY serializes attach/detach/ioctl work. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	if (p->port_cdev != NULL)
		destroy_dev(p->port_cdev);

	/* Stop the port and remove it from the network stack. */
	cxgb_uninit_synchronized(p);
	ether_ifdetach(p->ifp);

	/* Drain the Tx callouts of every queue set owned by this port. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_drain(&txq->txq_watchdog);
		callout_drain(&txq->txq_timer);
	}

	PORT_LOCK_DEINIT(p);
	if_free(p->ifp);
	p->ifp = NULL;

	/* Release the BUSY marker and let the next waiter proceed. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);
	return (0);
}
1101167514Skmacy
/*
 * Handle a fatal adapter error: stop the SGE, silence the Tx/Rx MAC
 * control registers of both channels, disable interrupts, and log the
 * firmware status words for post-mortem debugging.
 */
void
t3_fatal_err(struct adapter *sc)
{
	u_int fw_status[4];

	if (sc->flags & FULL_INIT_DONE) {
		t3_sge_stop(sc);
		/* Channel 0, then channel 1 (via XGM_REG(..., 1)). */
		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(sc);
	}
	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
	/* Read 4 words of firmware status out of the CIM control block. */
	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}
1120167514Skmacy
1121167514Skmacyint
1122167514Skmacyt3_os_find_pci_capability(adapter_t *sc, int cap)
1123167514Skmacy{
1124167514Skmacy	device_t dev;
1125167514Skmacy	struct pci_devinfo *dinfo;
1126167514Skmacy	pcicfgregs *cfg;
1127167514Skmacy	uint32_t status;
1128167514Skmacy	uint8_t ptr;
1129167514Skmacy
1130167514Skmacy	dev = sc->dev;
1131167514Skmacy	dinfo = device_get_ivars(dev);
1132167514Skmacy	cfg = &dinfo->cfg;
1133167514Skmacy
1134167514Skmacy	status = pci_read_config(dev, PCIR_STATUS, 2);
1135167514Skmacy	if (!(status & PCIM_STATUS_CAPPRESENT))
1136167514Skmacy		return (0);
1137167514Skmacy
1138167514Skmacy	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1139167514Skmacy	case 0:
1140167514Skmacy	case 1:
1141167514Skmacy		ptr = PCIR_CAP_PTR;
1142167514Skmacy		break;
1143167514Skmacy	case 2:
1144167514Skmacy		ptr = PCIR_CAP_PTR_2;
1145167514Skmacy		break;
1146167514Skmacy	default:
1147167514Skmacy		return (0);
1148167514Skmacy		break;
1149167514Skmacy	}
1150167514Skmacy	ptr = pci_read_config(dev, ptr, 1);
1151167514Skmacy
1152167514Skmacy	while (ptr != 0) {
1153167514Skmacy		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1154167514Skmacy			return (ptr);
1155167514Skmacy		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1156167514Skmacy	}
1157167514Skmacy
1158167514Skmacy	return (0);
1159167514Skmacy}
1160167514Skmacy
1161167514Skmacyint
1162167514Skmacyt3_os_pci_save_state(struct adapter *sc)
1163167514Skmacy{
1164167514Skmacy	device_t dev;
1165167514Skmacy	struct pci_devinfo *dinfo;
1166167514Skmacy
1167167514Skmacy	dev = sc->dev;
1168167514Skmacy	dinfo = device_get_ivars(dev);
1169167514Skmacy
1170167514Skmacy	pci_cfg_save(dev, dinfo, 0);
1171167514Skmacy	return (0);
1172167514Skmacy}
1173167514Skmacy
1174167514Skmacyint
1175167514Skmacyt3_os_pci_restore_state(struct adapter *sc)
1176167514Skmacy{
1177167514Skmacy	device_t dev;
1178167514Skmacy	struct pci_devinfo *dinfo;
1179167514Skmacy
1180167514Skmacy	dev = sc->dev;
1181167514Skmacy	dinfo = device_get_ivars(dev);
1182167514Skmacy
1183167514Skmacy	pci_cfg_restore(dev, dinfo);
1184167514Skmacy	return (0);
1185167514Skmacy}
1186167514Skmacy
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_status: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *	@mac_was_reset: non-zero if the MAC was reset and its settings
 *	must be reapplied
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
1200167514Skmacyvoid
1201167514Skmacyt3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1202197791Snp     int duplex, int fc, int mac_was_reset)
1203167514Skmacy{
1204167514Skmacy	struct port_info *pi = &adapter->port[port_id];
1205194521Skmacy	struct ifnet *ifp = pi->ifp;
1206167514Skmacy
1207194521Skmacy	/* no race with detach, so ifp should always be good */
1208194521Skmacy	KASSERT(ifp, ("%s: if detached.", __func__));
1209194521Skmacy
1210197791Snp	/* Reapply mac settings if they were lost due to a reset */
1211197791Snp	if (mac_was_reset) {
1212197791Snp		PORT_LOCK(pi);
1213197791Snp		cxgb_update_mac_settings(pi);
1214197791Snp		PORT_UNLOCK(pi);
1215197791Snp	}
1216197791Snp
1217169978Skmacy	if (link_status) {
1218194521Skmacy		ifp->if_baudrate = IF_Mbps(speed);
1219194521Skmacy		if_link_state_change(ifp, LINK_STATE_UP);
1220192540Sgnn	} else
1221194521Skmacy		if_link_state_change(ifp, LINK_STATE_DOWN);
1222167514Skmacy}
1223167514Skmacy
1224181614Skmacy/**
1225181614Skmacy *	t3_os_phymod_changed - handle PHY module changes
1226181614Skmacy *	@phy: the PHY reporting the module change
1227181614Skmacy *	@mod_type: new module type
1228181614Skmacy *
1229181614Skmacy *	This is the OS-dependent handler for PHY module changes.  It is
1230181614Skmacy *	invoked when a PHY module is removed or inserted for any OS-specific
1231181614Skmacy *	processing.
1232181614Skmacy */
1233181614Skmacyvoid t3_os_phymod_changed(struct adapter *adap, int port_id)
1234181614Skmacy{
1235181614Skmacy	static const char *mod_str[] = {
1236204921Snp		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
1237181614Skmacy	};
1238181614Skmacy	struct port_info *pi = &adap->port[port_id];
1239194921Snp	int mod = pi->phy.modtype;
1240181614Skmacy
1241194921Snp	if (mod != pi->media.ifm_cur->ifm_data)
1242194921Snp		cxgb_build_medialist(pi);
1243194921Snp
1244194921Snp	if (mod == phy_modtype_none)
1245194921Snp		if_printf(pi->ifp, "PHY module unplugged\n");
1246181614Skmacy	else {
1247194921Snp		KASSERT(mod < ARRAY_SIZE(mod_str),
1248194921Snp			("invalid PHY module type %d", mod));
1249194921Snp		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1250181614Skmacy	}
1251181614Skmacy}
1252181614Skmacy
1253167514Skmacyvoid
1254167514Skmacyt3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1255167514Skmacy{
1256167514Skmacy
1257167514Skmacy	/*
1258167514Skmacy	 * The ifnet might not be allocated before this gets called,
1259167514Skmacy	 * as this is called early on in attach by t3_prep_adapter
1260167514Skmacy	 * save the address off in the port structure
1261167514Skmacy	 */
1262167514Skmacy	if (cxgb_debug)
1263167514Skmacy		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1264167514Skmacy	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1265167514Skmacy}
1266167514Skmacy
1267194521Skmacy/*
1268194521Skmacy * Programs the XGMAC based on the settings in the ifnet.  These settings
1269194521Skmacy * include MTU, MAC address, mcast addresses, etc.
1270167514Skmacy */
static void
cxgb_update_mac_settings(struct port_info *p)
{
	struct ifnet *ifp = p->ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;
	int mtu, hwtagging;

	/* Caller must hold the port lock. */
	PORT_LOCK_ASSERT_OWNED(p);

	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);

	mtu = ifp->if_mtu;
	/* Leave room for a VLAN tag when the capability is enabled. */
	if (ifp->if_capenable & IFCAP_VLAN_MTU)
		mtu += ETHER_VLAN_ENCAP_LEN;

	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;

	/* Push MTU, VLAN acceleration, MAC address, and rx mode to hw. */
	t3_mac_set_mtu(mac, mtu);
	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}
1295167514Skmacy
1296176472Skmacy
1297176472Skmacystatic int
1298176472Skmacyawait_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1299176472Skmacy			      unsigned long n)
1300176472Skmacy{
1301176472Skmacy	int attempts = 5;
1302176472Skmacy
1303176472Skmacy	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1304176472Skmacy		if (!--attempts)
1305176472Skmacy			return (ETIMEDOUT);
1306176472Skmacy		t3_os_sleep(10);
1307176472Skmacy	}
1308176472Skmacy	return 0;
1309176472Skmacy}
1310176472Skmacy
/*
 * Write every SMT, L2T, and routing-table entry (all zeroed) via CPL
 * management messages, then issue a final CPL_SET_TCB_FIELD and wait for
 * all replies.  The name indicates this initializes TP memory parity.
 * Returns 0 on success or ETIMEDOUT from await_mgmt_replies().
 */
static int
init_tp_parity(struct adapter *adap)
{
	int i;
	struct mbuf *m;
	struct cpl_set_tcb_field *greq;
	/* Reply count baseline; replies show up as offload packets on qs 0. */
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* All 16 SMT entries. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_smt_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, m);
	}

	/* All 2048 L2T entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_l2t_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* All 2048 routing-table entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_rte_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* A final request so there is a known last reply to wait for. */
	m = m_gethdr(M_WAITOK, MT_DATA);
	greq = mtod(m, struct cpl_set_tcb_field *);
	m->m_len = m->m_pkthdr.len = sizeof(*greq);
	memset(greq, 0, sizeof(*greq));
	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = htobe64(1);
	t3_mgmt_tx(adap, m);

	/* 16 SMT + 2048 L2T + 2048 RTE + 1 final reply expected. */
	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return (i);
}
1373176472Skmacy
1374167514Skmacy/**
1375167514Skmacy *	setup_rss - configure Receive Side Steering (per-queue connection demux)
1376167514Skmacy *	@adap: the adapter
1377167514Skmacy *
1378167514Skmacy *	Sets up RSS to distribute packets to multiple receive queues.  We
1379167514Skmacy *	configure the RSS CPU lookup table to distribute to the number of HW
1380167514Skmacy *	receive queues, and the response queue lookup table to narrow that
1381167514Skmacy *	down to the response queues actually configured for each port.
1382167514Skmacy *	We always configure the RSS mapping for two ports since the mapping
1383167514Skmacy *	table has plenty of entries.
1384167514Skmacy */
1385167514Skmacystatic void
1386167514Skmacysetup_rss(adapter_t *adap)
1387167514Skmacy{
1388167514Skmacy	int i;
1389171471Skmacy	u_int nq[2];
1390167514Skmacy	uint8_t cpus[SGE_QSETS + 1];
1391167514Skmacy	uint16_t rspq_map[RSS_TABLE_SIZE];
1392171471Skmacy
1393167514Skmacy	for (i = 0; i < SGE_QSETS; ++i)
1394167514Skmacy		cpus[i] = i;
1395167514Skmacy	cpus[SGE_QSETS] = 0xff;
1396167514Skmacy
1397171978Skmacy	nq[0] = nq[1] = 0;
1398171978Skmacy	for_each_port(adap, i) {
1399171978Skmacy		const struct port_info *pi = adap2pinfo(adap, i);
1400171978Skmacy
1401171978Skmacy		nq[pi->tx_chan] += pi->nqsets;
1402171978Skmacy	}
1403167514Skmacy	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1404176472Skmacy		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1405176472Skmacy		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1406167514Skmacy	}
1407196840Sjhb
1408171471Skmacy	/* Calculate the reverse RSS map table */
1409196840Sjhb	for (i = 0; i < SGE_QSETS; ++i)
1410196840Sjhb		adap->rrss_map[i] = 0xff;
1411171471Skmacy	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1412171471Skmacy		if (adap->rrss_map[rspq_map[i]] == 0xff)
1413171471Skmacy			adap->rrss_map[rspq_map[i]] = i;
1414167514Skmacy
1415167514Skmacy	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1416171471Skmacy		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1417176472Skmacy	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1418176472Skmacy	              cpus, rspq_map);
1419171471Skmacy
1420167514Skmacy}
1421167514Skmacy
1422169978Skmacy/*
1423169978Skmacy * Sends an mbuf to an offload queue driver
1424169978Skmacy * after dealing with any active network taps.
1425169978Skmacy */
static inline int
offload_tx(struct t3cdev *tdev, struct mbuf *m)
{

	/* Thin pass-through to the offload transmit routine. */
	return (t3_offload_tx(tdev, m));
}
1434169978Skmacy
/*
 * Build and send a CPL_SMT_WRITE_REQ installing port idx's MAC address
 * in SMT entry idx.  Returns ENOMEM if no mbuf is available, 0 otherwise.
 */
static int
write_smt_entry(struct adapter *adapter, int idx)
{
	struct port_info *pi = &adapter->port[idx];
	struct cpl_smt_write_req *req;
	struct mbuf *m;

	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return (ENOMEM);

	req = mtod(m, struct cpl_smt_write_req *);
	m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);

	req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
	req->iff = idx;
	/* Only src_mac0 carries the address; src_mac1 is cleared. */
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);

	m_set_priority(m, 1);

	offload_tx(&adapter->tdev, m);

	return (0);
}
1461169978Skmacy
1462169978Skmacystatic int
1463169978Skmacyinit_smt(struct adapter *adapter)
1464169978Skmacy{
1465169978Skmacy	int i;
1466169978Skmacy
1467169978Skmacy	for_each_port(adapter, i)
1468169978Skmacy		write_smt_entry(adapter, i);
1469169978Skmacy	return 0;
1470169978Skmacy}
1471169978Skmacy
1472167514Skmacystatic void
1473169978Skmacyinit_port_mtus(adapter_t *adapter)
1474169978Skmacy{
1475194521Skmacy	unsigned int mtus = ETHERMTU | (ETHERMTU << 16);
1476169978Skmacy
1477169978Skmacy	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1478169978Skmacy}
1479169978Skmacy
1480169978Skmacystatic void
1481167514Skmacysend_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1482167514Skmacy			      int hi, int port)
1483167514Skmacy{
1484167514Skmacy	struct mbuf *m;
1485167514Skmacy	struct mngt_pktsched_wr *req;
1486167514Skmacy
1487171471Skmacy	m = m_gethdr(M_DONTWAIT, MT_DATA);
1488167848Skmacy	if (m) {
1489169978Skmacy		req = mtod(m, struct mngt_pktsched_wr *);
1490194521Skmacy		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1491167848Skmacy		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1492167848Skmacy		req->sched = sched;
1493167848Skmacy		req->idx = qidx;
1494167848Skmacy		req->min = lo;
1495167848Skmacy		req->max = hi;
1496167848Skmacy		req->binding = port;
1497167848Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1498167848Skmacy		t3_mgmt_tx(adap, m);
1499167848Skmacy	}
1500167514Skmacy}
1501167514Skmacy
1502167514Skmacystatic void
1503167514Skmacybind_qsets(adapter_t *sc)
1504167514Skmacy{
1505167514Skmacy	int i, j;
1506167514Skmacy
1507167514Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
1508167514Skmacy		const struct port_info *pi = adap2pinfo(sc, i);
1509167514Skmacy
1510172096Skmacy		for (j = 0; j < pi->nqsets; ++j) {
1511167514Skmacy			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1512172096Skmacy					  -1, pi->tx_chan);
1513172096Skmacy
1514172096Skmacy		}
1515167514Skmacy	}
1516167514Skmacy}
1517167514Skmacy
1518171471Skmacystatic void
1519171471Skmacyupdate_tpeeprom(struct adapter *adap)
1520171471Skmacy{
1521171471Skmacy	const struct firmware *tpeeprom;
1522172109Skmacy
1523171471Skmacy	uint32_t version;
1524171471Skmacy	unsigned int major, minor;
1525171471Skmacy	int ret, len;
1526189643Sgnn	char rev, name[32];
1527171471Skmacy
1528171471Skmacy	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1529171471Skmacy
1530171471Skmacy	major = G_TP_VERSION_MAJOR(version);
1531171471Skmacy	minor = G_TP_VERSION_MINOR(version);
1532171471Skmacy	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1533171471Skmacy		return;
1534171471Skmacy
1535171471Skmacy	rev = t3rev2char(adap);
1536189643Sgnn	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1537171471Skmacy
1538189643Sgnn	tpeeprom = firmware_get(name);
1539171471Skmacy	if (tpeeprom == NULL) {
1540190330Sgnn		device_printf(adap->dev,
1541190330Sgnn			      "could not load TP EEPROM: unable to load %s\n",
1542190330Sgnn			      name);
1543171471Skmacy		return;
1544171471Skmacy	}
1545171471Skmacy
1546171471Skmacy	len = tpeeprom->datasize - 4;
1547171471Skmacy
1548171471Skmacy	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1549171471Skmacy	if (ret)
1550171471Skmacy		goto release_tpeeprom;
1551171471Skmacy
1552171471Skmacy	if (len != TP_SRAM_LEN) {
1553190330Sgnn		device_printf(adap->dev,
1554190330Sgnn			      "%s length is wrong len=%d expected=%d\n", name,
1555190330Sgnn			      len, TP_SRAM_LEN);
1556171471Skmacy		return;
1557171471Skmacy	}
1558171471Skmacy
1559171471Skmacy	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1560171471Skmacy	    TP_SRAM_OFFSET);
1561171471Skmacy
1562171471Skmacy	if (!ret) {
1563171471Skmacy		device_printf(adap->dev,
1564171471Skmacy			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1565171471Skmacy			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1566171471Skmacy	} else
1567190330Sgnn		device_printf(adap->dev,
1568190330Sgnn			      "Protocol SRAM image update in EEPROM failed\n");
1569171471Skmacy
1570171471Skmacyrelease_tpeeprom:
1571171471Skmacy	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1572171471Skmacy
1573171471Skmacy	return;
1574171471Skmacy}
1575171471Skmacy
1576171471Skmacystatic int
1577171471Skmacyupdate_tpsram(struct adapter *adap)
1578171471Skmacy{
1579171471Skmacy	const struct firmware *tpsram;
1580171471Skmacy	int ret;
1581189643Sgnn	char rev, name[32];
1582171471Skmacy
1583171471Skmacy	rev = t3rev2char(adap);
1584189643Sgnn	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1585171471Skmacy
1586171471Skmacy	update_tpeeprom(adap);
1587171471Skmacy
1588189643Sgnn	tpsram = firmware_get(name);
1589171471Skmacy	if (tpsram == NULL){
1590176613Skmacy		device_printf(adap->dev, "could not load TP SRAM\n");
1591171471Skmacy		return (EINVAL);
1592171471Skmacy	} else
1593176613Skmacy		device_printf(adap->dev, "updating TP SRAM\n");
1594171471Skmacy
1595171471Skmacy	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1596171471Skmacy	if (ret)
1597171471Skmacy		goto release_tpsram;
1598171471Skmacy
1599171471Skmacy	ret = t3_set_proto_sram(adap, tpsram->data);
1600171471Skmacy	if (ret)
1601171471Skmacy		device_printf(adap->dev, "loading protocol SRAM failed\n");
1602171471Skmacy
1603171471Skmacyrelease_tpsram:
1604171471Skmacy	firmware_put(tpsram, FIRMWARE_UNLOAD);
1605171471Skmacy
1606171471Skmacy	return ret;
1607171471Skmacy}
1608171471Skmacy
/**
 *	cxgb_up - enable the adapter
 *	@sc: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.  The
 *	one-time portion (firmware/TP SRAM upgrade, HW init, qset and
 *	interrupt setup) is guarded by FULL_INIT_DONE and may sleep, so the
 *	adapter lock must not be held on entry.  Returns 0 or an errno value.
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;
	/* Max number of filter TIDs the MC5 can hold beyond the minimum. */
	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;

	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
					   __func__, sc->open_device_map));

	if ((sc->flags & FULL_INIT_DONE) == 0) {

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

		/* Bring firmware and protocol SRAM up to date first. */
		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;

		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;

		/*
		 * Carve MC5 space for filters (tunable "nfilters"):
		 * negative means "as many as fit", otherwise clamp to mxf.
		 */
		if (is_offload(sc) && nfilters != 0) {
			sc->params.mc5.nservers = 0;

			if (nfilters < 0)
				sc->params.mc5.nfilters = mxf;
			else
				sc->params.mc5.nfilters = min(nfilters, mxf);
		}

		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		alloc_filters(sc);
		setup_rss(sc);

		t3_intr_clear(sc);
		err = cxgb_setup_interrupts(sc);
		if (err)
			goto out;

		t3_add_configured_sysctls(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);
	t3_sge_start(sc);
	t3_intr_enable(sc);

	/* T3C parts need one-time TP parity initialization when offloading. */
	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
	    is_offload(sc) && init_tp_parity(sc) == 0)
		sc->flags |= TP_PARITY_INIT;

	if (sc->flags & TP_PARITY_INIT) {
		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(sc->flags & QUEUES_BOUND)) {
		bind_qsets(sc);
		setup_hw_filters(sc);
		sc->flags |= QUEUES_BOUND;
	}

	t3_sge_reset_adapter(sc);
out:
	return (err);
}
1693169978Skmacy
/*
 * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 * during controller_detach, not here.  Only quiesces the SGE and masks
 * adapter interrupts.
 */
static void
cxgb_down(struct adapter *sc)
{
	t3_sge_stop(sc);
	t3_intr_disable(sc);
}
1705169978Skmacy
/*
 * Enable offload mode for the adapter and register this port's ifp as the
 * offload device's lower-layer interface, then notify the registered TOE
 * clients.  Always returns 0.
 */
static int
offload_open(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct t3cdev *tdev = &sc->tdev;

	/* Mark the offload pseudo-device open in the shared device map. */
	setbit(&sc->open_device_map, OFFLOAD_DEVMAP_BIT);

	t3_tp_set_offload_mode(sc, 1);
	tdev->lldev = pi->ifp;
	init_port_mtus(sc);
	/* rev 0 parts use port 0's MTU as the ceiling; others are unlimited */
	t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
		     sc->params.rev == 0 ?  sc->port[0].ifp->if_mtu : 0xffff);
	init_smt(sc);
	cxgb_add_clients(tdev);

	return (0);
}
1724174708Skmacy
/*
 * Undo offload_open: notify clients, detach the lower-layer device, and turn
 * offload mode off.  No-op if the offload device was never opened.  Always
 * returns 0.
 */
static int
offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
		return (0);

	/* Call back all registered clients */
	cxgb_remove_clients(tdev);

	tdev->lldev = NULL;
	cxgb_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);

	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);

	return (0);
}
1744192537Sgnn
/*
 * if_init for cxgb ports.  Takes the adapter lock and delegates to
 * cxgb_init_locked(), which is responsible for dropping the lock.
 */
static void
cxgb_init(void *arg)
{
	struct port_info *p = arg;
	struct adapter *sc = p->adapter;

	ADAPTER_LOCK(sc);
	cxgb_init_locked(p); /* releases adapter lock */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
}
1758194521Skmacy
/*
 * Bring a port up.  Called with the adapter lock held; the lock is always
 * released before return.  Waits (interruptibly) while the controller is
 * BUSY, and marks it BUSY itself across the sleepable one-time adapter
 * initialization.  Returns 0, EINTR (sleep interrupted), ENXIO (port
 * doomed), or an error from cxgb_up().
 */
static int
cxgb_init_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	struct ifnet *ifp = p->ifp;
	struct cmac *mac = &p->mac;
	int i, rc = 0, may_sleep = 0, gave_up_lock = 0;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	/* Wait our turn while another thread holds the controller BUSY. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		gave_up_lock = 1;
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));

	/*
	 * The code that runs during one-time adapter initialization can sleep
	 * so it's important not to hold any locks across it.
	 */
	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;

	if (may_sleep) {
		SET_BUSY(sc);
		gave_up_lock = 1;
		ADAPTER_UNLOCK(sc);
	}

	if (sc->open_device_map == 0) {
		if ((rc = cxgb_up(sc)) != 0)
			goto done;

		if (is_offload(sc) && !ofld_disable && offload_open(p))
			log(LOG_WARNING,
			    "Could not initialize offload capabilities\n");
	}

	PORT_LOCK(p);
	/* Nothing to do if the port is already open and running. */
	if (isset(&sc->open_device_map, p->port_id) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(p);
		goto done;
	}
	t3_port_intr_enable(sc, p->port_id);
	if (!mac->multiport)
		t3_mac_init(mac);
	cxgb_update_mac_settings(p);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	PORT_UNLOCK(p);

	/* Arm the TX watchdog on each of this port's ETH txqs. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
				 txq->txq_watchdog.c_cpu);
	}

	/* all ok */
	setbit(&sc->open_device_map, p->port_id);
	/* Poll link slowly with IRQ-capable PHYs, fast otherwise. */
	callout_reset(&p->link_check_ch,
	    p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
	    link_check_callout, p);

done:
	if (may_sleep) {
		ADAPTER_LOCK(sc);
		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
		CLR_BUSY(sc);
	}
	if (gave_up_lock)
		wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);
	return (rc);
}
1844167514Skmacy
/*
 * Bring a port down.  Called with the adapter lock held; the lock is always
 * released before return.  Marks the controller BUSY and drops the lock
 * around cxgb_uninit_synchronized(), which may sleep.  Returns 0, EINTR, or
 * ENXIO.
 */
static int
cxgb_uninit_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	int rc;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	/* Wait our turn while another thread holds the controller BUSY. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	rc = cxgb_uninit_synchronized(p);

	ADAPTER_LOCK(sc);
	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
done:
	ADAPTER_UNLOCK(sc);
	return (rc);
}
1877202671Snp
/*
 * Called on "ifconfig down", and from port_detach.  Must be called without
 * the adapter lock (may sleep in taskqueue_drain).  Always returns 0.
 */
static int
cxgb_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/*
	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
	 */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/*
	 * Clear this port's bit from the open device map, and then drain all
	 * the tasks that can access/manipulate this port's port_info or ifp.
	 * We disable this port's interrupts here and so the slow/ext
	 * interrupt tasks won't be enqueued.  The tick task will continue to
	 * be enqueued every second but the runs after this drain will not see
	 * this port in the open device map.
	 *
	 * A well behaved task must take open_device_map into account and ignore
	 * ports that are not open.
	 */
	clrbit(&sc->open_device_map, pi->port_id);
	t3_port_intr_disable(sc, pi->port_id);
	taskqueue_drain(sc->tq, &sc->slow_intr_task);
	taskqueue_drain(sc->tq, &sc->tick_task);

	callout_drain(&pi->link_check_ch);
	taskqueue_drain(sc->tq, &pi->link_check_task);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* disable pause frames */
	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);

	/* Reset RX FIFO HWM */
	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);

	/* Let in-flight frames settle before checking the TX FIFO. */
	DELAY(100 * 1000);

	/* Wait for TXFIFO empty */
	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
			F_TXFIFO_EMPTY, 1, 20, 5);

	DELAY(100 * 1000);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);


	pi->phy.ops->power_down(&pi->phy, 1);

	PORT_UNLOCK(pi);

	/* Report link down to the stack. */
	pi->link_config.link_ok = 0;
	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);

	/* Last NIC port closed: shut down offload too. */
	if ((sc->open_device_map & PORT_MASK) == 0)
		offload_close(&sc->tdev);

	if (sc->open_device_map == 0)
		cxgb_down(pi->adapter);

	return (0);
}
1946170654Skmacy
1947181616Skmacy/*
1948181616Skmacy * Mark lro enabled or disabled in all qsets for this port
1949181616Skmacy */
1950170654Skmacystatic int
1951181616Skmacycxgb_set_lro(struct port_info *p, int enabled)
1952181616Skmacy{
1953181616Skmacy	int i;
1954181616Skmacy	struct adapter *adp = p->adapter;
1955181616Skmacy	struct sge_qset *q;
1956181616Skmacy
1957181616Skmacy	for (i = 0; i < p->nqsets; i++) {
1958181616Skmacy		q = &adp->sge.qs[p->first_qset + i];
1959181616Skmacy		q->lro.enabled = (enabled != 0);
1960181616Skmacy	}
1961181616Skmacy	return (0);
1962181616Skmacy}
1963181616Skmacy
/*
 * if_ioctl handler for cxgb ports.  The adapter lock arbitrates against
 * init/uninit/detach via IS_DOOMED/IS_BUSY; MAC reprogramming is done under
 * PORT_LOCK.  Note the "fail:" label lives inside the first case but is
 * shared by several cases for the doomed/busy early-out.
 */
static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct port_info *p = ifp->if_softc;
	struct adapter *sc = p->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0, mtu;
	uint32_t mask;

	switch (command) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error) {
fail:
			ADAPTER_UNLOCK(sc);
			return (error);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			error = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(p)) {
			error = ENXIO;
			goto fail;
		}
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Only PROMISC/ALLMULTI changes need work. */
				flags = p->if_flags;
				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						error = EBUSY;
						goto fail;
					}
					PORT_LOCK(p);
					cxgb_update_mac_settings(p);
					PORT_UNLOCK(p);
				}
				ADAPTER_UNLOCK(sc);
			} else
				error = cxgb_init_locked(p);
			p->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			error = cxgb_uninit_locked(p);
		else
			ADAPTER_UNLOCK(sc);

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);

		break;
	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO requires TX checksumming; drop it if lost. */
			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
				    "tso disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "enable txcsum first.\n");
					error = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;

			/* Safe to do this even if cxgb_up not called yet */
			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	return (error);
}
2118167514Skmacy
/*
 * ifmedia change callback.  Manual media selection is not supported on
 * these ports, so this always fails.
 */
static int
cxgb_media_change(struct ifnet *ifp)
{
	return (EOPNOTSUPP);
}
2124167514Skmacy
2125186282Sgnn/*
2126194921Snp * Translates phy->modtype to the correct Ethernet media subtype.
2127186282Sgnn */
2128186282Sgnnstatic int
2129194921Snpcxgb_ifm_type(int mod)
2130186282Sgnn{
2131194921Snp	switch (mod) {
2132186282Sgnn	case phy_modtype_sr:
2133194921Snp		return (IFM_10G_SR);
2134186282Sgnn	case phy_modtype_lr:
2135194921Snp		return (IFM_10G_LR);
2136186282Sgnn	case phy_modtype_lrm:
2137194921Snp		return (IFM_10G_LRM);
2138186282Sgnn	case phy_modtype_twinax:
2139194921Snp		return (IFM_10G_TWINAX);
2140186282Sgnn	case phy_modtype_twinax_long:
2141194921Snp		return (IFM_10G_TWINAX_LONG);
2142186282Sgnn	case phy_modtype_none:
2143194921Snp		return (IFM_NONE);
2144186282Sgnn	case phy_modtype_unknown:
2145194921Snp		return (IFM_UNKNOWN);
2146186282Sgnn	}
2147186282Sgnn
2148194921Snp	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2149194921Snp	return (IFM_UNKNOWN);
2150186282Sgnn}
2151186282Sgnn
/*
 * Rebuilds the ifmedia list for this port, and sets the current media.
 * The list depends on the PHY's capability bits and, for pluggable
 * transceivers, on the currently inserted module type.
 */
static void
cxgb_build_medialist(struct port_info *p)
{
	struct cphy *phy = &p->phy;
	struct ifmedia *media = &p->media;
	int mod = phy->modtype;
	int m = IFM_ETHER | IFM_FDX;

	PORT_LOCK(p);

	ifmedia_removeall(media);
	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
		/* Copper (RJ45) */

		if (phy->caps & SUPPORTED_10000baseT_Full)
			ifmedia_add(media, m | IFM_10G_T, mod, NULL);

		if (phy->caps & SUPPORTED_1000baseT_Full)
			ifmedia_add(media, m | IFM_1000_T, mod, NULL);

		if (phy->caps & SUPPORTED_100baseT_Full)
			ifmedia_add(media, m | IFM_100_TX, mod, NULL);

		if (phy->caps & SUPPORTED_10baseT_Full)
			ifmedia_add(media, m | IFM_10_T, mod, NULL);

		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);

	} else if (phy->caps & SUPPORTED_TP) {
		/* Copper (CX4) */

		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
			("%s: unexpected cap 0x%x", __func__, phy->caps));

		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);

	} else if (phy->caps & SUPPORTED_FIBRE &&
		   phy->caps & SUPPORTED_10000baseT_Full) {
		/* 10G optical (but includes SFP+ twinax) */

		m |= cxgb_ifm_type(mod);
		if (IFM_SUBTYPE(m) == IFM_NONE)
			m &= ~IFM_FDX;	/* no module inserted */

		ifmedia_add(media, m, mod, NULL);
		ifmedia_set(media, m);

	} else if (phy->caps & SUPPORTED_FIBRE &&
		   phy->caps & SUPPORTED_1000baseT_Full) {
		/* 1G optical */

		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
		ifmedia_set(media, m | IFM_1000_SX);

	} else {
		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
			    phy->caps));
	}

	PORT_UNLOCK(p);
}
2219194921Snp
/*
 * ifmedia status callback: report link state and, for autoselect (copper
 * RJ45) media, the negotiated speed.  Rebuilds the media list first if the
 * transceiver module changed since it was last built.
 */
static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = ifp->if_softc;
	struct ifmedia_entry *cur = p->media.ifm_cur;
	int speed = p->link_config.speed;

	if (cur->ifm_data != p->phy.modtype) {
		/* module changed; regenerate the list and re-read "cur" */
		cxgb_build_medialist(p);
		cur = p->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * active and current will differ iff current media is autoselect.  That
	 * can happen only for copper RJ45.
	 */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;
	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}
2260167514Skmacy
/*
 * Slow-path interrupt handler: mask further PL interrupts (the read flushes
 * the write) and defer the actual work to the slow_intr task.
 */
static void
cxgb_async_intr(void *data)
{
	adapter_t *sc = data;

	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}
2270167514Skmacy
2271209841Snpstatic void
2272209841Snplink_check_callout(void *arg)
2273197791Snp{
2274209841Snp	struct port_info *pi = arg;
2275209841Snp	struct adapter *sc = pi->adapter;
2276197791Snp
2277209841Snp	if (!isset(&sc->open_device_map, pi->port_id))
2278209841Snp		return;
2279197791Snp
2280209841Snp	taskqueue_enqueue(sc->tq, &pi->link_check_task);
2281197791Snp}
2282197791Snp
2283167514Skmacystatic void
2284209841Snpcheck_link_status(void *arg, int pending)
2285167514Skmacy{
2286209841Snp	struct port_info *pi = arg;
2287209841Snp	struct adapter *sc = pi->adapter;
2288167514Skmacy
2289209841Snp	if (!isset(&sc->open_device_map, pi->port_id))
2290209841Snp		return;
2291167514Skmacy
2292209841Snp	t3_link_changed(sc, pi->port_id);
2293194521Skmacy
2294209841Snp	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ))
2295209841Snp		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2296167514Skmacy}
2297167514Skmacy
/*
 * Entry point used by common code when the PHY raises a link interrupt.
 */
void
t3_os_link_intr(struct port_info *pi)
{
	/*
	 * Schedule a link check in the near future.  If the link is flapping
	 * rapidly we'll keep resetting the callout and delaying the check until
	 * things stabilize a bit.
	 */
	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
}
2308209841Snp
/*
 * T3B2 MAC watchdog: run the per-port MAC watchdog task on every open port
 * with a healthy link, and recover (toggle or full reset + relink) as the
 * watchdog requests.  Called from the tick task.
 */
static void
check_t3b2_mac(struct adapter *sc)
{
	int i;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	for_each_port(sc, i) {
		struct port_info *p = &sc->port[i];
		int status;
#ifdef INVARIANTS
		struct ifnet *ifp = p->ifp;
#endif

		/* Skip closed ports, faulted links, and links that are down. */
		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
		    !p->link_config.link_ok)
			continue;

		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
			("%s: state mismatch (drv_flags %x, device_map %x)",
			 __func__, ifp->if_drv_flags, sc->open_device_map));

		PORT_LOCK(p);
		status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			/* status 2: MAC needs a full reset and link restart */
			struct cmac *mac = &p->mac;

			cxgb_update_mac_settings(p);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(sc, p->port_id);
			p->mac.stats.num_resets++;
		}
		PORT_UNLOCK(p);
	}
}
2348167514Skmacy
/*
 * Periodic (1 Hz) callout: defer the real work to the tick task and re-arm
 * itself, unless the adapter is shutting down.
 */
static void
cxgb_tick(void *arg)
{
	adapter_t *sc = (adapter_t *)arg;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	taskqueue_enqueue(sc->tq, &sc->tick_task);
	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
}
2360170869Skmacy
/*
 * Once-a-second housekeeping task (queued by cxgb_tick, runs on sc->tq).
 * Runs the T3B2 MAC watchdog where applicable, accounts for starved
 * response queues and empty free lists reported by the SGE, and copies
 * the hardware MAC counters of every open port into its ifnet stats.
 * 'count' is the taskqueue pending count and is unused.
 */
static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;
	int i;
	uint32_t cause, reset;

	/* Bail if the adapter is going away or was never fully set up. */
	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
		return;

	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
		check_t3b2_mac(sc);

	/*
	 * Starvation/empty conditions are polled here rather than serviced
	 * in the interrupt handler; read the cause bits of interest.
	 */
	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
	if (cause) {
		struct sge_qset *qs = &sc->sge.qs[0];
		uint32_t mask, v;

		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;

		/* One status bit per response queue: count starved rspqs. */
		mask = 1;
		for (i = 0; i < SGE_QSETS; i++) {
			if (v & mask)
				qs[i].rspq.starved++;
			mask <<= 1;
		}

		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */

		/* Two free lists per qset: count the ones that ran empty. */
		for (i = 0; i < SGE_QSETS * 2; i++) {
			if (v & mask) {
				qs[i / 2].fl[i % 2].empty++;
			}
			mask <<= 1;
		}

		/* clear */
		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
	}

	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct ifnet *ifp = pi->ifp;
		struct cmac *mac = &pi->mac;
		struct mac_stats *mstats = &mac->stats;
		int drops, j;

		/* Only update statistics for ports that are open. */
		if (!isset(&sc->open_device_map, pi->port_id))
			continue;

		PORT_LOCK(pi);
		t3_mac_update_stats(mac);
		PORT_UNLOCK(pi);

		ifp->if_opackets = mstats->tx_frames;
		ifp->if_ipackets = mstats->rx_frames;
		ifp->if_obytes = mstats->tx_octets;
		ifp->if_ibytes = mstats->rx_octets;
		ifp->if_omcasts = mstats->tx_mcast_frames;
		ifp->if_imcasts = mstats->rx_mcast_frames;
		ifp->if_collisions = mstats->tx_total_collisions;
		ifp->if_iqdrops = mstats->rx_cong_drops;

		/* Software tx drops: sum the buf_ring drops of this port's queues. */
		drops = 0;
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; j++)
			drops += sc->sge.qs[j].txq[TXQ_ETH].txq_mr->br_drops;
		ifp->if_snd.ifq_drops = drops;

		ifp->if_oerrors =
		    mstats->tx_excess_collisions +
		    mstats->tx_underrun +
		    mstats->tx_len_errs +
		    mstats->tx_mac_internal_errs +
		    mstats->tx_excess_deferral +
		    mstats->tx_fcs_errs;
		ifp->if_ierrors =
		    mstats->rx_jabber +
		    mstats->rx_data_errs +
		    mstats->rx_sequence_errs +
		    mstats->rx_runt +
		    mstats->rx_too_long +
		    mstats->rx_mac_internal_errs +
		    mstats->rx_short +
		    mstats->rx_fcs_errs;

		if (mac->multiport)
			continue;

		/* Count rx fifo overflows, once per second */
		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}
		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
	}
}
2461167746Skmacy
/*
 * Re-write the upper halves of the 64-bit BARs (presumably needed after
 * a chip reset on 32-bit kernels, given the !__LP64__ guard — the body
 * is deliberately compiled out with "&& 0", so this is currently a
 * no-op on every platform).
 */
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}
2479171978Skmacy
2480167514Skmacystatic int
2481171471Skmacyset_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2482171471Skmacy{
2483171471Skmacy	uint8_t *buf;
2484171471Skmacy	int err = 0;
2485171471Skmacy	u32 aligned_offset, aligned_len, *p;
2486171471Skmacy	struct adapter *adapter = pi->adapter;
2487171471Skmacy
2488171471Skmacy
2489171471Skmacy	aligned_offset = offset & ~3;
2490171471Skmacy	aligned_len = (len + (offset & 3) + 3) & ~3;
2491171471Skmacy
2492171471Skmacy	if (aligned_offset != offset || aligned_len != len) {
2493171471Skmacy		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2494171471Skmacy		if (!buf)
2495171471Skmacy			return (ENOMEM);
2496171471Skmacy		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2497171471Skmacy		if (!err && aligned_len > 4)
2498171471Skmacy			err = t3_seeprom_read(adapter,
2499171471Skmacy					      aligned_offset + aligned_len - 4,
2500171471Skmacy					      (u32 *)&buf[aligned_len - 4]);
2501171471Skmacy		if (err)
2502171471Skmacy			goto out;
2503171471Skmacy		memcpy(buf + (offset & 3), data, len);
2504171471Skmacy	} else
2505171471Skmacy		buf = (uint8_t *)(uintptr_t)data;
2506171471Skmacy
2507171471Skmacy	err = t3_seeprom_wp(adapter, 0);
2508171471Skmacy	if (err)
2509171471Skmacy		goto out;
2510171471Skmacy
2511171471Skmacy	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2512171471Skmacy		err = t3_seeprom_write(adapter, aligned_offset, *p);
2513171471Skmacy		aligned_offset += 4;
2514171471Skmacy	}
2515171471Skmacy
2516171471Skmacy	if (!err)
2517171471Skmacy		err = t3_seeprom_wp(adapter, 1);
2518171471Skmacyout:
2519171471Skmacy	if (buf != data)
2520171471Skmacy		free(buf, M_DEVBUF);
2521171471Skmacy	return err;
2522171471Skmacy}
2523171471Skmacy
2524171471Skmacy
/*
 * Range check used by the ioctl handlers below.  A negative value means
 * "parameter not supplied" and always passes; otherwise the value must
 * lie in [lo, hi] inclusive.  (Parenthesized return per style(9), for
 * consistency with the rest of this file.)
 */
static int
in_range(int val, int lo, int hi)
{
	return (val < 0 || (val <= hi && val >= lo));
}
2530167514Skmacy
/*
 * cdev open handler for the driver's control device; no per-open state
 * is required, so the open always succeeds.
 */
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
{

	return (0);
}
2536170654Skmacy
/*
 * cdev close handler; nothing to tear down, so the close always
 * succeeds.
 */
static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
2542170654Skmacy
2543170654Skmacystatic int
2544167514Skmacycxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2545167514Skmacy    int fflag, struct thread *td)
2546167514Skmacy{
2547167514Skmacy	int mmd, error = 0;
2548167514Skmacy	struct port_info *pi = dev->si_drv1;
2549167514Skmacy	adapter_t *sc = pi->adapter;
2550167514Skmacy
2551167514Skmacy#ifdef PRIV_SUPPORTED
2552167514Skmacy	if (priv_check(td, PRIV_DRIVER)) {
2553167514Skmacy		if (cxgb_debug)
2554167514Skmacy			printf("user does not have access to privileged ioctls\n");
2555167514Skmacy		return (EPERM);
2556167514Skmacy	}
2557167514Skmacy#else
2558167514Skmacy	if (suser(td)) {
2559167514Skmacy		if (cxgb_debug)
2560167514Skmacy			printf("user does not have access to privileged ioctls\n");
2561167514Skmacy		return (EPERM);
2562167514Skmacy	}
2563167514Skmacy#endif
2564167514Skmacy
2565167514Skmacy	switch (cmd) {
2566182679Skmacy	case CHELSIO_GET_MIIREG: {
2567167514Skmacy		uint32_t val;
2568167514Skmacy		struct cphy *phy = &pi->phy;
2569182679Skmacy		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2570167514Skmacy
2571167514Skmacy		if (!phy->mdio_read)
2572167514Skmacy			return (EOPNOTSUPP);
2573167514Skmacy		if (is_10G(sc)) {
2574167514Skmacy			mmd = mid->phy_id >> 8;
2575167514Skmacy			if (!mmd)
2576167514Skmacy				mmd = MDIO_DEV_PCS;
2577190330Sgnn			else if (mmd > MDIO_DEV_VEND2)
2578171471Skmacy				return (EINVAL);
2579167514Skmacy
2580167514Skmacy			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2581167514Skmacy					     mid->reg_num, &val);
2582167514Skmacy		} else
2583167514Skmacy		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2584167514Skmacy					     mid->reg_num & 0x1f, &val);
2585167514Skmacy		if (error == 0)
2586167514Skmacy			mid->val_out = val;
2587167514Skmacy		break;
2588167514Skmacy	}
2589182679Skmacy	case CHELSIO_SET_MIIREG: {
2590167514Skmacy		struct cphy *phy = &pi->phy;
2591182679Skmacy		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2592167514Skmacy
2593167514Skmacy		if (!phy->mdio_write)
2594167514Skmacy			return (EOPNOTSUPP);
2595167514Skmacy		if (is_10G(sc)) {
2596167514Skmacy			mmd = mid->phy_id >> 8;
2597167514Skmacy			if (!mmd)
2598167514Skmacy				mmd = MDIO_DEV_PCS;
2599190330Sgnn			else if (mmd > MDIO_DEV_VEND2)
2600167514Skmacy				return (EINVAL);
2601167514Skmacy
2602167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2603167514Skmacy					      mmd, mid->reg_num, mid->val_in);
2604167514Skmacy		} else
2605167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2606167514Skmacy					      mid->reg_num & 0x1f,
2607167514Skmacy					      mid->val_in);
2608167514Skmacy		break;
2609167514Skmacy	}
2610167514Skmacy	case CHELSIO_SETREG: {
2611167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2612167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2613167514Skmacy			return (EFAULT);
2614167514Skmacy		t3_write_reg(sc, edata->addr, edata->val);
2615167514Skmacy		break;
2616167514Skmacy	}
2617167514Skmacy	case CHELSIO_GETREG: {
2618167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2619167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2620167514Skmacy			return (EFAULT);
2621167514Skmacy		edata->val = t3_read_reg(sc, edata->addr);
2622167514Skmacy		break;
2623167514Skmacy	}
2624167514Skmacy	case CHELSIO_GET_SGE_CONTEXT: {
2625167514Skmacy		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2626176472Skmacy		mtx_lock_spin(&sc->sge.reg_lock);
2627167514Skmacy		switch (ecntxt->cntxt_type) {
2628167514Skmacy		case CNTXT_TYPE_EGRESS:
2629182679Skmacy			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2630167514Skmacy			    ecntxt->data);
2631167514Skmacy			break;
2632167514Skmacy		case CNTXT_TYPE_FL:
2633182679Skmacy			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2634167514Skmacy			    ecntxt->data);
2635167514Skmacy			break;
2636167514Skmacy		case CNTXT_TYPE_RSP:
2637182679Skmacy			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2638167514Skmacy			    ecntxt->data);
2639167514Skmacy			break;
2640167514Skmacy		case CNTXT_TYPE_CQ:
2641182679Skmacy			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2642167514Skmacy			    ecntxt->data);
2643167514Skmacy			break;
2644167514Skmacy		default:
2645167514Skmacy			error = EINVAL;
2646167514Skmacy			break;
2647167514Skmacy		}
2648176472Skmacy		mtx_unlock_spin(&sc->sge.reg_lock);
2649167514Skmacy		break;
2650167514Skmacy	}
2651167514Skmacy	case CHELSIO_GET_SGE_DESC: {
2652167514Skmacy		struct ch_desc *edesc = (struct ch_desc *)data;
2653167514Skmacy		int ret;
2654167514Skmacy		if (edesc->queue_num >= SGE_QSETS * 6)
2655167514Skmacy			return (EINVAL);
2656167514Skmacy		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2657167514Skmacy		    edesc->queue_num % 6, edesc->idx, edesc->data);
2658167514Skmacy		if (ret < 0)
2659167514Skmacy			return (EINVAL);
2660167514Skmacy		edesc->size = ret;
2661167514Skmacy		break;
2662167514Skmacy	}
2663182679Skmacy	case CHELSIO_GET_QSET_PARAMS: {
2664167514Skmacy		struct qset_params *q;
2665167514Skmacy		struct ch_qset_params *t = (struct ch_qset_params *)data;
2666182679Skmacy		int q1 = pi->first_qset;
2667182679Skmacy		int nqsets = pi->nqsets;
2668176472Skmacy		int i;
2669176472Skmacy
2670182679Skmacy		if (t->qset_idx >= nqsets)
2671182679Skmacy			return EINVAL;
2672167514Skmacy
2673182679Skmacy		i = q1 + t->qset_idx;
2674182679Skmacy		q = &sc->params.sge.qset[i];
2675167514Skmacy		t->rspq_size   = q->rspq_size;
2676167514Skmacy		t->txq_size[0] = q->txq_size[0];
2677167514Skmacy		t->txq_size[1] = q->txq_size[1];
2678167514Skmacy		t->txq_size[2] = q->txq_size[2];
2679167514Skmacy		t->fl_size[0]  = q->fl_size;
2680167514Skmacy		t->fl_size[1]  = q->jumbo_size;
2681167514Skmacy		t->polling     = q->polling;
2682182679Skmacy		t->lro         = q->lro;
2683180583Skmacy		t->intr_lat    = q->coalesce_usecs;
2684167514Skmacy		t->cong_thres  = q->cong_thres;
2685182679Skmacy		t->qnum        = i;
2686182679Skmacy
2687205946Snp		if ((sc->flags & FULL_INIT_DONE) == 0)
2688205946Snp			t->vector = 0;
2689205946Snp		else if (sc->flags & USING_MSIX)
2690182679Skmacy			t->vector = rman_get_start(sc->msix_irq_res[i]);
2691182679Skmacy		else
2692182679Skmacy			t->vector = rman_get_start(sc->irq_res);
2693182679Skmacy
2694167514Skmacy		break;
2695167514Skmacy	}
2696182679Skmacy	case CHELSIO_GET_QSET_NUM: {
2697167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2698182679Skmacy		edata->val = pi->nqsets;
2699182679Skmacy		break;
2700182679Skmacy	}
2701182679Skmacy	case CHELSIO_LOAD_FW: {
2702182679Skmacy		uint8_t *fw_data;
2703182679Skmacy		uint32_t vers;
2704182679Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2705182679Skmacy
2706167514Skmacy		/*
2707182679Skmacy		 * You're allowed to load a firmware only before FULL_INIT_DONE
2708182679Skmacy		 *
2709182679Skmacy		 * FW_UPTODATE is also set so the rest of the initialization
2710182679Skmacy		 * will not overwrite what was loaded here.  This gives you the
2711182679Skmacy		 * flexibility to load any firmware (and maybe shoot yourself in
2712182679Skmacy		 * the foot).
2713167514Skmacy		 */
2714182679Skmacy
2715182679Skmacy		ADAPTER_LOCK(sc);
2716182679Skmacy		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2717182679Skmacy			ADAPTER_UNLOCK(sc);
2718182679Skmacy			return (EBUSY);
2719182679Skmacy		}
2720182679Skmacy
2721182679Skmacy		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2722182679Skmacy		if (!fw_data)
2723182679Skmacy			error = ENOMEM;
2724182679Skmacy		else
2725182679Skmacy			error = copyin(t->buf, fw_data, t->len);
2726182679Skmacy
2727182679Skmacy		if (!error)
2728182679Skmacy			error = -t3_load_fw(sc, fw_data, t->len);
2729182679Skmacy
2730182679Skmacy		if (t3_get_fw_version(sc, &vers) == 0) {
2731182679Skmacy			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2732182679Skmacy			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2733182679Skmacy			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2734182679Skmacy		}
2735182679Skmacy
2736182679Skmacy		if (!error)
2737182679Skmacy			sc->flags |= FW_UPTODATE;
2738182679Skmacy
2739182679Skmacy		free(fw_data, M_DEVBUF);
2740182679Skmacy		ADAPTER_UNLOCK(sc);
2741167514Skmacy		break;
2742167514Skmacy	}
2743182679Skmacy	case CHELSIO_LOAD_BOOT: {
2744182679Skmacy		uint8_t *boot_data;
2745182679Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2746182679Skmacy
2747182679Skmacy		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2748182679Skmacy		if (!boot_data)
2749182679Skmacy			return ENOMEM;
2750182679Skmacy
2751182679Skmacy		error = copyin(t->buf, boot_data, t->len);
2752182679Skmacy		if (!error)
2753182679Skmacy			error = -t3_load_boot(sc, boot_data, t->len);
2754182679Skmacy
2755182679Skmacy		free(boot_data, M_DEVBUF);
2756167514Skmacy		break;
2757167514Skmacy	}
2758182679Skmacy	case CHELSIO_GET_PM: {
2759182679Skmacy		struct ch_pm *m = (struct ch_pm *)data;
2760182679Skmacy		struct tp_params *p = &sc->params.tp;
2761182679Skmacy
2762182679Skmacy		if (!is_offload(sc))
2763182679Skmacy			return (EOPNOTSUPP);
2764182679Skmacy
2765182679Skmacy		m->tx_pg_sz = p->tx_pg_size;
2766182679Skmacy		m->tx_num_pg = p->tx_num_pgs;
2767182679Skmacy		m->rx_pg_sz  = p->rx_pg_size;
2768182679Skmacy		m->rx_num_pg = p->rx_num_pgs;
2769182679Skmacy		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
2770182679Skmacy
2771167514Skmacy		break;
2772182679Skmacy	}
2773182679Skmacy	case CHELSIO_SET_PM: {
2774182679Skmacy		struct ch_pm *m = (struct ch_pm *)data;
2775182679Skmacy		struct tp_params *p = &sc->params.tp;
2776182679Skmacy
2777182679Skmacy		if (!is_offload(sc))
2778182679Skmacy			return (EOPNOTSUPP);
2779182679Skmacy		if (sc->flags & FULL_INIT_DONE)
2780182679Skmacy			return (EBUSY);
2781182679Skmacy
2782182679Skmacy		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2783182679Skmacy		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2784182679Skmacy			return (EINVAL);	/* not power of 2 */
2785182679Skmacy		if (!(m->rx_pg_sz & 0x14000))
2786182679Skmacy			return (EINVAL);	/* not 16KB or 64KB */
2787182679Skmacy		if (!(m->tx_pg_sz & 0x1554000))
2788182679Skmacy			return (EINVAL);
2789182679Skmacy		if (m->tx_num_pg == -1)
2790182679Skmacy			m->tx_num_pg = p->tx_num_pgs;
2791182679Skmacy		if (m->rx_num_pg == -1)
2792182679Skmacy			m->rx_num_pg = p->rx_num_pgs;
2793182679Skmacy		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2794182679Skmacy			return (EINVAL);
2795182679Skmacy		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2796182679Skmacy		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2797182679Skmacy			return (EINVAL);
2798182679Skmacy
2799182679Skmacy		p->rx_pg_size = m->rx_pg_sz;
2800182679Skmacy		p->tx_pg_size = m->tx_pg_sz;
2801182679Skmacy		p->rx_num_pgs = m->rx_num_pg;
2802182679Skmacy		p->tx_num_pgs = m->tx_num_pg;
2803182679Skmacy		break;
2804182679Skmacy	}
2805169978Skmacy	case CHELSIO_SETMTUTAB: {
2806169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2807169978Skmacy		int i;
2808169978Skmacy
2809169978Skmacy		if (!is_offload(sc))
2810169978Skmacy			return (EOPNOTSUPP);
2811169978Skmacy		if (offload_running(sc))
2812169978Skmacy			return (EBUSY);
2813169978Skmacy		if (m->nmtus != NMTUS)
2814169978Skmacy			return (EINVAL);
2815169978Skmacy		if (m->mtus[0] < 81)         /* accommodate SACK */
2816169978Skmacy			return (EINVAL);
2817169978Skmacy
2818169978Skmacy		/*
2819169978Skmacy		 * MTUs must be in ascending order
2820169978Skmacy		 */
2821169978Skmacy		for (i = 1; i < NMTUS; ++i)
2822169978Skmacy			if (m->mtus[i] < m->mtus[i - 1])
2823169978Skmacy				return (EINVAL);
2824169978Skmacy
2825182679Skmacy		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2826169978Skmacy		break;
2827169978Skmacy	}
2828169978Skmacy	case CHELSIO_GETMTUTAB: {
2829169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2830169978Skmacy
2831169978Skmacy		if (!is_offload(sc))
2832169978Skmacy			return (EOPNOTSUPP);
2833169978Skmacy
2834169978Skmacy		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2835169978Skmacy		m->nmtus = NMTUS;
2836169978Skmacy		break;
2837171471Skmacy	}
2838167514Skmacy	case CHELSIO_GET_MEM: {
2839167514Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2840167514Skmacy		struct mc7 *mem;
2841167514Skmacy		uint8_t *useraddr;
2842167514Skmacy		u64 buf[32];
2843182679Skmacy
2844182679Skmacy		/*
2845218909Sbrucec		 * Use these to avoid modifying len/addr in the return
2846182679Skmacy		 * struct
2847182679Skmacy		 */
2848182679Skmacy		uint32_t len = t->len, addr = t->addr;
2849182679Skmacy
2850167514Skmacy		if (!is_offload(sc))
2851167514Skmacy			return (EOPNOTSUPP);
2852167514Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2853167514Skmacy			return (EIO);         /* need the memory controllers */
2854182679Skmacy		if ((addr & 0x7) || (len & 0x7))
2855167514Skmacy			return (EINVAL);
2856167514Skmacy		if (t->mem_id == MEM_CM)
2857167514Skmacy			mem = &sc->cm;
2858167514Skmacy		else if (t->mem_id == MEM_PMRX)
2859167514Skmacy			mem = &sc->pmrx;
2860167514Skmacy		else if (t->mem_id == MEM_PMTX)
2861167514Skmacy			mem = &sc->pmtx;
2862167514Skmacy		else
2863167514Skmacy			return (EINVAL);
2864167514Skmacy
2865167514Skmacy		/*
2866167514Skmacy		 * Version scheme:
2867167514Skmacy		 * bits 0..9: chip version
2868167514Skmacy		 * bits 10..15: chip revision
2869167514Skmacy		 */
2870167514Skmacy		t->version = 3 | (sc->params.rev << 10);
2871167514Skmacy
2872167514Skmacy		/*
2873167514Skmacy		 * Read 256 bytes at a time as len can be large and we don't
2874167514Skmacy		 * want to use huge intermediate buffers.
2875167514Skmacy		 */
2876174708Skmacy		useraddr = (uint8_t *)t->buf;
2877182679Skmacy		while (len) {
2878182679Skmacy			unsigned int chunk = min(len, sizeof(buf));
2879167514Skmacy
2880182679Skmacy			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2881167514Skmacy			if (error)
2882167514Skmacy				return (-error);
2883167514Skmacy			if (copyout(buf, useraddr, chunk))
2884167514Skmacy				return (EFAULT);
2885167514Skmacy			useraddr += chunk;
2886182679Skmacy			addr += chunk;
2887182679Skmacy			len -= chunk;
2888167514Skmacy		}
2889167514Skmacy		break;
2890167514Skmacy	}
2891169978Skmacy	case CHELSIO_READ_TCAM_WORD: {
2892169978Skmacy		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2893169978Skmacy
2894169978Skmacy		if (!is_offload(sc))
2895169978Skmacy			return (EOPNOTSUPP);
2896171471Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2897171471Skmacy			return (EIO);         /* need MC5 */
2898169978Skmacy		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2899169978Skmacy		break;
2900169978Skmacy	}
2901167514Skmacy	case CHELSIO_SET_TRACE_FILTER: {
2902167514Skmacy		struct ch_trace *t = (struct ch_trace *)data;
2903167514Skmacy		const struct trace_params *tp;
2904167514Skmacy
2905167514Skmacy		tp = (const struct trace_params *)&t->sip;
2906167514Skmacy		if (t->config_tx)
2907167514Skmacy			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2908167514Skmacy					       t->trace_tx);
2909167514Skmacy		if (t->config_rx)
2910167514Skmacy			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2911167514Skmacy					       t->trace_rx);
2912167514Skmacy		break;
2913167514Skmacy	}
2914167514Skmacy	case CHELSIO_SET_PKTSCHED: {
2915167514Skmacy		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2916167514Skmacy		if (sc->open_device_map == 0)
2917167514Skmacy			return (EAGAIN);
2918167514Skmacy		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2919167514Skmacy		    p->binding);
2920167514Skmacy		break;
2921167514Skmacy	}
2922167514Skmacy	case CHELSIO_IFCONF_GETREGS: {
2923182679Skmacy		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2924167514Skmacy		int reglen = cxgb_get_regs_len();
2925182679Skmacy		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
2926167514Skmacy		if (buf == NULL) {
2927167514Skmacy			return (ENOMEM);
2928182679Skmacy		}
2929182679Skmacy		if (regs->len > reglen)
2930167514Skmacy			regs->len = reglen;
2931182679Skmacy		else if (regs->len < reglen)
2932189643Sgnn			error = ENOBUFS;
2933182679Skmacy
2934182679Skmacy		if (!error) {
2935182679Skmacy			cxgb_get_regs(sc, regs, buf);
2936182679Skmacy			error = copyout(buf, regs->data, reglen);
2937167514Skmacy		}
2938167514Skmacy		free(buf, M_DEVBUF);
2939167514Skmacy
2940167514Skmacy		break;
2941167514Skmacy	}
2942169978Skmacy	case CHELSIO_SET_HW_SCHED: {
2943169978Skmacy		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2944169978Skmacy		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2945169978Skmacy
2946169978Skmacy		if ((sc->flags & FULL_INIT_DONE) == 0)
2947169978Skmacy			return (EAGAIN);       /* need TP to be initialized */
2948169978Skmacy		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2949169978Skmacy		    !in_range(t->channel, 0, 1) ||
2950169978Skmacy		    !in_range(t->kbps, 0, 10000000) ||
2951169978Skmacy		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2952169978Skmacy		    !in_range(t->flow_ipg, 0,
2953169978Skmacy			      dack_ticks_to_usec(sc, 0x7ff)))
2954169978Skmacy			return (EINVAL);
2955169978Skmacy
2956169978Skmacy		if (t->kbps >= 0) {
2957169978Skmacy			error = t3_config_sched(sc, t->kbps, t->sched);
2958169978Skmacy			if (error < 0)
2959169978Skmacy				return (-error);
2960169978Skmacy		}
2961169978Skmacy		if (t->class_ipg >= 0)
2962169978Skmacy			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2963169978Skmacy		if (t->flow_ipg >= 0) {
2964169978Skmacy			t->flow_ipg *= 1000;     /* us -> ns */
2965169978Skmacy			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2966169978Skmacy		}
2967169978Skmacy		if (t->mode >= 0) {
2968169978Skmacy			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2969169978Skmacy
2970169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2971169978Skmacy					 bit, t->mode ? bit : 0);
2972169978Skmacy		}
2973169978Skmacy		if (t->channel >= 0)
2974169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2975169978Skmacy					 1 << t->sched, t->channel << t->sched);
2976169978Skmacy		break;
2977182679Skmacy	}
2978182679Skmacy	case CHELSIO_GET_EEPROM: {
2979182679Skmacy		int i;
2980182679Skmacy		struct ch_eeprom *e = (struct ch_eeprom *)data;
2981182679Skmacy		uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
2982182679Skmacy
2983182679Skmacy		if (buf == NULL) {
2984182679Skmacy			return (ENOMEM);
2985182679Skmacy		}
2986182679Skmacy		e->magic = EEPROM_MAGIC;
2987182679Skmacy		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
2988182679Skmacy			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
2989182679Skmacy
2990182679Skmacy		if (!error)
2991182679Skmacy			error = copyout(buf + e->offset, e->data, e->len);
2992182679Skmacy
2993182679Skmacy		free(buf, M_DEVBUF);
2994182679Skmacy		break;
2995182679Skmacy	}
2996182679Skmacy	case CHELSIO_CLEAR_STATS: {
2997182679Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2998182679Skmacy			return EAGAIN;
2999182679Skmacy
3000182679Skmacy		PORT_LOCK(pi);
3001182679Skmacy		t3_mac_update_stats(&pi->mac);
3002182679Skmacy		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
3003182679Skmacy		PORT_UNLOCK(pi);
3004182679Skmacy		break;
3005182679Skmacy	}
3006189643Sgnn	case CHELSIO_GET_UP_LA: {
3007189643Sgnn		struct ch_up_la *la = (struct ch_up_la *)data;
3008189643Sgnn		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
3009189643Sgnn		if (buf == NULL) {
3010189643Sgnn			return (ENOMEM);
3011189643Sgnn		}
3012189643Sgnn		if (la->bufsize < LA_BUFSIZE)
3013189643Sgnn			error = ENOBUFS;
3014189643Sgnn
3015189643Sgnn		if (!error)
3016189643Sgnn			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
3017189643Sgnn					      &la->bufsize, buf);
3018189643Sgnn		if (!error)
3019189643Sgnn			error = copyout(buf, la->data, la->bufsize);
3020189643Sgnn
3021189643Sgnn		free(buf, M_DEVBUF);
3022189643Sgnn		break;
3023189643Sgnn	}
3024189643Sgnn	case CHELSIO_GET_UP_IOQS: {
3025189643Sgnn		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
3026189643Sgnn		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
3027189643Sgnn		uint32_t *v;
3028189643Sgnn
3029189643Sgnn		if (buf == NULL) {
3030189643Sgnn			return (ENOMEM);
3031189643Sgnn		}
3032189643Sgnn		if (ioqs->bufsize < IOQS_BUFSIZE)
3033189643Sgnn			error = ENOBUFS;
3034189643Sgnn
3035189643Sgnn		if (!error)
3036189643Sgnn			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3037189643Sgnn
3038189643Sgnn		if (!error) {
3039189643Sgnn			v = (uint32_t *)buf;
3040189643Sgnn
3041189643Sgnn			ioqs->ioq_rx_enable = *v++;
3042189643Sgnn			ioqs->ioq_tx_enable = *v++;
3043189643Sgnn			ioqs->ioq_rx_status = *v++;
3044189643Sgnn			ioqs->ioq_tx_status = *v++;
3045189643Sgnn
3046189643Sgnn			error = copyout(v, ioqs->data, ioqs->bufsize);
3047189643Sgnn		}
3048189643Sgnn
3049189643Sgnn		free(buf, M_DEVBUF);
3050189643Sgnn		break;
3051189643Sgnn	}
3052207643Snp	case CHELSIO_SET_FILTER: {
3053207643Snp		struct ch_filter *f = (struct ch_filter *)data;;
3054207643Snp		struct filter_info *p;
3055207643Snp		unsigned int nfilters = sc->params.mc5.nfilters;
3056207643Snp
3057207643Snp		if (!is_offload(sc))
3058207643Snp			return (EOPNOTSUPP);	/* No TCAM */
3059207643Snp		if (!(sc->flags & FULL_INIT_DONE))
3060207643Snp			return (EAGAIN);	/* mc5 not setup yet */
3061207643Snp		if (nfilters == 0)
3062207643Snp			return (EBUSY);		/* TOE will use TCAM */
3063207643Snp
3064207643Snp		/* sanity checks */
3065207643Snp		if (f->filter_id >= nfilters ||
3066207643Snp		    (f->val.dip && f->mask.dip != 0xffffffff) ||
3067207643Snp		    (f->val.sport && f->mask.sport != 0xffff) ||
3068207643Snp		    (f->val.dport && f->mask.dport != 0xffff) ||
3069207643Snp		    (f->val.vlan && f->mask.vlan != 0xfff) ||
3070207643Snp		    (f->val.vlan_prio &&
3071207643Snp			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
3072207643Snp		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
3073207643Snp		    f->qset >= SGE_QSETS ||
3074207643Snp		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
3075207643Snp			return (EINVAL);
3076207643Snp
3077207643Snp		/* Was allocated with M_WAITOK */
3078207643Snp		KASSERT(sc->filters, ("filter table NULL\n"));
3079207643Snp
3080207643Snp		p = &sc->filters[f->filter_id];
3081207643Snp		if (p->locked)
3082207643Snp			return (EPERM);
3083207643Snp
3084207643Snp		bzero(p, sizeof(*p));
3085207643Snp		p->sip = f->val.sip;
3086207643Snp		p->sip_mask = f->mask.sip;
3087207643Snp		p->dip = f->val.dip;
3088207643Snp		p->sport = f->val.sport;
3089207643Snp		p->dport = f->val.dport;
3090207643Snp		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
3091207643Snp		p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
3092207643Snp		    FILTER_NO_VLAN_PRI;
3093207643Snp		p->mac_hit = f->mac_hit;
3094207643Snp		p->mac_vld = f->mac_addr_idx != 0xffff;
3095207643Snp		p->mac_idx = f->mac_addr_idx;
3096207643Snp		p->pkt_type = f->proto;
3097207643Snp		p->report_filter_id = f->want_filter_id;
3098207643Snp		p->pass = f->pass;
3099207643Snp		p->rss = f->rss;
3100207643Snp		p->qset = f->qset;
3101207643Snp
3102207643Snp		error = set_filter(sc, f->filter_id, p);
3103207643Snp		if (error == 0)
3104207643Snp			p->valid = 1;
3105207643Snp		break;
3106207643Snp	}
3107207643Snp	case CHELSIO_DEL_FILTER: {
3108207643Snp		struct ch_filter *f = (struct ch_filter *)data;
3109207643Snp		struct filter_info *p;
3110207643Snp		unsigned int nfilters = sc->params.mc5.nfilters;
3111207643Snp
3112207643Snp		if (!is_offload(sc))
3113207643Snp			return (EOPNOTSUPP);
3114207643Snp		if (!(sc->flags & FULL_INIT_DONE))
3115207643Snp			return (EAGAIN);
3116207643Snp		if (nfilters == 0 || sc->filters == NULL)
3117207643Snp			return (EINVAL);
3118207643Snp		if (f->filter_id >= nfilters)
3119207643Snp		       return (EINVAL);
3120207643Snp
3121207643Snp		p = &sc->filters[f->filter_id];
3122207643Snp		if (p->locked)
3123207643Snp			return (EPERM);
3124207643Snp		if (!p->valid)
3125207643Snp			return (EFAULT); /* Read "Bad address" as "Bad index" */
3126207643Snp
3127207643Snp		bzero(p, sizeof(*p));
3128207643Snp		p->sip = p->sip_mask = 0xffffffff;
3129207643Snp		p->vlan = 0xfff;
3130207643Snp		p->vlan_prio = FILTER_NO_VLAN_PRI;
3131207643Snp		p->pkt_type = 1;
3132207643Snp		error = set_filter(sc, f->filter_id, p);
3133207643Snp		break;
3134207643Snp	}
3135207643Snp	case CHELSIO_GET_FILTER: {
3136207643Snp		struct ch_filter *f = (struct ch_filter *)data;
3137207643Snp		struct filter_info *p;
3138207643Snp		unsigned int i, nfilters = sc->params.mc5.nfilters;
3139207643Snp
3140207643Snp		if (!is_offload(sc))
3141207643Snp			return (EOPNOTSUPP);
3142207643Snp		if (!(sc->flags & FULL_INIT_DONE))
3143207643Snp			return (EAGAIN);
3144207643Snp		if (nfilters == 0 || sc->filters == NULL)
3145207643Snp			return (EINVAL);
3146207643Snp
3147207643Snp		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
3148207643Snp		for (; i < nfilters; i++) {
3149207643Snp			p = &sc->filters[i];
3150207643Snp			if (!p->valid)
3151207643Snp				continue;
3152207643Snp
3153207643Snp			bzero(f, sizeof(*f));
3154207643Snp
3155207643Snp			f->filter_id = i;
3156207643Snp			f->val.sip = p->sip;
3157207643Snp			f->mask.sip = p->sip_mask;
3158207643Snp			f->val.dip = p->dip;
3159207643Snp			f->mask.dip = p->dip ? 0xffffffff : 0;
3160207643Snp			f->val.sport = p->sport;
3161207643Snp			f->mask.sport = p->sport ? 0xffff : 0;
3162207643Snp			f->val.dport = p->dport;
3163207643Snp			f->mask.dport = p->dport ? 0xffff : 0;
3164207643Snp			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
3165207643Snp			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
3166207643Snp			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3167207643Snp			    0 : p->vlan_prio;
3168207643Snp			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3169207643Snp			    0 : FILTER_NO_VLAN_PRI;
3170207643Snp			f->mac_hit = p->mac_hit;
3171207643Snp			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
3172207643Snp			f->proto = p->pkt_type;
3173207643Snp			f->want_filter_id = p->report_filter_id;
3174207643Snp			f->pass = p->pass;
3175207643Snp			f->rss = p->rss;
3176207643Snp			f->qset = p->qset;
3177207643Snp
3178207643Snp			break;
3179207643Snp		}
3180207643Snp
3181207643Snp		if (i == nfilters)
3182207643Snp			f->filter_id = 0xffffffff;
3183207643Snp		break;
3184207643Snp	}
3185167514Skmacy	default:
3186167514Skmacy		return (EOPNOTSUPP);
3187167514Skmacy		break;
3188167514Skmacy	}
3189167514Skmacy
3190167514Skmacy	return (error);
3191167514Skmacy}
3192167514Skmacy
3193167514Skmacystatic __inline void
3194167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3195167514Skmacy    unsigned int end)
3196167514Skmacy{
3197182679Skmacy	uint32_t *p = (uint32_t *)(buf + start);
3198167514Skmacy
3199167514Skmacy	for ( ; start <= end; start += sizeof(uint32_t))
3200167514Skmacy		*p++ = t3_read_reg(ap, start);
3201167514Skmacy}
3202167514Skmacy
#define T3_REGMAP_SIZE (3 * 1024)
/*
 * Size in bytes of the buffer required by cxgb_get_regs() for a full
 * register snapshot.  Returns T3_REGMAP_SIZE (3KB).
 */
static int
cxgb_get_regs_len(void)
{
	/* Parenthesized return for consistency with the rest of this file. */
	return (T3_REGMAP_SIZE);
}
3209167514Skmacy
/*
 * Snapshot the adapter's register space into 'buf' (caller supplies a
 * buffer of at least cxgb_get_regs_len() bytes) and record a version
 * word in 'regs'.  Registers are dumped as a series of contiguous
 * ranges; offsets not covered by any range read back as zero.
 */
static void
cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
{

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	/* Zero first so skipped (unread) register offsets are well-defined. */
	memset(buf, 0, cxgb_get_regs_len());
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	/* XGM_REG(reg, 1) selects the second MAC's copy of an XGMAC register. */
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
3238176572Skmacy
3239207643Snpstatic int
3240207643Snpalloc_filters(struct adapter *sc)
3241207643Snp{
3242207643Snp	struct filter_info *p;
3243207643Snp	unsigned int nfilters = sc->params.mc5.nfilters;
3244176572Skmacy
3245207643Snp	if (nfilters == 0)
3246207643Snp		return (0);
3247207643Snp
3248207643Snp	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
3249207643Snp	sc->filters = p;
3250207643Snp
3251207643Snp	p = &sc->filters[nfilters - 1];
3252207643Snp	p->vlan = 0xfff;
3253207643Snp	p->vlan_prio = FILTER_NO_VLAN_PRI;
3254207643Snp	p->pass = p->rss = p->valid = p->locked = 1;
3255207643Snp
3256207643Snp	return (0);
3257207643Snp}
3258207643Snp
3259207643Snpstatic int
3260207643Snpsetup_hw_filters(struct adapter *sc)
3261207643Snp{
3262207643Snp	int i, rc;
3263207643Snp	unsigned int nfilters = sc->params.mc5.nfilters;
3264207643Snp
3265207643Snp	if (!sc->filters)
3266207643Snp		return (0);
3267207643Snp
3268207643Snp	t3_enable_filters(sc);
3269207643Snp
3270207643Snp	for (i = rc = 0; i < nfilters && !rc; i++) {
3271207643Snp		if (sc->filters[i].locked)
3272207643Snp			rc = set_filter(sc, i, &sc->filters[i]);
3273207643Snp	}
3274207643Snp
3275207643Snp	return (rc);
3276207643Snp}
3277207643Snp
/*
 * Write one filter to the hardware.  'id' is the index into the software
 * filter table; it is rebased onto the filter region of the MC5 TCAM
 * before being used as a TID.  The filter is installed with a single
 * atomic work request carrying three back-to-back CPL messages:
 * a CPL_PASS_OPEN_REQ describing the match criteria, plus two
 * CPL_SET_TCB_FIELD writes configuring the filter's TCB.  A filter that
 * passes packets without RSS needs one extra (non-atomic) TCB write to
 * steer traffic to the requested queue set.
 *
 * Returns 0.  May sleep (M_WAITOK mbuf allocation).
 */
static int
set_filter(struct adapter *sc, int id, const struct filter_info *f)
{
	int len;
	struct mbuf *m;
	struct ulp_txpkt *txpkt;
	struct work_request_hdr *wr;
	struct cpl_pass_open_req *oreq;
	struct cpl_set_tcb_field *sreq;

	/* Whole request (header + 3 CPLs) must fit in a single mbuf. */
	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));

	/*
	 * Filters occupy the tail of the MC5 TCAM, just before the routing
	 * region; rebase the table index onto that region to get the TID.
	 */
	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
	      sc->params.mc5.nfilters;

	m = m_gethdr(M_WAITOK, MT_DATA);
	m->m_len = m->m_pkthdr.len = len;
	bzero(mtod(m, char *), len);

	/* BYPASS work request: payload is raw CPLs; ATOMIC = all-or-nothing. */
	wr = mtod(m, struct work_request_hdr *);
	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);

	/* CPL 1: PASS_OPEN_REQ carries the filter's match tuple. */
	oreq = (struct cpl_pass_open_req *)(wr + 1);
	txpkt = (struct ulp_txpkt *)oreq;
	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
	/* "local" is the destination side, "peer" the source side. */
	oreq->local_port = htons(f->dport);
	oreq->peer_port = htons(f->sport);
	oreq->local_ip = htonl(f->dip);
	oreq->peer_ip = htonl(f->sip);
	oreq->peer_netmask = htonl(f->sip_mask);
	oreq->opt0h = 0;
	oreq->opt0l = htonl(F_NO_OFFLOAD);
	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
			 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
			 V_VLAN_PRI(f->vlan_prio >> 1) |
			 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
			 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
			 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));

	/* CPLs 2 and 3: TCB field writes (disposition bits, then activation). */
	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
			  (f->report_filter_id << 15) | (1 << 23) |
			  ((u64)f->pass << 35) | ((u64)!f->rss << 36));
	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
	t3_mgmt_tx(sc, m);

	if (f->pass && !f->rss) {
		/*
		 * Pass-without-RSS: an additional standalone TCB write steers
		 * matching packets to the response queue of the chosen qset.
		 */
		len = sizeof(*sreq);
		m = m_gethdr(M_WAITOK, MT_DATA);
		m->m_len = m->m_pkthdr.len = len;
		bzero(mtod(m, char *), len);
		sreq = mtod(m, struct cpl_set_tcb_field *);
		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
				 (u64)sc->rrss_map[f->qset] << 19);
		t3_mgmt_tx(sc, m);
	}
	return 0;
}
3340207643Snp
3341207643Snpstatic inline void
3342207643Snpmk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
3343207643Snp    unsigned int word, u64 mask, u64 val)
3344207643Snp{
3345207643Snp	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
3346207643Snp	req->reply = V_NO_REPLY(1);
3347207643Snp	req->cpu_idx = 0;
3348207643Snp	req->word = htons(word);
3349207643Snp	req->mask = htobe64(mask);
3350207643Snp	req->val = htobe64(val);
3351207643Snp}
3352207643Snp
3353207643Snpstatic inline void
3354207643Snpset_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
3355207643Snp    unsigned int word, u64 mask, u64 val)
3356207643Snp{
3357207643Snp	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
3358207643Snp
3359207643Snp	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3360207643Snp	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3361207643Snp	mk_set_tcb_field(req, tid, word, mask, val);
3362207643Snp}
3363