1167514Skmacy/**************************************************************************
2167514Skmacy
3189643SgnnCopyright (c) 2007-2009, Chelsio Inc.
4167514SkmacyAll rights reserved.
5167514Skmacy
6167514SkmacyRedistribution and use in source and binary forms, with or without
7167514Skmacymodification, are permitted provided that the following conditions are met:
8167514Skmacy
9167514Skmacy 1. Redistributions of source code must retain the above copyright notice,
10167514Skmacy    this list of conditions and the following disclaimer.
11167514Skmacy
12178302Skmacy 2. Neither the name of the Chelsio Corporation nor the names of its
13167514Skmacy    contributors may be used to endorse or promote products derived from
14167514Skmacy    this software without specific prior written permission.
15167514Skmacy
16167514SkmacyTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17167514SkmacyAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18167514SkmacyIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19167514SkmacyARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20167514SkmacyLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21167514SkmacyCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22167514SkmacySUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23167514SkmacyINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24167514SkmacyCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25167514SkmacyARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26167514SkmacyPOSSIBILITY OF SUCH DAMAGE.
27167514Skmacy
28167514Skmacy***************************************************************************/
29167514Skmacy
30167514Skmacy#include <sys/cdefs.h>
31167514Skmacy__FBSDID("$FreeBSD$");
32167514Skmacy
33237920Snp#include "opt_inet.h"
34237920Snp
35167514Skmacy#include <sys/param.h>
36167514Skmacy#include <sys/systm.h>
37167514Skmacy#include <sys/kernel.h>
38167514Skmacy#include <sys/bus.h>
39167514Skmacy#include <sys/module.h>
40167514Skmacy#include <sys/pciio.h>
41167514Skmacy#include <sys/conf.h>
42167514Skmacy#include <machine/bus.h>
43167514Skmacy#include <machine/resource.h>
44167514Skmacy#include <sys/bus_dma.h>
45176472Skmacy#include <sys/ktr.h>
46167514Skmacy#include <sys/rman.h>
47167514Skmacy#include <sys/ioccom.h>
48167514Skmacy#include <sys/mbuf.h>
49167514Skmacy#include <sys/linker.h>
50167514Skmacy#include <sys/firmware.h>
51167514Skmacy#include <sys/socket.h>
52167514Skmacy#include <sys/sockio.h>
53167514Skmacy#include <sys/smp.h>
54167514Skmacy#include <sys/sysctl.h>
55174708Skmacy#include <sys/syslog.h>
56167514Skmacy#include <sys/queue.h>
57167514Skmacy#include <sys/taskqueue.h>
58174708Skmacy#include <sys/proc.h>
59167514Skmacy
60167514Skmacy#include <net/bpf.h>
61167514Skmacy#include <net/ethernet.h>
62167514Skmacy#include <net/if.h>
63167514Skmacy#include <net/if_arp.h>
64167514Skmacy#include <net/if_dl.h>
65167514Skmacy#include <net/if_media.h>
66167514Skmacy#include <net/if_types.h>
67180583Skmacy#include <net/if_vlan_var.h>
68167514Skmacy
69167514Skmacy#include <netinet/in_systm.h>
70167514Skmacy#include <netinet/in.h>
71167514Skmacy#include <netinet/if_ether.h>
72167514Skmacy#include <netinet/ip.h>
73167514Skmacy#include <netinet/ip.h>
74167514Skmacy#include <netinet/tcp.h>
75167514Skmacy#include <netinet/udp.h>
76167514Skmacy
77167514Skmacy#include <dev/pci/pcireg.h>
78167514Skmacy#include <dev/pci/pcivar.h>
79167514Skmacy#include <dev/pci/pci_private.h>
80167514Skmacy
81170076Skmacy#include <cxgb_include.h>
82167514Skmacy
83167514Skmacy#ifdef PRIV_SUPPORTED
84167514Skmacy#include <sys/priv.h>
85167514Skmacy#endif
86167514Skmacy
/* Interrupt allocation/teardown (MSI-X, MSI, or INTx). */
static int cxgb_setup_interrupts(adapter_t *);
static void cxgb_teardown_interrupts(adapter_t *);
/* ifnet bring-up/teardown and ioctl entry points for the ports. */
static void cxgb_init(void *);
static int cxgb_init_locked(struct port_info *);
static int cxgb_uninit_locked(struct port_info *);
static int cxgb_uninit_synchronized(struct port_info *);
static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
/* ifmedia handlers. */
static int cxgb_media_change(struct ifnet *);
static int cxgb_ifm_type(int);
static void cxgb_build_medialist(struct port_info *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
/* SGE queue-set and async-interrupt plumbing. */
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
/* Periodic adapter health/statistics tick and link-state polling. */
static void cxgb_tick_handler(void *, int);
static void cxgb_tick(void *);
static void link_check_callout(void *);
static void check_link_status(void *, int);
static void setup_rss(adapter_t *sc);
/* Hardware packet-filter (steering) table management. */
static int alloc_filters(struct adapter *);
static int setup_hw_filters(struct adapter *);
static int set_filter(struct adapter *, int, const struct filter_info *);
static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
    unsigned int, u64, u64);
static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
    unsigned int, u64, u64);
#ifdef TCP_OFFLOAD
/* Default CPL handler installed for every opcode until a ULD claims it. */
static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
#endif

/* Attachment glue for the PCI controller end of the device.  Each port of
 * the device is attached separately, as defined later.
 */
static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end);
static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
static int cxgb_get_regs_len(void);
static void touch_bars(device_t dev);
static void cxgb_update_mac_settings(struct port_info *p);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
132167514Skmacy
/* newbus method table for the controller (cxgbc) device. */
static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe,		cxgb_controller_probe),
	DEVMETHOD(device_attach,	cxgb_controller_attach),
	DEVMETHOD(device_detach,	cxgb_controller_detach),

	DEVMETHOD_END
};
140167514Skmacy
/* Driver description for the controller; softc is the whole adapter. */
static driver_t cxgb_controller_driver = {
	"cxgbc",
	cxgb_controller_methods,
	sizeof(struct adapter)
};
146167514Skmacy
/*
 * Register the controller driver on the pci bus.  cxgbc_mod_event handles
 * module load/unload events.  The driver needs firmware(9) to load the
 * FW/SRAM images.
 */
static int cxgbc_mod_event(module_t, int, void *);
static devclass_t	cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass,
    cxgbc_mod_event, 0);
MODULE_VERSION(cxgbc, 1);
MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);
153167514Skmacy
154167514Skmacy/*
155167514Skmacy * Attachment glue for the ports.  Attachment is done directly to the
156167514Skmacy * controller device.
157167514Skmacy */
158167514Skmacystatic int cxgb_port_probe(device_t);
159167514Skmacystatic int cxgb_port_attach(device_t);
160167514Skmacystatic int cxgb_port_detach(device_t);
161167514Skmacy
162167514Skmacystatic device_method_t cxgb_port_methods[] = {
163167514Skmacy	DEVMETHOD(device_probe,		cxgb_port_probe),
164167514Skmacy	DEVMETHOD(device_attach,	cxgb_port_attach),
165167514Skmacy	DEVMETHOD(device_detach,	cxgb_port_detach),
166167514Skmacy	{ 0, 0 }
167167514Skmacy};
168167514Skmacy
/*
 * Driver description for the ports.  The softc is set explicitly by the
 * controller (device_set_softc in cxgb_controller_attach), hence size 0.
 */
static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0
};
174167514Skmacy
/* Character-device entry points for the management /dev node. */
static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

static struct cdevsw cxgb_cdevsw = {
       .d_version =    D_VERSION,
       .d_flags =      0,
       .d_open =       cxgb_extension_open,
       .d_close =      cxgb_extension_close,
       .d_ioctl =      cxgb_extension_ioctl,
       .d_name =       "cxgb",
};

/* Ports attach to the controller device, not directly to a bus. */
static devclass_t	cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
MODULE_VERSION(cxgb, 1);

/* Global list of attached T3 adapters, protected by t3_list_lock. */
static struct mtx t3_list_lock;
static SLIST_HEAD(, adapter) t3_list;
#ifdef TCP_OFFLOAD
/* Registered upper-layer drivers (TOE), protected by t3_uld_list_lock. */
static struct mtx t3_uld_list_lock;
static SLIST_HEAD(, uld_info) t3_uld_list;
#endif
198237920Snp
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1 : only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");

/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use multiq = 0
 */
static int multiq = 1;
TUNABLE_INT("hw.cxgb.multiq", &multiq);
SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
    "use min(ncpus/ports, 8) queue-sets per port");

/*
 * By default the driver will not update the firmware unless
 * it was compiled against a newer version
 *
 */
static int force_fw_update = 0;
TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
    "update firmware even if up to date");

/* -1 lets the SGE code pick a default based on available memory. */
int cxgb_use_16k_clusters = -1;
TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");

/* -1 means the driver sizes the filter table itself. */
static int nfilters = -1;
TUNABLE_INT("hw.cxgb.nfilters", &nfilters);
SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
    &nfilters, 0, "max number of entries in the filter table");
243194039Sgnn
/* Bounds on user-configurable SGE queue ring sizes (in descriptors). */
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32,
	MIN_FL_JUMBO_ENTRIES = 32
};
256167514Skmacy
/*
 * Software state for one hardware packet-filter entry.  Passed to
 * set_filter() to program the corresponding TCB fields.
 */
struct filter_info {
	u32 sip;			/* source IP to match */
	u32 sip_mask;			/* mask applied to sip */
	u32 dip;			/* destination IP to match */
	u16 sport;			/* source port to match */
	u16 dport;			/* destination port to match */
	u32 vlan:12;			/* VLAN id to match */
	u32 vlan_prio:3;		/* VLAN priority; FILTER_NO_VLAN_PRI = any */
	u32 mac_hit:1;			/* match on exact-MAC hit */
	u32 mac_idx:4;			/* index of matched exact-MAC entry */
	u32 mac_vld:1;			/* mac_idx is valid */
	u32 pkt_type:2;			/* packet type to match */
	u32 report_filter_id:1;		/* report filter id in RSS hash field */
	u32 pass:1;			/* pass (1) or drop (0) matching packets */
	u32 rss:1;			/* use RSS to pick the destination queue */
	u32 qset:3;			/* destination queue set when !rss */
	u32 locked:1;			/* entry may not be modified/removed */
	u32 valid:1;			/* entry is programmed in hardware */
};

/* vlan_prio value meaning "do not match on VLAN priority". */
enum { FILTER_NO_VLAN_PRI = 7 };
278171471Skmacy
/* Magic value identifying a valid EEPROM image in ch_ifconf requests. */
#define EEPROM_MAGIC 0x38E2F10C

/* Bitmask covering every possible port on an adapter. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/*
 * Table for probing the cards.  desc serves as a human-readable label and
 * as the table terminator (cxgb_get_ident() stops at the NULL desc entry);
 * index selects the adapter_info record via t3_get_adapter_info().
 */
struct cxgb_ident {
	uint16_t	vendor;
	uint16_t	device;
	int		index;
	char		*desc;
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
	{0, 0, 0, NULL}
};
306167514Skmacy
307171471Skmacystatic int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
308171471Skmacy
309176472Skmacy
310174708Skmacystatic __inline char
311171471Skmacyt3rev2char(struct adapter *adapter)
312171471Skmacy{
313171471Skmacy	char rev = 'z';
314171471Skmacy
315171471Skmacy	switch(adapter->params.rev) {
316171471Skmacy	case T3_REV_A:
317171471Skmacy		rev = 'a';
318171471Skmacy		break;
319171471Skmacy	case T3_REV_B:
320171471Skmacy	case T3_REV_B2:
321171471Skmacy		rev = 'b';
322171471Skmacy		break;
323171471Skmacy	case T3_REV_C:
324171471Skmacy		rev = 'c';
325171471Skmacy		break;
326171471Skmacy	}
327171471Skmacy	return rev;
328171471Skmacy}
329171471Skmacy
330167514Skmacystatic struct cxgb_ident *
331167514Skmacycxgb_get_ident(device_t dev)
332167514Skmacy{
333167514Skmacy	struct cxgb_ident *id;
334167514Skmacy
335167514Skmacy	for (id = cxgb_identifiers; id->desc != NULL; id++) {
336167514Skmacy		if ((id->vendor == pci_get_vendor(dev)) &&
337167514Skmacy		    (id->device == pci_get_device(dev))) {
338167514Skmacy			return (id);
339167514Skmacy		}
340167514Skmacy	}
341167514Skmacy	return (NULL);
342167514Skmacy}
343167514Skmacy
344167514Skmacystatic const struct adapter_info *
345167514Skmacycxgb_get_adapter_info(device_t dev)
346167514Skmacy{
347167514Skmacy	struct cxgb_ident *id;
348167514Skmacy	const struct adapter_info *ai;
349183063Skmacy
350167514Skmacy	id = cxgb_get_ident(dev);
351167514Skmacy	if (id == NULL)
352167514Skmacy		return (NULL);
353167514Skmacy
354167514Skmacy	ai = t3_get_adapter_info(id->index);
355167514Skmacy
356167514Skmacy	return (ai);
357167514Skmacy}
358167514Skmacy
359167514Skmacystatic int
360167514Skmacycxgb_controller_probe(device_t dev)
361167514Skmacy{
362167514Skmacy	const struct adapter_info *ai;
363167514Skmacy	char *ports, buf[80];
364170654Skmacy	int nports;
365183063Skmacy
366167514Skmacy	ai = cxgb_get_adapter_info(dev);
367167514Skmacy	if (ai == NULL)
368167514Skmacy		return (ENXIO);
369167514Skmacy
370170654Skmacy	nports = ai->nports0 + ai->nports1;
371170654Skmacy	if (nports == 1)
372167514Skmacy		ports = "port";
373167514Skmacy	else
374167514Skmacy		ports = "ports";
375167514Skmacy
376199237Snp	snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
377167514Skmacy	device_set_desc_copy(dev, buf);
378167514Skmacy	return (BUS_PROBE_DEFAULT);
379167514Skmacy}
380167514Skmacy
381176572Skmacy#define FW_FNAME "cxgb_t3fw"
382190330Sgnn#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
383190330Sgnn#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
384171471Skmacy
385167514Skmacystatic int
386169978Skmacyupgrade_fw(adapter_t *sc)
387167514Skmacy{
388167514Skmacy	const struct firmware *fw;
389167514Skmacy	int status;
390205944Snp	u32 vers;
391167514Skmacy
392176572Skmacy	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
393176572Skmacy		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
394169978Skmacy		return (ENOENT);
395171471Skmacy	} else
396205944Snp		device_printf(sc->dev, "installing firmware on card\n");
397167514Skmacy	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
398167514Skmacy
399205944Snp	if (status != 0) {
400205944Snp		device_printf(sc->dev, "failed to install firmware: %d\n",
401205944Snp		    status);
402205944Snp	} else {
403205944Snp		t3_get_fw_version(sc, &vers);
404205944Snp		snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
405205944Snp		    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
406205944Snp		    G_FW_VERSION_MICRO(vers));
407205944Snp	}
408205944Snp
409167514Skmacy	firmware_put(fw, FIRMWARE_UNLOAD);
410167514Skmacy
411167514Skmacy	return (status);
412167514Skmacy}
413167514Skmacy
414192537Sgnn/*
415192537Sgnn * The cxgb_controller_attach function is responsible for the initial
416192537Sgnn * bringup of the device.  Its responsibilities include:
417192537Sgnn *
418192537Sgnn *  1. Determine if the device supports MSI or MSI-X.
419192537Sgnn *  2. Allocate bus resources so that we can access the Base Address Register
420192537Sgnn *  3. Create and initialize mutexes for the controller and its control
421192537Sgnn *     logic such as SGE and MDIO.
422192537Sgnn *  4. Call hardware specific setup routine for the adapter as a whole.
423192537Sgnn *  5. Allocate the BAR for doing MSI-X.
424192537Sgnn *  6. Setup the line interrupt iff MSI-X is not supported.
425192537Sgnn *  7. Create the driver's taskq.
426192584Sgnn *  8. Start one task queue service thread.
427192584Sgnn *  9. Check if the firmware and SRAM are up-to-date.  They will be
428192584Sgnn *     auto-updated later (before FULL_INIT_DONE), if required.
429192537Sgnn * 10. Create a child device for each MAC (port)
430192537Sgnn * 11. Initialize T3 private state.
431192537Sgnn * 12. Trigger the LED
432192537Sgnn * 13. Setup offload iff supported.
433192537Sgnn * 14. Reset/restart the tick callout.
434192537Sgnn * 15. Attach sysctls
435192537Sgnn *
436192537Sgnn * NOTE: Any modification or deviation from this list MUST be reflected in
437192537Sgnn * the above comment.  Failure to do so will result in problems on various
438192537Sgnn * error conditions including link flapping.
439192537Sgnn */
440167514Skmacystatic int
441167514Skmacycxgb_controller_attach(device_t dev)
442167514Skmacy{
443167514Skmacy	device_t child;
444167514Skmacy	const struct adapter_info *ai;
445167514Skmacy	struct adapter *sc;
446172109Skmacy	int i, error = 0;
447167514Skmacy	uint32_t vers;
448167760Skmacy	int port_qsets = 1;
449172109Skmacy	int msi_needed, reg;
450185655Sgnn	char buf[80];
451185655Sgnn
452167514Skmacy	sc = device_get_softc(dev);
453167514Skmacy	sc->dev = dev;
454169978Skmacy	sc->msi_count = 0;
455172109Skmacy	ai = cxgb_get_adapter_info(dev);
456172109Skmacy
457237920Snp	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
458237920Snp	    device_get_unit(dev));
459237920Snp	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
460237920Snp
461237920Snp	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
462237920Snp	    device_get_unit(dev));
463237920Snp	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
464237920Snp	    device_get_unit(dev));
465237920Snp	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
466237920Snp	    device_get_unit(dev));
467237920Snp
468237920Snp	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
469237920Snp	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
470237920Snp	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
471237920Snp
472237920Snp	mtx_lock(&t3_list_lock);
473237920Snp	SLIST_INSERT_HEAD(&t3_list, sc, link);
474237920Snp	mtx_unlock(&t3_list_lock);
475237920Snp
476167840Skmacy	/* find the PCIe link width and set max read request to 4KB*/
477219902Sjhb	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
478210505Sjhb		uint16_t lnk;
479171471Skmacy
480242015Sgavin		lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
481242015Sgavin		sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
482210505Sjhb		if (sc->link_width < 8 &&
483210505Sjhb		    (ai->caps & SUPPORTED_10000baseT_Full)) {
484210505Sjhb			device_printf(sc->dev,
485210505Sjhb			    "PCIe x%d Link, expect reduced performance\n",
486210505Sjhb			    sc->link_width);
487210505Sjhb		}
488210505Sjhb
489210505Sjhb		pci_set_max_read_req(dev, 4096);
490167840Skmacy	}
491204274Snp
492171978Skmacy	touch_bars(dev);
493167514Skmacy	pci_enable_busmaster(dev);
494167514Skmacy	/*
495167514Skmacy	 * Allocate the registers and make them available to the driver.
496167514Skmacy	 * The registers that we care about for NIC mode are in BAR 0
497167514Skmacy	 */
498167514Skmacy	sc->regs_rid = PCIR_BAR(0);
499167514Skmacy	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
500167514Skmacy	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
501176472Skmacy		device_printf(dev, "Cannot allocate BAR region 0\n");
502237920Snp		error = ENXIO;
503237920Snp		goto out;
504167514Skmacy	}
505167514Skmacy
506167514Skmacy	sc->bt = rman_get_bustag(sc->regs_res);
507167514Skmacy	sc->bh = rman_get_bushandle(sc->regs_res);
508167514Skmacy	sc->mmio_len = rman_get_size(sc->regs_res);
509167769Skmacy
510197791Snp	for (i = 0; i < MAX_NPORTS; i++)
511197791Snp		sc->port[i].adapter = sc;
512197791Snp
513167769Skmacy	if (t3_prep_adapter(sc, ai, 1) < 0) {
514170654Skmacy		printf("prep adapter failed\n");
515167769Skmacy		error = ENODEV;
516167769Skmacy		goto out;
517167769Skmacy	}
518231604Snp
519231604Snp	sc->udbs_rid = PCIR_BAR(2);
520231604Snp	sc->udbs_res = NULL;
521231604Snp	if (is_offload(sc) &&
522231604Snp	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
523231604Snp		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
524231604Snp		device_printf(dev, "Cannot allocate BAR region 1\n");
525231604Snp		error = ENXIO;
526231604Snp		goto out;
527231604Snp	}
528231604Snp
529177464Skmacy        /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
530167514Skmacy	 * enough messages for the queue sets.  If that fails, try falling
531167514Skmacy	 * back to MSI.  If that fails, then try falling back to the legacy
532167514Skmacy	 * interrupt pin model.
533167514Skmacy	 */
534167514Skmacy	sc->msix_regs_rid = 0x20;
535167514Skmacy	if ((msi_allowed >= 2) &&
536167514Skmacy	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
537167514Skmacy	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
538167514Skmacy
539192933Sgnn		if (multiq)
540192933Sgnn			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
541192933Sgnn		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
542167760Skmacy
543192933Sgnn		if (pci_msix_count(dev) == 0 ||
544192933Sgnn		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
545192933Sgnn		    sc->msi_count != msi_needed) {
546192933Sgnn			device_printf(dev, "alloc msix failed - "
547192933Sgnn				      "msi_count=%d, msi_needed=%d, err=%d; "
548192933Sgnn				      "will try MSI\n", sc->msi_count,
549192933Sgnn				      msi_needed, error);
550169978Skmacy			sc->msi_count = 0;
551192933Sgnn			port_qsets = 1;
552167514Skmacy			pci_release_msi(dev);
553167514Skmacy			bus_release_resource(dev, SYS_RES_MEMORY,
554167514Skmacy			    sc->msix_regs_rid, sc->msix_regs_res);
555167514Skmacy			sc->msix_regs_res = NULL;
556167514Skmacy		} else {
557167514Skmacy			sc->flags |= USING_MSIX;
558192933Sgnn			sc->cxgb_intr = cxgb_async_intr;
559192933Sgnn			device_printf(dev,
560192933Sgnn				      "using MSI-X interrupts (%u vectors)\n",
561192933Sgnn				      sc->msi_count);
562167514Skmacy		}
563167514Skmacy	}
564167514Skmacy
565169978Skmacy	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
566169978Skmacy		sc->msi_count = 1;
567192933Sgnn		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
568192933Sgnn			device_printf(dev, "alloc msi failed - "
569192933Sgnn				      "err=%d; will try INTx\n", error);
570169978Skmacy			sc->msi_count = 0;
571192933Sgnn			port_qsets = 1;
572167514Skmacy			pci_release_msi(dev);
573167514Skmacy		} else {
574167514Skmacy			sc->flags |= USING_MSI;
575170081Skmacy			sc->cxgb_intr = t3_intr_msi;
576192933Sgnn			device_printf(dev, "using MSI interrupts\n");
577167514Skmacy		}
578167514Skmacy	}
579169978Skmacy	if (sc->msi_count == 0) {
580167760Skmacy		device_printf(dev, "using line interrupts\n");
581170081Skmacy		sc->cxgb_intr = t3b_intr;
582167514Skmacy	}
583167514Skmacy
584167514Skmacy	/* Create a private taskqueue thread for handling driver events */
585167514Skmacy	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
586167514Skmacy	    taskqueue_thread_enqueue, &sc->tq);
587167514Skmacy	if (sc->tq == NULL) {
588167514Skmacy		device_printf(dev, "failed to allocate controller task queue\n");
589167514Skmacy		goto out;
590167514Skmacy	}
591171804Skmacy
592167514Skmacy	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
593167514Skmacy	    device_get_nameunit(dev));
594170869Skmacy	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
595167514Skmacy
596167514Skmacy
597167514Skmacy	/* Create a periodic callout for checking adapter status */
598170869Skmacy	callout_init(&sc->cxgb_tick_ch, TRUE);
599167514Skmacy
600189643Sgnn	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
601167514Skmacy		/*
602167514Skmacy		 * Warn user that a firmware update will be attempted in init.
603167514Skmacy		 */
604169978Skmacy		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
605169978Skmacy		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
606167514Skmacy		sc->flags &= ~FW_UPTODATE;
607167514Skmacy	} else {
608167514Skmacy		sc->flags |= FW_UPTODATE;
609167514Skmacy	}
610171471Skmacy
611189643Sgnn	if (t3_check_tpsram_version(sc) < 0) {
612171471Skmacy		/*
613171471Skmacy		 * Warn user that a firmware update will be attempted in init.
614171471Skmacy		 */
615171471Skmacy		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
616171471Skmacy		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
617171471Skmacy		sc->flags &= ~TPS_UPTODATE;
618171471Skmacy	} else {
619171471Skmacy		sc->flags |= TPS_UPTODATE;
620171471Skmacy	}
621237920Snp
622167514Skmacy	/*
623167514Skmacy	 * Create a child device for each MAC.  The ethernet attachment
624167514Skmacy	 * will be done in these children.
625167760Skmacy	 */
626167760Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
627171978Skmacy		struct port_info *pi;
628171978Skmacy
629167514Skmacy		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
630167514Skmacy			device_printf(dev, "failed to add child port\n");
631167514Skmacy			error = EINVAL;
632167514Skmacy			goto out;
633167514Skmacy		}
634171978Skmacy		pi = &sc->port[i];
635171978Skmacy		pi->adapter = sc;
636171978Skmacy		pi->nqsets = port_qsets;
637171978Skmacy		pi->first_qset = i*port_qsets;
638171978Skmacy		pi->port_id = i;
639171978Skmacy		pi->tx_chan = i >= ai->nports0;
640171978Skmacy		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
641171978Skmacy		sc->rxpkt_map[pi->txpkt_intf] = i;
642174708Skmacy		sc->port[i].tx_chan = i >= ai->nports0;
643171471Skmacy		sc->portdev[i] = child;
644171978Skmacy		device_set_softc(child, pi);
645167514Skmacy	}
646167514Skmacy	if ((error = bus_generic_attach(dev)) != 0)
647167514Skmacy		goto out;
648167514Skmacy
649167514Skmacy	/* initialize sge private state */
650170654Skmacy	t3_sge_init_adapter(sc);
651167514Skmacy
652167514Skmacy	t3_led_ready(sc);
653237920Snp
654167514Skmacy	error = t3_get_fw_version(sc, &vers);
655167514Skmacy	if (error)
656167514Skmacy		goto out;
657167514Skmacy
658169978Skmacy	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
659169978Skmacy	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
660169978Skmacy	    G_FW_VERSION_MICRO(vers));
661169978Skmacy
662199237Snp	snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
663199237Snp		 ai->desc, is_offload(sc) ? "R" : "",
664185655Sgnn		 sc->params.vpd.ec, sc->params.vpd.sn);
665185655Sgnn	device_set_desc_copy(dev, buf);
666185655Sgnn
667192540Sgnn	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
668192540Sgnn		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
669192540Sgnn		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
670192540Sgnn
671176472Skmacy	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
672209841Snp	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
673174708Skmacy	t3_add_attach_sysctls(sc);
674237920Snp
675237920Snp#ifdef TCP_OFFLOAD
676237920Snp	for (i = 0; i < NUM_CPL_HANDLERS; i++)
677237920Snp		sc->cpl_handler[i] = cpl_not_handled;
678237920Snp#endif
679241314Sjhb
680241314Sjhb	t3_intr_clear(sc);
681241314Sjhb	error = cxgb_setup_interrupts(sc);
682167514Skmacyout:
683167514Skmacy	if (error)
684167514Skmacy		cxgb_free(sc);
685167514Skmacy
686167514Skmacy	return (error);
687167514Skmacy}
688167514Skmacy
689192537Sgnn/*
690192584Sgnn * The cxgb_controller_detach routine is called with the device is
691192537Sgnn * unloaded from the system.
692192537Sgnn */
693192537Sgnn
694167514Skmacystatic int
695167514Skmacycxgb_controller_detach(device_t dev)
696167514Skmacy{
697167514Skmacy	struct adapter *sc;
698167514Skmacy
699167514Skmacy	sc = device_get_softc(dev);
700167514Skmacy
701167514Skmacy	cxgb_free(sc);
702167514Skmacy
703167514Skmacy	return (0);
704167514Skmacy}
705167514Skmacy
706192537Sgnn/*
707192537Sgnn * The cxgb_free() is called by the cxgb_controller_detach() routine
708192537Sgnn * to tear down the structures that were built up in
709192537Sgnn * cxgb_controller_attach(), and should be the final piece of work
710192584Sgnn * done when fully unloading the driver.
711192537Sgnn *
712192537Sgnn *
713192537Sgnn *  1. Shutting down the threads started by the cxgb_controller_attach()
714192537Sgnn *     routine.
715192537Sgnn *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
716192537Sgnn *  3. Detaching all of the port devices created during the
717192537Sgnn *     cxgb_controller_attach() routine.
718192537Sgnn *  4. Removing the device children created via cxgb_controller_attach().
719192933Sgnn *  5. Releasing PCI resources associated with the device.
720192537Sgnn *  6. Turning off the offload support, iff it was turned on.
721192537Sgnn *  7. Destroying the mutexes created in cxgb_controller_attach().
722192537Sgnn *
723192537Sgnn */
724167514Skmacystatic void
725167514Skmacycxgb_free(struct adapter *sc)
726167514Skmacy{
727219946Snp	int i, nqsets = 0;
728167514Skmacy
729176472Skmacy	ADAPTER_LOCK(sc);
730176472Skmacy	sc->flags |= CXGB_SHUTDOWN;
731176472Skmacy	ADAPTER_UNLOCK(sc);
732192537Sgnn
733192537Sgnn	/*
734194521Skmacy	 * Make sure all child devices are gone.
735192537Sgnn	 */
736192537Sgnn	bus_generic_detach(sc->dev);
737192537Sgnn	for (i = 0; i < (sc)->params.nports; i++) {
738192584Sgnn		if (sc->portdev[i] &&
739192584Sgnn		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
740192537Sgnn			device_printf(sc->dev, "failed to delete child port\n");
741219946Snp		nqsets += sc->port[i].nqsets;
742192537Sgnn	}
743192537Sgnn
744194521Skmacy	/*
745194521Skmacy	 * At this point, it is as if cxgb_port_detach has run on all ports, and
746194521Skmacy	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
747194521Skmacy	 * all open devices have been closed.
748194521Skmacy	 */
749194521Skmacy	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
750194521Skmacy					   __func__, sc->open_device_map));
751194521Skmacy	for (i = 0; i < sc->params.nports; i++) {
752194521Skmacy		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
753194521Skmacy						  __func__, i));
754194521Skmacy	}
755194521Skmacy
756194521Skmacy	/*
757194521Skmacy	 * Finish off the adapter's callouts.
758194521Skmacy	 */
759194521Skmacy	callout_drain(&sc->cxgb_tick_ch);
760194521Skmacy	callout_drain(&sc->sge_timer_ch);
761194521Skmacy
762194521Skmacy	/*
763194521Skmacy	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
764194521Skmacy	 * sysctls are cleaned up by the kernel linker.
765194521Skmacy	 */
766194521Skmacy	if (sc->flags & FULL_INIT_DONE) {
767219946Snp 		t3_free_sge_resources(sc, nqsets);
768194521Skmacy 		sc->flags &= ~FULL_INIT_DONE;
769194521Skmacy 	}
770194521Skmacy
771194521Skmacy	/*
772194521Skmacy	 * Release all interrupt resources.
773194521Skmacy	 */
774192933Sgnn	cxgb_teardown_interrupts(sc);
775169978Skmacy	if (sc->flags & (USING_MSI | USING_MSIX)) {
776169978Skmacy		device_printf(sc->dev, "releasing msi message(s)\n");
777169978Skmacy		pci_release_msi(sc->dev);
778169978Skmacy	} else {
779169978Skmacy		device_printf(sc->dev, "no msi message to release\n");
780169978Skmacy	}
781192933Sgnn
782169978Skmacy	if (sc->msix_regs_res != NULL) {
783169978Skmacy		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
784169978Skmacy		    sc->msix_regs_res);
785169978Skmacy	}
786176472Skmacy
787194521Skmacy	/*
788194521Skmacy	 * Free the adapter's taskqueue.
789194521Skmacy	 */
790176472Skmacy	if (sc->tq != NULL) {
791171978Skmacy		taskqueue_free(sc->tq);
792176472Skmacy		sc->tq = NULL;
793176472Skmacy	}
794176472Skmacy
795171471Skmacy	free(sc->filters, M_DEVBUF);
796167514Skmacy	t3_sge_free(sc);
797194521Skmacy
798176472Skmacy	if (sc->udbs_res != NULL)
799176472Skmacy		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
800176472Skmacy		    sc->udbs_res);
801176472Skmacy
802167514Skmacy	if (sc->regs_res != NULL)
803167514Skmacy		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
804167514Skmacy		    sc->regs_res);
805167514Skmacy
806170869Skmacy	MTX_DESTROY(&sc->mdio_lock);
807170869Skmacy	MTX_DESTROY(&sc->sge.reg_lock);
808170869Skmacy	MTX_DESTROY(&sc->elmer_lock);
809237920Snp	mtx_lock(&t3_list_lock);
810237920Snp	SLIST_REMOVE(&t3_list, sc, adapter, link);
811237920Snp	mtx_unlock(&t3_list_lock);
812170869Skmacy	ADAPTER_LOCK_DEINIT(sc);
813167514Skmacy}
814167514Skmacy
815167514Skmacy/**
816167514Skmacy *	setup_sge_qsets - configure SGE Tx/Rx/response queues
817167514Skmacy *	@sc: the controller softc
818167514Skmacy *
819167514Skmacy *	Determines how many sets of SGE queues to use and initializes them.
820167514Skmacy *	We support multiple queue sets per port if we have MSI-X, otherwise
821167514Skmacy *	just one queue set per port.
822167514Skmacy */
823167514Skmacystatic int
824167514Skmacysetup_sge_qsets(adapter_t *sc)
825167514Skmacy{
826172096Skmacy	int i, j, err, irq_idx = 0, qset_idx = 0;
827169978Skmacy	u_int ntxq = SGE_TXQ_PER_SET;
828167514Skmacy
829167514Skmacy	if ((err = t3_sge_alloc(sc)) != 0) {
830167760Skmacy		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
831167514Skmacy		return (err);
832167514Skmacy	}
833167514Skmacy
834167514Skmacy	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
835167514Skmacy		irq_idx = -1;
836167514Skmacy
837172096Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
838167514Skmacy		struct port_info *pi = &sc->port[i];
839167514Skmacy
840171978Skmacy		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
841167760Skmacy			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
842167514Skmacy			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
843167514Skmacy			    &sc->params.sge.qset[qset_idx], ntxq, pi);
844167514Skmacy			if (err) {
845219946Snp				t3_free_sge_resources(sc, qset_idx);
846219946Snp				device_printf(sc->dev,
847219946Snp				    "t3_sge_alloc_qset failed with %d\n", err);
848167514Skmacy				return (err);
849167514Skmacy			}
850167514Skmacy		}
851167514Skmacy	}
852167514Skmacy
853167514Skmacy	return (0);
854167514Skmacy}
855167514Skmacy
856170654Skmacystatic void
857192933Sgnncxgb_teardown_interrupts(adapter_t *sc)
858170654Skmacy{
859192933Sgnn	int i;
860170654Skmacy
861192933Sgnn	for (i = 0; i < SGE_QSETS; i++) {
862192933Sgnn		if (sc->msix_intr_tag[i] == NULL) {
863192933Sgnn
864192933Sgnn			/* Should have been setup fully or not at all */
865192933Sgnn			KASSERT(sc->msix_irq_res[i] == NULL &&
866192933Sgnn				sc->msix_irq_rid[i] == 0,
867192933Sgnn				("%s: half-done interrupt (%d).", __func__, i));
868192933Sgnn
869192933Sgnn			continue;
870170654Skmacy		}
871192933Sgnn
872192933Sgnn		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
873192933Sgnn				  sc->msix_intr_tag[i]);
874192933Sgnn		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
875192933Sgnn				     sc->msix_irq_res[i]);
876192933Sgnn
877192933Sgnn		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
878192933Sgnn		sc->msix_irq_rid[i] = 0;
879170654Skmacy	}
880192933Sgnn
881192933Sgnn	if (sc->intr_tag) {
882192933Sgnn		KASSERT(sc->irq_res != NULL,
883192933Sgnn			("%s: half-done interrupt.", __func__));
884192933Sgnn
885192933Sgnn		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
886192933Sgnn		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
887192933Sgnn				     sc->irq_res);
888192933Sgnn
889192933Sgnn		sc->irq_res = sc->intr_tag = NULL;
890192933Sgnn		sc->irq_rid = 0;
891192933Sgnn	}
892170654Skmacy}
893170654Skmacy
894167514Skmacystatic int
895192933Sgnncxgb_setup_interrupts(adapter_t *sc)
896167514Skmacy{
897192933Sgnn	struct resource *res;
898192933Sgnn	void *tag;
899192933Sgnn	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
900167514Skmacy
901192933Sgnn	sc->irq_rid = intr_flag ? 1 : 0;
902192933Sgnn	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
903192933Sgnn					     RF_SHAREABLE | RF_ACTIVE);
904192933Sgnn	if (sc->irq_res == NULL) {
905192933Sgnn		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
906192933Sgnn			      intr_flag, sc->irq_rid);
907192933Sgnn		err = EINVAL;
908192933Sgnn		sc->irq_rid = 0;
909192933Sgnn	} else {
910192933Sgnn		err = bus_setup_intr(sc->dev, sc->irq_res,
911204274Snp		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
912204274Snp		    sc->cxgb_intr, sc, &sc->intr_tag);
913192933Sgnn
914192933Sgnn		if (err) {
915192933Sgnn			device_printf(sc->dev,
916192933Sgnn				      "Cannot set up interrupt (%x, %u, %d)\n",
917192933Sgnn				      intr_flag, sc->irq_rid, err);
918192933Sgnn			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
919192933Sgnn					     sc->irq_res);
920192933Sgnn			sc->irq_res = sc->intr_tag = NULL;
921192933Sgnn			sc->irq_rid = 0;
922192933Sgnn		}
923167514Skmacy	}
924171804Skmacy
925192933Sgnn	/* That's all for INTx or MSI */
926192933Sgnn	if (!(intr_flag & USING_MSIX) || err)
927192933Sgnn		return (err);
928192933Sgnn
929241314Sjhb	bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
930192933Sgnn	for (i = 0; i < sc->msi_count - 1; i++) {
931192933Sgnn		rid = i + 2;
932192933Sgnn		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
933192933Sgnn					     RF_SHAREABLE | RF_ACTIVE);
934192933Sgnn		if (res == NULL) {
935192933Sgnn			device_printf(sc->dev, "Cannot allocate interrupt "
936192933Sgnn				      "for message %d\n", rid);
937192933Sgnn			err = EINVAL;
938192933Sgnn			break;
939192933Sgnn		}
940192933Sgnn
941192933Sgnn		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
942204274Snp				     NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
943192933Sgnn		if (err) {
944192933Sgnn			device_printf(sc->dev, "Cannot set up interrupt "
945192933Sgnn				      "for message %d (%d)\n", rid, err);
946192933Sgnn			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
947192933Sgnn			break;
948167514Skmacy		}
949192933Sgnn
950192933Sgnn		sc->msix_irq_rid[i] = rid;
951192933Sgnn		sc->msix_irq_res[i] = res;
952192933Sgnn		sc->msix_intr_tag[i] = tag;
953241314Sjhb		bus_describe_intr(sc->dev, res, tag, "qs%d", i);
954167514Skmacy	}
955167760Skmacy
956192933Sgnn	if (err)
957192933Sgnn		cxgb_teardown_interrupts(sc);
958192933Sgnn
959192933Sgnn	return (err);
960167514Skmacy}
961167514Skmacy
962192933Sgnn
963167514Skmacystatic int
964167514Skmacycxgb_port_probe(device_t dev)
965167514Skmacy{
966167514Skmacy	struct port_info *p;
967167514Skmacy	char buf[80];
968176472Skmacy	const char *desc;
969176472Skmacy
970167514Skmacy	p = device_get_softc(dev);
971176472Skmacy	desc = p->phy.desc;
972176472Skmacy	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
973167514Skmacy	device_set_desc_copy(dev, buf);
974167514Skmacy	return (0);
975167514Skmacy}
976167514Skmacy
977167514Skmacy
978167514Skmacystatic int
979167514Skmacycxgb_makedev(struct port_info *pi)
980167514Skmacy{
981167514Skmacy
982170654Skmacy	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
983209115Snp	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
984167514Skmacy
985167514Skmacy	if (pi->port_cdev == NULL)
986167514Skmacy		return (ENOMEM);
987167514Skmacy
988167514Skmacy	pi->port_cdev->si_drv1 = (void *)pi;
989167514Skmacy
990167514Skmacy	return (0);
991167514Skmacy}
992167514Skmacy
993204274Snp#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
994204348Snp    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
995237925Snp    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
996238302Snp#define CXGB_CAP_ENABLE (CXGB_CAP)
997167514Skmacy
998167514Skmacystatic int
999167514Skmacycxgb_port_attach(device_t dev)
1000167514Skmacy{
1001167514Skmacy	struct port_info *p;
1002167514Skmacy	struct ifnet *ifp;
1003194921Snp	int err;
1004176472Skmacy	struct adapter *sc;
1005204274Snp
1006167514Skmacy	p = device_get_softc(dev);
1007176472Skmacy	sc = p->adapter;
1008170869Skmacy	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1009171803Skmacy	    device_get_unit(device_get_parent(dev)), p->port_id);
1010170869Skmacy	PORT_LOCK_INIT(p, p->lockbuf);
1011167514Skmacy
1012209841Snp	callout_init(&p->link_check_ch, CALLOUT_MPSAFE);
1013209841Snp	TASK_INIT(&p->link_check_task, 0, check_link_status, p);
1014209841Snp
1015167514Skmacy	/* Allocate an ifnet object and set it up */
1016167514Skmacy	ifp = p->ifp = if_alloc(IFT_ETHER);
1017167514Skmacy	if (ifp == NULL) {
1018167514Skmacy		device_printf(dev, "Cannot allocate ifnet\n");
1019167514Skmacy		return (ENOMEM);
1020167514Skmacy	}
1021167514Skmacy
1022167514Skmacy	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1023167514Skmacy	ifp->if_init = cxgb_init;
1024167514Skmacy	ifp->if_softc = p;
1025167514Skmacy	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1026167514Skmacy	ifp->if_ioctl = cxgb_ioctl;
1027231597Snp	ifp->if_transmit = cxgb_transmit;
1028231597Snp	ifp->if_qflush = cxgb_qflush;
1029174708Skmacy
1030204274Snp	ifp->if_capabilities = CXGB_CAP;
1031237920Snp#ifdef TCP_OFFLOAD
1032237920Snp	if (is_offload(sc))
1033237920Snp		ifp->if_capabilities |= IFCAP_TOE4;
1034237920Snp#endif
1035204274Snp	ifp->if_capenable = CXGB_CAP_ENABLE;
1036237925Snp	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1037237925Snp	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1038204274Snp
1039171471Skmacy	/*
1040204274Snp	 * Disable TSO on 4-port - it isn't supported by the firmware.
1041171471Skmacy	 */
1042204274Snp	if (sc->params.nports > 2) {
1043204348Snp		ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
1044204348Snp		ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
1045171471Skmacy		ifp->if_hwassist &= ~CSUM_TSO;
1046171471Skmacy	}
1047171471Skmacy
1048167514Skmacy	ether_ifattach(ifp, p->hw_addr);
1049192537Sgnn
1050204274Snp#ifdef DEFAULT_JUMBO
1051204274Snp	if (sc->params.nports <= 2)
1052180583Skmacy		ifp->if_mtu = ETHERMTU_JUMBO;
1053204274Snp#endif
1054167514Skmacy	if ((err = cxgb_makedev(p)) != 0) {
1055167514Skmacy		printf("makedev failed %d\n", err);
1056167514Skmacy		return (err);
1057167514Skmacy	}
1058194921Snp
1059194921Snp	/* Create a list of media supported by this port */
1060167514Skmacy	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1061167514Skmacy	    cxgb_media_status);
1062194921Snp	cxgb_build_medialist(p);
1063176472Skmacy
1064170654Skmacy	t3_sge_init_port(p);
1065189643Sgnn
1066192537Sgnn	return (err);
1067167514Skmacy}
1068167514Skmacy
1069192537Sgnn/*
1070192537Sgnn * cxgb_port_detach() is called via the device_detach methods when
1071192537Sgnn * cxgb_free() calls the bus_generic_detach.  It is responsible for
1072192537Sgnn * removing the device from the view of the kernel, i.e. from all
1073192537Sgnn * interfaces lists etc.  This routine is only called when the driver is
1074192537Sgnn * being unloaded, not when the link goes down.
1075192537Sgnn */
1076167514Skmacystatic int
1077167514Skmacycxgb_port_detach(device_t dev)
1078167514Skmacy{
1079167514Skmacy	struct port_info *p;
1080192537Sgnn	struct adapter *sc;
1081194521Skmacy	int i;
1082167514Skmacy
1083167514Skmacy	p = device_get_softc(dev);
1084192537Sgnn	sc = p->adapter;
1085169978Skmacy
1086202671Snp	/* Tell cxgb_ioctl and if_init that the port is going away */
1087202671Snp	ADAPTER_LOCK(sc);
1088202671Snp	SET_DOOMED(p);
1089202671Snp	wakeup(&sc->flags);
1090202671Snp	while (IS_BUSY(sc))
1091202671Snp		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
1092202671Snp	SET_BUSY(sc);
1093202671Snp	ADAPTER_UNLOCK(sc);
1094194521Skmacy
1095192537Sgnn	if (p->port_cdev != NULL)
1096192537Sgnn		destroy_dev(p->port_cdev);
1097194521Skmacy
1098194521Skmacy	cxgb_uninit_synchronized(p);
1099192537Sgnn	ether_ifdetach(p->ifp);
1100192537Sgnn
1101194521Skmacy	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1102194521Skmacy		struct sge_qset *qs = &sc->sge.qs[i];
1103194521Skmacy		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1104194521Skmacy
1105194521Skmacy		callout_drain(&txq->txq_watchdog);
1106194521Skmacy		callout_drain(&txq->txq_timer);
1107192537Sgnn	}
1108192537Sgnn
1109170869Skmacy	PORT_LOCK_DEINIT(p);
1110167514Skmacy	if_free(p->ifp);
1111194521Skmacy	p->ifp = NULL;
1112194521Skmacy
1113202671Snp	ADAPTER_LOCK(sc);
1114202671Snp	CLR_BUSY(sc);
1115202671Snp	wakeup_one(&sc->flags);
1116202671Snp	ADAPTER_UNLOCK(sc);
1117167514Skmacy	return (0);
1118167514Skmacy}
1119167514Skmacy
1120167514Skmacyvoid
1121167514Skmacyt3_fatal_err(struct adapter *sc)
1122167514Skmacy{
1123167514Skmacy	u_int fw_status[4];
1124183062Skmacy
1125172096Skmacy	if (sc->flags & FULL_INIT_DONE) {
1126172096Skmacy		t3_sge_stop(sc);
1127172096Skmacy		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1128172096Skmacy		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1129172096Skmacy		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1130172096Skmacy		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1131172096Skmacy		t3_intr_disable(sc);
1132172096Skmacy	}
1133167514Skmacy	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1134167514Skmacy	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1135167514Skmacy		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1136167514Skmacy		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1137167514Skmacy}
1138167514Skmacy
1139167514Skmacyint
1140167514Skmacyt3_os_find_pci_capability(adapter_t *sc, int cap)
1141167514Skmacy{
1142167514Skmacy	device_t dev;
1143167514Skmacy	struct pci_devinfo *dinfo;
1144167514Skmacy	pcicfgregs *cfg;
1145167514Skmacy	uint32_t status;
1146167514Skmacy	uint8_t ptr;
1147167514Skmacy
1148167514Skmacy	dev = sc->dev;
1149167514Skmacy	dinfo = device_get_ivars(dev);
1150167514Skmacy	cfg = &dinfo->cfg;
1151167514Skmacy
1152167514Skmacy	status = pci_read_config(dev, PCIR_STATUS, 2);
1153167514Skmacy	if (!(status & PCIM_STATUS_CAPPRESENT))
1154167514Skmacy		return (0);
1155167514Skmacy
1156167514Skmacy	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1157167514Skmacy	case 0:
1158167514Skmacy	case 1:
1159167514Skmacy		ptr = PCIR_CAP_PTR;
1160167514Skmacy		break;
1161167514Skmacy	case 2:
1162167514Skmacy		ptr = PCIR_CAP_PTR_2;
1163167514Skmacy		break;
1164167514Skmacy	default:
1165167514Skmacy		return (0);
1166167514Skmacy		break;
1167167514Skmacy	}
1168167514Skmacy	ptr = pci_read_config(dev, ptr, 1);
1169167514Skmacy
1170167514Skmacy	while (ptr != 0) {
1171167514Skmacy		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1172167514Skmacy			return (ptr);
1173167514Skmacy		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1174167514Skmacy	}
1175167514Skmacy
1176167514Skmacy	return (0);
1177167514Skmacy}
1178167514Skmacy
1179167514Skmacyint
1180167514Skmacyt3_os_pci_save_state(struct adapter *sc)
1181167514Skmacy{
1182167514Skmacy	device_t dev;
1183167514Skmacy	struct pci_devinfo *dinfo;
1184167514Skmacy
1185167514Skmacy	dev = sc->dev;
1186167514Skmacy	dinfo = device_get_ivars(dev);
1187167514Skmacy
1188167514Skmacy	pci_cfg_save(dev, dinfo, 0);
1189167514Skmacy	return (0);
1190167514Skmacy}
1191167514Skmacy
1192167514Skmacyint
1193167514Skmacyt3_os_pci_restore_state(struct adapter *sc)
1194167514Skmacy{
1195167514Skmacy	device_t dev;
1196167514Skmacy	struct pci_devinfo *dinfo;
1197167514Skmacy
1198167514Skmacy	dev = sc->dev;
1199167514Skmacy	dinfo = device_get_ivars(dev);
1200167514Skmacy
1201167514Skmacy	pci_cfg_restore(dev, dinfo);
1202167514Skmacy	return (0);
1203167514Skmacy}
1204167514Skmacy
1205167514Skmacy/**
1206167514Skmacy *	t3_os_link_changed - handle link status changes
1207197791Snp *	@sc: the adapter associated with the link change
1208197791Snp *	@port_id: the port index whose link status has changed
1209177340Skmacy *	@link_status: the new status of the link
1210167514Skmacy *	@speed: the new speed setting
1211167514Skmacy *	@duplex: the new duplex setting
1212167514Skmacy *	@fc: the new flow-control setting
1213167514Skmacy *
1214167514Skmacy *	This is the OS-dependent handler for link status changes.  The OS
1215167514Skmacy *	neutral handler takes care of most of the processing for these events,
1216167514Skmacy *	then calls this handler for any OS-specific processing.
1217167514Skmacy */
1218167514Skmacyvoid
1219167514Skmacyt3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1220197791Snp     int duplex, int fc, int mac_was_reset)
1221167514Skmacy{
1222167514Skmacy	struct port_info *pi = &adapter->port[port_id];
1223194521Skmacy	struct ifnet *ifp = pi->ifp;
1224167514Skmacy
1225194521Skmacy	/* no race with detach, so ifp should always be good */
1226194521Skmacy	KASSERT(ifp, ("%s: if detached.", __func__));
1227194521Skmacy
1228197791Snp	/* Reapply mac settings if they were lost due to a reset */
1229197791Snp	if (mac_was_reset) {
1230197791Snp		PORT_LOCK(pi);
1231197791Snp		cxgb_update_mac_settings(pi);
1232197791Snp		PORT_UNLOCK(pi);
1233197791Snp	}
1234197791Snp
1235169978Skmacy	if (link_status) {
1236194521Skmacy		ifp->if_baudrate = IF_Mbps(speed);
1237194521Skmacy		if_link_state_change(ifp, LINK_STATE_UP);
1238192540Sgnn	} else
1239194521Skmacy		if_link_state_change(ifp, LINK_STATE_DOWN);
1240167514Skmacy}
1241167514Skmacy
1242181614Skmacy/**
1243181614Skmacy *	t3_os_phymod_changed - handle PHY module changes
1244181614Skmacy *	@phy: the PHY reporting the module change
1245181614Skmacy *	@mod_type: new module type
1246181614Skmacy *
1247181614Skmacy *	This is the OS-dependent handler for PHY module changes.  It is
1248181614Skmacy *	invoked when a PHY module is removed or inserted for any OS-specific
1249181614Skmacy *	processing.
1250181614Skmacy */
1251181614Skmacyvoid t3_os_phymod_changed(struct adapter *adap, int port_id)
1252181614Skmacy{
1253181614Skmacy	static const char *mod_str[] = {
1254204921Snp		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
1255181614Skmacy	};
1256181614Skmacy	struct port_info *pi = &adap->port[port_id];
1257194921Snp	int mod = pi->phy.modtype;
1258181614Skmacy
1259194921Snp	if (mod != pi->media.ifm_cur->ifm_data)
1260194921Snp		cxgb_build_medialist(pi);
1261194921Snp
1262194921Snp	if (mod == phy_modtype_none)
1263194921Snp		if_printf(pi->ifp, "PHY module unplugged\n");
1264181614Skmacy	else {
1265194921Snp		KASSERT(mod < ARRAY_SIZE(mod_str),
1266194921Snp			("invalid PHY module type %d", mod));
1267194921Snp		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1268181614Skmacy	}
1269181614Skmacy}
1270181614Skmacy
1271167514Skmacyvoid
1272167514Skmacyt3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1273167514Skmacy{
1274167514Skmacy
1275167514Skmacy	/*
1276167514Skmacy	 * The ifnet might not be allocated before this gets called,
1277167514Skmacy	 * as this is called early on in attach by t3_prep_adapter
1278167514Skmacy	 * save the address off in the port structure
1279167514Skmacy	 */
1280167514Skmacy	if (cxgb_debug)
1281167514Skmacy		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1282167514Skmacy	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1283167514Skmacy}
1284167514Skmacy
1285194521Skmacy/*
1286194521Skmacy * Programs the XGMAC based on the settings in the ifnet.  These settings
1287194521Skmacy * include MTU, MAC address, mcast addresses, etc.
1288167514Skmacy */
1289167514Skmacystatic void
1290194521Skmacycxgb_update_mac_settings(struct port_info *p)
1291167514Skmacy{
1292194521Skmacy	struct ifnet *ifp = p->ifp;
1293167514Skmacy	struct t3_rx_mode rm;
1294167514Skmacy	struct cmac *mac = &p->mac;
1295180583Skmacy	int mtu, hwtagging;
1296167514Skmacy
1297194521Skmacy	PORT_LOCK_ASSERT_OWNED(p);
1298167514Skmacy
1299180583Skmacy	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
1300180583Skmacy
1301180583Skmacy	mtu = ifp->if_mtu;
1302180583Skmacy	if (ifp->if_capenable & IFCAP_VLAN_MTU)
1303180583Skmacy		mtu += ETHER_VLAN_ENCAP_LEN;
1304180583Skmacy
1305180583Skmacy	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
1306180583Skmacy
1307180583Skmacy	t3_mac_set_mtu(mac, mtu);
1308180583Skmacy	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1309167514Skmacy	t3_mac_set_address(mac, 0, p->hw_addr);
1310194521Skmacy	t3_init_rx_mode(&rm, p);
1311167514Skmacy	t3_mac_set_rx_mode(mac, &rm);
1312167514Skmacy}
1313167514Skmacy
1314176472Skmacy
1315176472Skmacystatic int
1316176472Skmacyawait_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1317176472Skmacy			      unsigned long n)
1318176472Skmacy{
1319176472Skmacy	int attempts = 5;
1320176472Skmacy
1321176472Skmacy	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1322176472Skmacy		if (!--attempts)
1323176472Skmacy			return (ETIMEDOUT);
1324176472Skmacy		t3_os_sleep(10);
1325176472Skmacy	}
1326176472Skmacy	return 0;
1327176472Skmacy}
1328176472Skmacy
1329176472Skmacystatic int
1330176472Skmacyinit_tp_parity(struct adapter *adap)
1331176472Skmacy{
1332176472Skmacy	int i;
1333176472Skmacy	struct mbuf *m;
1334176472Skmacy	struct cpl_set_tcb_field *greq;
1335176472Skmacy	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1336176472Skmacy
1337176472Skmacy	t3_tp_set_offload_mode(adap, 1);
1338176472Skmacy
1339176472Skmacy	for (i = 0; i < 16; i++) {
1340176472Skmacy		struct cpl_smt_write_req *req;
1341176472Skmacy
1342176472Skmacy		m = m_gethdr(M_WAITOK, MT_DATA);
1343176472Skmacy		req = mtod(m, struct cpl_smt_write_req *);
1344176472Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1345176472Skmacy		memset(req, 0, sizeof(*req));
1346194521Skmacy		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1347176472Skmacy		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1348176472Skmacy		req->iff = i;
1349176472Skmacy		t3_mgmt_tx(adap, m);
1350176472Skmacy	}
1351176472Skmacy
1352176472Skmacy	for (i = 0; i < 2048; i++) {
1353176472Skmacy		struct cpl_l2t_write_req *req;
1354176472Skmacy
1355176472Skmacy		m = m_gethdr(M_WAITOK, MT_DATA);
1356176472Skmacy		req = mtod(m, struct cpl_l2t_write_req *);
1357176472Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1358176472Skmacy		memset(req, 0, sizeof(*req));
1359194521Skmacy		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1360176472Skmacy		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1361176472Skmacy		req->params = htonl(V_L2T_W_IDX(i));
1362176472Skmacy		t3_mgmt_tx(adap, m);
1363176472Skmacy	}
1364176472Skmacy
1365176472Skmacy	for (i = 0; i < 2048; i++) {
1366176472Skmacy		struct cpl_rte_write_req *req;
1367176472Skmacy
1368176472Skmacy		m = m_gethdr(M_WAITOK, MT_DATA);
1369176472Skmacy		req = mtod(m, struct cpl_rte_write_req *);
1370176472Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1371176472Skmacy		memset(req, 0, sizeof(*req));
1372194521Skmacy		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1373176472Skmacy		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1374176472Skmacy		req->l2t_idx = htonl(V_L2T_W_IDX(i));
1375176472Skmacy		t3_mgmt_tx(adap, m);
1376176472Skmacy	}
1377176472Skmacy
1378176472Skmacy	m = m_gethdr(M_WAITOK, MT_DATA);
1379176472Skmacy	greq = mtod(m, struct cpl_set_tcb_field *);
1380176472Skmacy	m->m_len = m->m_pkthdr.len = sizeof(*greq);
1381176472Skmacy	memset(greq, 0, sizeof(*greq));
1382194521Skmacy	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1383176472Skmacy	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1384176472Skmacy	greq->mask = htobe64(1);
1385176472Skmacy	t3_mgmt_tx(adap, m);
1386176472Skmacy
1387176472Skmacy	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1388176472Skmacy	t3_tp_set_offload_mode(adap, 0);
1389176472Skmacy	return (i);
1390176472Skmacy}
1391176472Skmacy
1392167514Skmacy/**
1393167514Skmacy *	setup_rss - configure Receive Side Steering (per-queue connection demux)
1394167514Skmacy *	@adap: the adapter
1395167514Skmacy *
1396167514Skmacy *	Sets up RSS to distribute packets to multiple receive queues.  We
1397167514Skmacy *	configure the RSS CPU lookup table to distribute to the number of HW
1398167514Skmacy *	receive queues, and the response queue lookup table to narrow that
1399167514Skmacy *	down to the response queues actually configured for each port.
1400167514Skmacy *	We always configure the RSS mapping for two ports since the mapping
1401167514Skmacy *	table has plenty of entries.
1402167514Skmacy */
1403167514Skmacystatic void
1404167514Skmacysetup_rss(adapter_t *adap)
1405167514Skmacy{
1406167514Skmacy	int i;
1407171471Skmacy	u_int nq[2];
1408167514Skmacy	uint8_t cpus[SGE_QSETS + 1];
1409167514Skmacy	uint16_t rspq_map[RSS_TABLE_SIZE];
1410171471Skmacy
1411167514Skmacy	for (i = 0; i < SGE_QSETS; ++i)
1412167514Skmacy		cpus[i] = i;
1413167514Skmacy	cpus[SGE_QSETS] = 0xff;
1414167514Skmacy
1415171978Skmacy	nq[0] = nq[1] = 0;
1416171978Skmacy	for_each_port(adap, i) {
1417171978Skmacy		const struct port_info *pi = adap2pinfo(adap, i);
1418171978Skmacy
1419171978Skmacy		nq[pi->tx_chan] += pi->nqsets;
1420171978Skmacy	}
1421167514Skmacy	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1422176472Skmacy		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1423176472Skmacy		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1424167514Skmacy	}
1425196840Sjhb
1426171471Skmacy	/* Calculate the reverse RSS map table */
1427196840Sjhb	for (i = 0; i < SGE_QSETS; ++i)
1428196840Sjhb		adap->rrss_map[i] = 0xff;
1429171471Skmacy	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1430171471Skmacy		if (adap->rrss_map[rspq_map[i]] == 0xff)
1431171471Skmacy			adap->rrss_map[rspq_map[i]] = i;
1432167514Skmacy
1433167514Skmacy	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1434171471Skmacy		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1435176472Skmacy	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1436176472Skmacy	              cpus, rspq_map);
1437171471Skmacy
1438167514Skmacy}
1439167514Skmacystatic void
1440167514Skmacysend_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1441167514Skmacy			      int hi, int port)
1442167514Skmacy{
1443167514Skmacy	struct mbuf *m;
1444167514Skmacy	struct mngt_pktsched_wr *req;
1445167514Skmacy
1446248078Smarius	m = m_gethdr(M_NOWAIT, MT_DATA);
1447167848Skmacy	if (m) {
1448169978Skmacy		req = mtod(m, struct mngt_pktsched_wr *);
1449194521Skmacy		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1450167848Skmacy		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1451167848Skmacy		req->sched = sched;
1452167848Skmacy		req->idx = qidx;
1453167848Skmacy		req->min = lo;
1454167848Skmacy		req->max = hi;
1455167848Skmacy		req->binding = port;
1456167848Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1457167848Skmacy		t3_mgmt_tx(adap, m);
1458167848Skmacy	}
1459167514Skmacy}
1460167514Skmacy
1461167514Skmacystatic void
1462167514Skmacybind_qsets(adapter_t *sc)
1463167514Skmacy{
1464167514Skmacy	int i, j;
1465167514Skmacy
1466167514Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
1467167514Skmacy		const struct port_info *pi = adap2pinfo(sc, i);
1468167514Skmacy
1469172096Skmacy		for (j = 0; j < pi->nqsets; ++j) {
1470167514Skmacy			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1471172096Skmacy					  -1, pi->tx_chan);
1472172096Skmacy
1473172096Skmacy		}
1474167514Skmacy	}
1475167514Skmacy}
1476167514Skmacy
1477171471Skmacystatic void
1478171471Skmacyupdate_tpeeprom(struct adapter *adap)
1479171471Skmacy{
1480171471Skmacy	const struct firmware *tpeeprom;
1481172109Skmacy
1482171471Skmacy	uint32_t version;
1483171471Skmacy	unsigned int major, minor;
1484171471Skmacy	int ret, len;
1485189643Sgnn	char rev, name[32];
1486171471Skmacy
1487171471Skmacy	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1488171471Skmacy
1489171471Skmacy	major = G_TP_VERSION_MAJOR(version);
1490171471Skmacy	minor = G_TP_VERSION_MINOR(version);
1491171471Skmacy	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1492171471Skmacy		return;
1493171471Skmacy
1494171471Skmacy	rev = t3rev2char(adap);
1495189643Sgnn	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1496171471Skmacy
1497189643Sgnn	tpeeprom = firmware_get(name);
1498171471Skmacy	if (tpeeprom == NULL) {
1499190330Sgnn		device_printf(adap->dev,
1500190330Sgnn			      "could not load TP EEPROM: unable to load %s\n",
1501190330Sgnn			      name);
1502171471Skmacy		return;
1503171471Skmacy	}
1504171471Skmacy
1505171471Skmacy	len = tpeeprom->datasize - 4;
1506171471Skmacy
1507171471Skmacy	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1508171471Skmacy	if (ret)
1509171471Skmacy		goto release_tpeeprom;
1510171471Skmacy
1511171471Skmacy	if (len != TP_SRAM_LEN) {
1512190330Sgnn		device_printf(adap->dev,
1513190330Sgnn			      "%s length is wrong len=%d expected=%d\n", name,
1514190330Sgnn			      len, TP_SRAM_LEN);
1515171471Skmacy		return;
1516171471Skmacy	}
1517171471Skmacy
1518171471Skmacy	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1519171471Skmacy	    TP_SRAM_OFFSET);
1520171471Skmacy
1521171471Skmacy	if (!ret) {
1522171471Skmacy		device_printf(adap->dev,
1523171471Skmacy			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1524171471Skmacy			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1525171471Skmacy	} else
1526190330Sgnn		device_printf(adap->dev,
1527190330Sgnn			      "Protocol SRAM image update in EEPROM failed\n");
1528171471Skmacy
1529171471Skmacyrelease_tpeeprom:
1530171471Skmacy	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1531171471Skmacy
1532171471Skmacy	return;
1533171471Skmacy}
1534171471Skmacy
1535171471Skmacystatic int
1536171471Skmacyupdate_tpsram(struct adapter *adap)
1537171471Skmacy{
1538171471Skmacy	const struct firmware *tpsram;
1539171471Skmacy	int ret;
1540189643Sgnn	char rev, name[32];
1541171471Skmacy
1542171471Skmacy	rev = t3rev2char(adap);
1543189643Sgnn	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1544171471Skmacy
1545171471Skmacy	update_tpeeprom(adap);
1546171471Skmacy
1547189643Sgnn	tpsram = firmware_get(name);
1548171471Skmacy	if (tpsram == NULL){
1549176613Skmacy		device_printf(adap->dev, "could not load TP SRAM\n");
1550171471Skmacy		return (EINVAL);
1551171471Skmacy	} else
1552176613Skmacy		device_printf(adap->dev, "updating TP SRAM\n");
1553171471Skmacy
1554171471Skmacy	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1555171471Skmacy	if (ret)
1556171471Skmacy		goto release_tpsram;
1557171471Skmacy
1558171471Skmacy	ret = t3_set_proto_sram(adap, tpsram->data);
1559171471Skmacy	if (ret)
1560171471Skmacy		device_printf(adap->dev, "loading protocol SRAM failed\n");
1561171471Skmacy
1562171471Skmacyrelease_tpsram:
1563171471Skmacy	firmware_put(tpsram, FIRMWARE_UNLOAD);
1564171471Skmacy
1565171471Skmacy	return ret;
1566171471Skmacy}
1567171471Skmacy
1568169978Skmacy/**
1569169978Skmacy *	cxgb_up - enable the adapter
1570169978Skmacy *	@adap: adapter being enabled
1571169978Skmacy *
1572169978Skmacy *	Called when the first port is enabled, this function performs the
1573169978Skmacy *	actions necessary to make an adapter operational, such as completing
1574169978Skmacy *	the initialization of HW modules, and enabling interrupts.
1575169978Skmacy */
1576169978Skmacystatic int
1577169978Skmacycxgb_up(struct adapter *sc)
1578169978Skmacy{
1579169978Skmacy	int err = 0;
1580208887Snp	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
1581169978Skmacy
1582194521Skmacy	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
1583194521Skmacy					   __func__, sc->open_device_map));
1584194521Skmacy
1585169978Skmacy	if ((sc->flags & FULL_INIT_DONE) == 0) {
1586169978Skmacy
1587202671Snp		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1588202671Snp
1589169978Skmacy		if ((sc->flags & FW_UPTODATE) == 0)
1590171471Skmacy			if ((err = upgrade_fw(sc)))
1591171471Skmacy				goto out;
1592194521Skmacy
1593171471Skmacy		if ((sc->flags & TPS_UPTODATE) == 0)
1594171471Skmacy			if ((err = update_tpsram(sc)))
1595171471Skmacy				goto out;
1596194521Skmacy
1597208887Snp		if (is_offload(sc) && nfilters != 0) {
1598207643Snp			sc->params.mc5.nservers = 0;
1599208887Snp
1600208887Snp			if (nfilters < 0)
1601208887Snp				sc->params.mc5.nfilters = mxf;
1602208887Snp			else
1603208887Snp				sc->params.mc5.nfilters = min(nfilters, mxf);
1604207643Snp		}
1605207643Snp
1606169978Skmacy		err = t3_init_hw(sc, 0);
1607169978Skmacy		if (err)
1608169978Skmacy			goto out;
1609169978Skmacy
1610176472Skmacy		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1611169978Skmacy		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1612169978Skmacy
1613169978Skmacy		err = setup_sge_qsets(sc);
1614169978Skmacy		if (err)
1615169978Skmacy			goto out;
1616169978Skmacy
1617207643Snp		alloc_filters(sc);
1618169978Skmacy		setup_rss(sc);
1619192933Sgnn
1620174708Skmacy		t3_add_configured_sysctls(sc);
1621169978Skmacy		sc->flags |= FULL_INIT_DONE;
1622169978Skmacy	}
1623169978Skmacy
1624169978Skmacy	t3_intr_clear(sc);
1625169978Skmacy	t3_sge_start(sc);
1626169978Skmacy	t3_intr_enable(sc);
1627169978Skmacy
1628176472Skmacy	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1629176472Skmacy	    is_offload(sc) && init_tp_parity(sc) == 0)
1630176472Skmacy		sc->flags |= TP_PARITY_INIT;
1631176472Skmacy
1632176472Skmacy	if (sc->flags & TP_PARITY_INIT) {
1633194521Skmacy		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
1634176472Skmacy		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1635176472Skmacy	}
1636176472Skmacy
1637172096Skmacy	if (!(sc->flags & QUEUES_BOUND)) {
1638169978Skmacy		bind_qsets(sc);
1639207643Snp		setup_hw_filters(sc);
1640171471Skmacy		sc->flags |= QUEUES_BOUND;
1641171471Skmacy	}
1642194521Skmacy
1643194521Skmacy	t3_sge_reset_adapter(sc);
1644169978Skmacyout:
1645169978Skmacy	return (err);
1646169978Skmacy}
1647169978Skmacy
1648169978Skmacy/*
1649194521Skmacy * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
1650194521Skmacy * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
1651194521Skmacy * during controller_detach, not here.
1652169978Skmacy */
1653167514Skmacystatic void
1654194521Skmacycxgb_down(struct adapter *sc)
1655169978Skmacy{
1656169978Skmacy	t3_sge_stop(sc);
1657169978Skmacy	t3_intr_disable(sc);
1658169978Skmacy}
1659169978Skmacy
1660194521Skmacy/*
1661202671Snp * if_init for cxgb ports.
1662194521Skmacy */
1663202671Snpstatic void
1664202671Snpcxgb_init(void *arg)
1665194521Skmacy{
1666202671Snp	struct port_info *p = arg;
1667194521Skmacy	struct adapter *sc = p->adapter;
1668192537Sgnn
1669194521Skmacy	ADAPTER_LOCK(sc);
1670202671Snp	cxgb_init_locked(p); /* releases adapter lock */
1671202671Snp	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1672202671Snp}
1673194521Skmacy
1674202671Snpstatic int
1675202671Snpcxgb_init_locked(struct port_info *p)
1676202671Snp{
1677202671Snp	struct adapter *sc = p->adapter;
1678202671Snp	struct ifnet *ifp = p->ifp;
1679202671Snp	struct cmac *mac = &p->mac;
1680211345Snp	int i, rc = 0, may_sleep = 0, gave_up_lock = 0;
1681202671Snp
1682202671Snp	ADAPTER_LOCK_ASSERT_OWNED(sc);
1683202671Snp
1684194521Skmacy	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1685211345Snp		gave_up_lock = 1;
1686202671Snp		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
1687194521Skmacy			rc = EINTR;
1688194521Skmacy			goto done;
1689194521Skmacy		}
1690194521Skmacy	}
1691202671Snp	if (IS_DOOMED(p)) {
1692194521Skmacy		rc = ENXIO;
1693202671Snp		goto done;
1694194521Skmacy	}
1695202671Snp	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1696194521Skmacy
1697194521Skmacy	/*
1698202671Snp	 * The code that runs during one-time adapter initialization can sleep
1699202671Snp	 * so it's important not to hold any locks across it.
1700194521Skmacy	 */
1701202671Snp	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
1702194521Skmacy
1703202671Snp	if (may_sleep) {
1704202671Snp		SET_BUSY(sc);
1705211345Snp		gave_up_lock = 1;
1706202671Snp		ADAPTER_UNLOCK(sc);
1707194521Skmacy	}
1708194521Skmacy
1709237920Snp	if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
1710202671Snp			goto done;
1711167514Skmacy
1712194521Skmacy	PORT_LOCK(p);
1713202671Snp	if (isset(&sc->open_device_map, p->port_id) &&
1714202671Snp	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1715202671Snp		PORT_UNLOCK(p);
1716202671Snp		goto done;
1717202671Snp	}
1718192540Sgnn	t3_port_intr_enable(sc, p->port_id);
1719194521Skmacy	if (!mac->multiport)
1720197791Snp		t3_mac_init(mac);
1721194521Skmacy	cxgb_update_mac_settings(p);
1722194521Skmacy	t3_link_start(&p->phy, mac, &p->link_config);
1723194521Skmacy	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1724194521Skmacy	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1725194521Skmacy	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1726194521Skmacy	PORT_UNLOCK(p);
1727192540Sgnn
1728194521Skmacy	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1729194521Skmacy		struct sge_qset *qs = &sc->sge.qs[i];
1730194521Skmacy		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1731170869Skmacy
1732194521Skmacy		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
1733194521Skmacy				 txq->txq_watchdog.c_cpu);
1734194521Skmacy	}
1735167514Skmacy
1736194521Skmacy	/* all ok */
1737194521Skmacy	setbit(&sc->open_device_map, p->port_id);
1738209841Snp	callout_reset(&p->link_check_ch,
1739209841Snp	    p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
1740209841Snp	    link_check_callout, p);
1741167760Skmacy
1742202671Snpdone:
1743202671Snp	if (may_sleep) {
1744202671Snp		ADAPTER_LOCK(sc);
1745202671Snp		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1746202671Snp		CLR_BUSY(sc);
1747211345Snp	}
1748211345Snp	if (gave_up_lock)
1749202671Snp		wakeup_one(&sc->flags);
1750202671Snp	ADAPTER_UNLOCK(sc);
1751202671Snp	return (rc);
1752167514Skmacy}
1753167514Skmacy
1754202671Snpstatic int
1755202671Snpcxgb_uninit_locked(struct port_info *p)
1756202671Snp{
1757202671Snp	struct adapter *sc = p->adapter;
1758202671Snp	int rc;
1759202671Snp
1760202671Snp	ADAPTER_LOCK_ASSERT_OWNED(sc);
1761202671Snp
1762202671Snp	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1763202671Snp		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
1764202671Snp			rc = EINTR;
1765202671Snp			goto done;
1766202671Snp		}
1767202671Snp	}
1768202671Snp	if (IS_DOOMED(p)) {
1769202671Snp		rc = ENXIO;
1770202671Snp		goto done;
1771202671Snp	}
1772202671Snp	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1773202671Snp	SET_BUSY(sc);
1774202671Snp	ADAPTER_UNLOCK(sc);
1775202671Snp
1776202671Snp	rc = cxgb_uninit_synchronized(p);
1777202671Snp
1778202671Snp	ADAPTER_LOCK(sc);
1779202671Snp	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1780202671Snp	CLR_BUSY(sc);
1781202671Snp	wakeup_one(&sc->flags);
1782202671Snpdone:
1783202671Snp	ADAPTER_UNLOCK(sc);
1784202671Snp	return (rc);
1785202671Snp}
1786202671Snp
1787194521Skmacy/*
1788194521Skmacy * Called on "ifconfig down", and from port_detach
1789194521Skmacy */
1790194521Skmacystatic int
1791194521Skmacycxgb_uninit_synchronized(struct port_info *pi)
1792167514Skmacy{
1793194521Skmacy	struct adapter *sc = pi->adapter;
1794194521Skmacy	struct ifnet *ifp = pi->ifp;
1795167514Skmacy
1796194521Skmacy	/*
1797202671Snp	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1798202671Snp	 */
1799202671Snp	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1800202671Snp
1801202671Snp	/*
1802194521Skmacy	 * Clear this port's bit from the open device map, and then drain all
1803194521Skmacy	 * the tasks that can access/manipulate this port's port_info or ifp.
1804218909Sbrucec	 * We disable this port's interrupts here and so the slow/ext
1805194521Skmacy	 * interrupt tasks won't be enqueued.  The tick task will continue to
1806194521Skmacy	 * be enqueued every second but the runs after this drain will not see
1807194521Skmacy	 * this port in the open device map.
1808194521Skmacy	 *
1809194521Skmacy	 * A well behaved task must take open_device_map into account and ignore
1810194521Skmacy	 * ports that are not open.
1811194521Skmacy	 */
1812194521Skmacy	clrbit(&sc->open_device_map, pi->port_id);
1813194521Skmacy	t3_port_intr_disable(sc, pi->port_id);
1814194521Skmacy	taskqueue_drain(sc->tq, &sc->slow_intr_task);
1815194521Skmacy	taskqueue_drain(sc->tq, &sc->tick_task);
1816194521Skmacy
1817209841Snp	callout_drain(&pi->link_check_ch);
1818209841Snp	taskqueue_drain(sc->tq, &pi->link_check_task);
1819209841Snp
1820194521Skmacy	PORT_LOCK(pi);
1821169978Skmacy	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1822169978Skmacy
1823177340Skmacy	/* disable pause frames */
1824194521Skmacy	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
1825170869Skmacy
1826177340Skmacy	/* Reset RX FIFO HWM */
1827194521Skmacy	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
1828177340Skmacy			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1829177340Skmacy
1830199240Snp	DELAY(100 * 1000);
1831177340Skmacy
1832177340Skmacy	/* Wait for TXFIFO empty */
1833194521Skmacy	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
1834177340Skmacy			F_TXFIFO_EMPTY, 1, 20, 5);
1835177340Skmacy
1836199240Snp	DELAY(100 * 1000);
1837199240Snp	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
1838177340Skmacy
1839177340Skmacy	pi->phy.ops->power_down(&pi->phy, 1);
1840177340Skmacy
1841194521Skmacy	PORT_UNLOCK(pi);
1842167514Skmacy
1843194521Skmacy	pi->link_config.link_ok = 0;
1844197791Snp	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
1845194521Skmacy
1846194521Skmacy	if (sc->open_device_map == 0)
1847194521Skmacy		cxgb_down(pi->adapter);
1848194521Skmacy
1849194521Skmacy	return (0);
1850170654Skmacy}
1851170654Skmacy
1852181616Skmacy/*
1853181616Skmacy * Mark lro enabled or disabled in all qsets for this port
1854181616Skmacy */
1855170654Skmacystatic int
1856181616Skmacycxgb_set_lro(struct port_info *p, int enabled)
1857181616Skmacy{
1858181616Skmacy	int i;
1859181616Skmacy	struct adapter *adp = p->adapter;
1860181616Skmacy	struct sge_qset *q;
1861181616Skmacy
1862181616Skmacy	for (i = 0; i < p->nqsets; i++) {
1863181616Skmacy		q = &adp->sge.qs[p->first_qset + i];
1864181616Skmacy		q->lro.enabled = (enabled != 0);
1865181616Skmacy	}
1866181616Skmacy	return (0);
1867181616Skmacy}
1868181616Skmacy
1869181616Skmacystatic int
1870167514Skmacycxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1871167514Skmacy{
1872167514Skmacy	struct port_info *p = ifp->if_softc;
1873202671Snp	struct adapter *sc = p->adapter;
1874167514Skmacy	struct ifreq *ifr = (struct ifreq *)data;
1875202671Snp	int flags, error = 0, mtu;
1876167514Skmacy	uint32_t mask;
1877167514Skmacy
1878167514Skmacy	switch (command) {
1879167514Skmacy	case SIOCSIFMTU:
1880202671Snp		ADAPTER_LOCK(sc);
1881202671Snp		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1882202671Snp		if (error) {
1883202671Snpfail:
1884202671Snp			ADAPTER_UNLOCK(sc);
1885202671Snp			return (error);
1886202671Snp		}
1887202671Snp
1888194521Skmacy		mtu = ifr->ifr_mtu;
1889194521Skmacy		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
1890194521Skmacy			error = EINVAL;
1891194521Skmacy		} else {
1892194521Skmacy			ifp->if_mtu = mtu;
1893194521Skmacy			PORT_LOCK(p);
1894194521Skmacy			cxgb_update_mac_settings(p);
1895194521Skmacy			PORT_UNLOCK(p);
1896194521Skmacy		}
1897202671Snp		ADAPTER_UNLOCK(sc);
1898167514Skmacy		break;
1899167514Skmacy	case SIOCSIFFLAGS:
1900202671Snp		ADAPTER_LOCK(sc);
1901202671Snp		if (IS_DOOMED(p)) {
1902202671Snp			error = ENXIO;
1903202671Snp			goto fail;
1904202671Snp		}
1905167514Skmacy		if (ifp->if_flags & IFF_UP) {
1906167514Skmacy			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1907167514Skmacy				flags = p->if_flags;
1908167514Skmacy				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
1909194521Skmacy				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
1910202671Snp					if (IS_BUSY(sc)) {
1911202671Snp						error = EBUSY;
1912202671Snp						goto fail;
1913202671Snp					}
1914194521Skmacy					PORT_LOCK(p);
1915194521Skmacy					cxgb_update_mac_settings(p);
1916194521Skmacy					PORT_UNLOCK(p);
1917194521Skmacy				}
1918202671Snp				ADAPTER_UNLOCK(sc);
1919167514Skmacy			} else
1920202671Snp				error = cxgb_init_locked(p);
1921167760Skmacy			p->if_flags = ifp->if_flags;
1922170869Skmacy		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1923202671Snp			error = cxgb_uninit_locked(p);
1924202863Snp		else
1925202863Snp			ADAPTER_UNLOCK(sc);
1926202671Snp
1927202671Snp		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1928176472Skmacy		break;
1929176472Skmacy	case SIOCADDMULTI:
1930176472Skmacy	case SIOCDELMULTI:
1931202671Snp		ADAPTER_LOCK(sc);
1932202671Snp		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1933202671Snp		if (error)
1934202671Snp			goto fail;
1935202671Snp
1936170869Skmacy		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1937194521Skmacy			PORT_LOCK(p);
1938194521Skmacy			cxgb_update_mac_settings(p);
1939194521Skmacy			PORT_UNLOCK(p);
1940167514Skmacy		}
1941202671Snp		ADAPTER_UNLOCK(sc);
1942194521Skmacy
1943167514Skmacy		break;
1944167514Skmacy	case SIOCSIFCAP:
1945202671Snp		ADAPTER_LOCK(sc);
1946202671Snp		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1947202671Snp		if (error)
1948202671Snp			goto fail;
1949202671Snp
1950167514Skmacy		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1951167514Skmacy		if (mask & IFCAP_TXCSUM) {
1952204348Snp			ifp->if_capenable ^= IFCAP_TXCSUM;
1953204348Snp			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1954204348Snp
1955237925Snp			if (IFCAP_TSO4 & ifp->if_capenable &&
1956204348Snp			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1957237925Snp				ifp->if_capenable &= ~IFCAP_TSO4;
1958204348Snp				if_printf(ifp,
1959237925Snp				    "tso4 disabled due to -txcsum.\n");
1960167514Skmacy			}
1961167514Skmacy		}
1962237925Snp		if (mask & IFCAP_TXCSUM_IPV6) {
1963237925Snp			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1964237925Snp			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1965237925Snp
1966237925Snp			if (IFCAP_TSO6 & ifp->if_capenable &&
1967237925Snp			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1968237925Snp				ifp->if_capenable &= ~IFCAP_TSO6;
1969237925Snp				if_printf(ifp,
1970237925Snp				    "tso6 disabled due to -txcsum6.\n");
1971237925Snp			}
1972237925Snp		}
1973204348Snp		if (mask & IFCAP_RXCSUM)
1974180583Skmacy			ifp->if_capenable ^= IFCAP_RXCSUM;
1975237925Snp		if (mask & IFCAP_RXCSUM_IPV6)
1976237925Snp			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1977204348Snp
1978237925Snp		/*
1979237925Snp		 * Note that we leave CSUM_TSO alone (it is always set).  The
1980237925Snp		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1981237925Snp		 * sending a TSO request our way, so it's sufficient to toggle
1982237925Snp		 * IFCAP_TSOx only.
1983237925Snp		 */
1984237925Snp		if (mask & IFCAP_TSO4) {
1985237925Snp			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1986237925Snp			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
1987237925Snp				if_printf(ifp, "enable txcsum first.\n");
1988237925Snp				error = EAGAIN;
1989237925Snp				goto fail;
1990237925Snp			}
1991237925Snp			ifp->if_capenable ^= IFCAP_TSO4;
1992167514Skmacy		}
1993237925Snp		if (mask & IFCAP_TSO6) {
1994237925Snp			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1995237925Snp			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1996237925Snp				if_printf(ifp, "enable txcsum6 first.\n");
1997237925Snp				error = EAGAIN;
1998237925Snp				goto fail;
1999237925Snp			}
2000237925Snp			ifp->if_capenable ^= IFCAP_TSO6;
2001237925Snp		}
2002181616Skmacy		if (mask & IFCAP_LRO) {
2003181616Skmacy			ifp->if_capenable ^= IFCAP_LRO;
2004181616Skmacy
2005181616Skmacy			/* Safe to do this even if cxgb_up not called yet */
2006181616Skmacy			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
2007181616Skmacy		}
2008237920Snp#ifdef TCP_OFFLOAD
2009237920Snp		if (mask & IFCAP_TOE4) {
2010237920Snp			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE4;
2011237920Snp
2012237920Snp			error = toe_capability(p, enable);
2013237920Snp			if (error == 0)
2014237920Snp				ifp->if_capenable ^= mask;
2015237920Snp		}
2016237920Snp#endif
2017180583Skmacy		if (mask & IFCAP_VLAN_HWTAGGING) {
2018180583Skmacy			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2019194521Skmacy			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2020194521Skmacy				PORT_LOCK(p);
2021194521Skmacy				cxgb_update_mac_settings(p);
2022194521Skmacy				PORT_UNLOCK(p);
2023194521Skmacy			}
2024180583Skmacy		}
2025180583Skmacy		if (mask & IFCAP_VLAN_MTU) {
2026180583Skmacy			ifp->if_capenable ^= IFCAP_VLAN_MTU;
2027194521Skmacy			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2028194521Skmacy				PORT_LOCK(p);
2029194521Skmacy				cxgb_update_mac_settings(p);
2030194521Skmacy				PORT_UNLOCK(p);
2031194521Skmacy			}
2032180583Skmacy		}
2033204348Snp		if (mask & IFCAP_VLAN_HWTSO)
2034204348Snp			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2035202671Snp		if (mask & IFCAP_VLAN_HWCSUM)
2036180583Skmacy			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2037180583Skmacy
2038180583Skmacy#ifdef VLAN_CAPABILITIES
2039180583Skmacy		VLAN_CAPABILITIES(ifp);
2040180583Skmacy#endif
2041202671Snp		ADAPTER_UNLOCK(sc);
2042167514Skmacy		break;
2043202671Snp	case SIOCSIFMEDIA:
2044202671Snp	case SIOCGIFMEDIA:
2045202671Snp		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2046202671Snp		break;
2047167514Skmacy	default:
2048202671Snp		error = ether_ioctl(ifp, command, data);
2049167514Skmacy	}
2050194521Skmacy
2051167514Skmacy	return (error);
2052167514Skmacy}
2053167514Skmacy
2054174708Skmacystatic int
2055167514Skmacycxgb_media_change(struct ifnet *ifp)
2056167514Skmacy{
2057194921Snp	return (EOPNOTSUPP);
2058167514Skmacy}
2059167514Skmacy
2060186282Sgnn/*
2061194921Snp * Translates phy->modtype to the correct Ethernet media subtype.
2062186282Sgnn */
2063186282Sgnnstatic int
2064194921Snpcxgb_ifm_type(int mod)
2065186282Sgnn{
2066194921Snp	switch (mod) {
2067186282Sgnn	case phy_modtype_sr:
2068194921Snp		return (IFM_10G_SR);
2069186282Sgnn	case phy_modtype_lr:
2070194921Snp		return (IFM_10G_LR);
2071186282Sgnn	case phy_modtype_lrm:
2072194921Snp		return (IFM_10G_LRM);
2073186282Sgnn	case phy_modtype_twinax:
2074194921Snp		return (IFM_10G_TWINAX);
2075186282Sgnn	case phy_modtype_twinax_long:
2076194921Snp		return (IFM_10G_TWINAX_LONG);
2077186282Sgnn	case phy_modtype_none:
2078194921Snp		return (IFM_NONE);
2079186282Sgnn	case phy_modtype_unknown:
2080194921Snp		return (IFM_UNKNOWN);
2081186282Sgnn	}
2082186282Sgnn
2083194921Snp	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2084194921Snp	return (IFM_UNKNOWN);
2085186282Sgnn}
2086186282Sgnn
2087194921Snp/*
2088194921Snp * Rebuilds the ifmedia list for this port, and sets the current media.
2089194921Snp */
2090167514Skmacystatic void
2091194921Snpcxgb_build_medialist(struct port_info *p)
2092194921Snp{
2093194921Snp	struct cphy *phy = &p->phy;
2094194921Snp	struct ifmedia *media = &p->media;
2095194921Snp	int mod = phy->modtype;
2096194921Snp	int m = IFM_ETHER | IFM_FDX;
2097194921Snp
2098194921Snp	PORT_LOCK(p);
2099194921Snp
2100194921Snp	ifmedia_removeall(media);
2101194921Snp	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
2102194921Snp		/* Copper (RJ45) */
2103194921Snp
2104194921Snp		if (phy->caps & SUPPORTED_10000baseT_Full)
2105194921Snp			ifmedia_add(media, m | IFM_10G_T, mod, NULL);
2106194921Snp
2107194921Snp		if (phy->caps & SUPPORTED_1000baseT_Full)
2108194921Snp			ifmedia_add(media, m | IFM_1000_T, mod, NULL);
2109194921Snp
2110194921Snp		if (phy->caps & SUPPORTED_100baseT_Full)
2111194921Snp			ifmedia_add(media, m | IFM_100_TX, mod, NULL);
2112194921Snp
2113194921Snp		if (phy->caps & SUPPORTED_10baseT_Full)
2114194921Snp			ifmedia_add(media, m | IFM_10_T, mod, NULL);
2115194921Snp
2116194921Snp		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
2117194921Snp		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2118194921Snp
2119194921Snp	} else if (phy->caps & SUPPORTED_TP) {
2120194921Snp		/* Copper (CX4) */
2121194921Snp
2122194921Snp		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
2123194921Snp			("%s: unexpected cap 0x%x", __func__, phy->caps));
2124194921Snp
2125194921Snp		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
2126194921Snp		ifmedia_set(media, m | IFM_10G_CX4);
2127194921Snp
2128194921Snp	} else if (phy->caps & SUPPORTED_FIBRE &&
2129194921Snp		   phy->caps & SUPPORTED_10000baseT_Full) {
2130194921Snp		/* 10G optical (but includes SFP+ twinax) */
2131194921Snp
2132194921Snp		m |= cxgb_ifm_type(mod);
2133194921Snp		if (IFM_SUBTYPE(m) == IFM_NONE)
2134194921Snp			m &= ~IFM_FDX;
2135194921Snp
2136194921Snp		ifmedia_add(media, m, mod, NULL);
2137194921Snp		ifmedia_set(media, m);
2138194921Snp
2139194921Snp	} else if (phy->caps & SUPPORTED_FIBRE &&
2140194921Snp		   phy->caps & SUPPORTED_1000baseT_Full) {
2141194921Snp		/* 1G optical */
2142194921Snp
2143194921Snp		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
2144194921Snp		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
2145194921Snp		ifmedia_set(media, m | IFM_1000_SX);
2146194921Snp
2147194921Snp	} else {
2148194921Snp		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
2149194921Snp			    phy->caps));
2150194921Snp	}
2151194921Snp
2152194921Snp	PORT_UNLOCK(p);
2153194921Snp}
2154194921Snp
2155194921Snpstatic void
2156167514Skmacycxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2157167514Skmacy{
2158167514Skmacy	struct port_info *p = ifp->if_softc;
2159186282Sgnn	struct ifmedia_entry *cur = p->media.ifm_cur;
2160194921Snp	int speed = p->link_config.speed;
2161167514Skmacy
2162194921Snp	if (cur->ifm_data != p->phy.modtype) {
2163194921Snp		cxgb_build_medialist(p);
2164194921Snp		cur = p->media.ifm_cur;
2165186282Sgnn	}
2166186282Sgnn
2167167514Skmacy	ifmr->ifm_status = IFM_AVALID;
2168167514Skmacy	if (!p->link_config.link_ok)
2169167514Skmacy		return;
2170167514Skmacy
2171167514Skmacy	ifmr->ifm_status |= IFM_ACTIVE;
2172167514Skmacy
2173194921Snp	/*
2174194921Snp	 * active and current will differ iff current media is autoselect.  That
2175194921Snp	 * can happen only for copper RJ45.
2176194921Snp	 */
2177194921Snp	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2178194921Snp		return;
2179194921Snp	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
2180194921Snp		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
2181194921Snp
2182194921Snp	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2183194921Snp	if (speed == SPEED_10000)
2184194921Snp		ifmr->ifm_active |= IFM_10G_T;
2185194921Snp	else if (speed == SPEED_1000)
2186194921Snp		ifmr->ifm_active |= IFM_1000_T;
2187194921Snp	else if (speed == SPEED_100)
2188194921Snp		ifmr->ifm_active |= IFM_100_TX;
2189194921Snp	else if (speed == SPEED_10)
2190170654Skmacy		ifmr->ifm_active |= IFM_10_T;
2191167514Skmacy	else
2192194921Snp		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
2193194921Snp			    speed));
2194167514Skmacy}
2195167514Skmacy
2196167514Skmacystatic void
2197167514Skmacycxgb_async_intr(void *data)
2198167514Skmacy{
2199167760Skmacy	adapter_t *sc = data;
2200167760Skmacy
2201209840Snp	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
2202209840Snp	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
2203170869Skmacy	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2204167514Skmacy}
2205167514Skmacy
2206209841Snpstatic void
2207209841Snplink_check_callout(void *arg)
2208197791Snp{
2209209841Snp	struct port_info *pi = arg;
2210209841Snp	struct adapter *sc = pi->adapter;
2211197791Snp
2212209841Snp	if (!isset(&sc->open_device_map, pi->port_id))
2213209841Snp		return;
2214197791Snp
2215209841Snp	taskqueue_enqueue(sc->tq, &pi->link_check_task);
2216197791Snp}
2217197791Snp
2218167514Skmacystatic void
2219209841Snpcheck_link_status(void *arg, int pending)
2220167514Skmacy{
2221209841Snp	struct port_info *pi = arg;
2222209841Snp	struct adapter *sc = pi->adapter;
2223167514Skmacy
2224209841Snp	if (!isset(&sc->open_device_map, pi->port_id))
2225209841Snp		return;
2226167514Skmacy
2227209841Snp	t3_link_changed(sc, pi->port_id);
2228194521Skmacy
2229209841Snp	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ))
2230209841Snp		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2231167514Skmacy}
2232167514Skmacy
2233209841Snpvoid
2234209841Snpt3_os_link_intr(struct port_info *pi)
2235209841Snp{
2236209841Snp	/*
2237209841Snp	 * Schedule a link check in the near future.  If the link is flapping
2238209841Snp	 * rapidly we'll keep resetting the callout and delaying the check until
2239209841Snp	 * things stabilize a bit.
2240209841Snp	 */
2241209841Snp	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
2242209841Snp}
2243209841Snp
2244167514Skmacystatic void
2245194521Skmacycheck_t3b2_mac(struct adapter *sc)
2246167514Skmacy{
2247167514Skmacy	int i;
2248167514Skmacy
2249194521Skmacy	if (sc->flags & CXGB_SHUTDOWN)
2250176472Skmacy		return;
2251194521Skmacy
2252194521Skmacy	for_each_port(sc, i) {
2253194521Skmacy		struct port_info *p = &sc->port[i];
2254194521Skmacy		int status;
2255194521Skmacy#ifdef INVARIANTS
2256167746Skmacy		struct ifnet *ifp = p->ifp;
2257194521Skmacy#endif
2258194521Skmacy
2259197791Snp		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2260197791Snp		    !p->link_config.link_ok)
2261167746Skmacy			continue;
2262194521Skmacy
2263194521Skmacy		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2264194521Skmacy			("%s: state mismatch (drv_flags %x, device_map %x)",
2265194521Skmacy			 __func__, ifp->if_drv_flags, sc->open_device_map));
2266194521Skmacy
2267167746Skmacy		PORT_LOCK(p);
2268194521Skmacy		status = t3b2_mac_watchdog_task(&p->mac);
2269167746Skmacy		if (status == 1)
2270167746Skmacy			p->mac.stats.num_toggled++;
2271167746Skmacy		else if (status == 2) {
2272167746Skmacy			struct cmac *mac = &p->mac;
2273167746Skmacy
2274194521Skmacy			cxgb_update_mac_settings(p);
2275167746Skmacy			t3_link_start(&p->phy, mac, &p->link_config);
2276167746Skmacy			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2277194521Skmacy			t3_port_intr_enable(sc, p->port_id);
2278167746Skmacy			p->mac.stats.num_resets++;
2279167746Skmacy		}
2280167746Skmacy		PORT_UNLOCK(p);
2281167514Skmacy	}
2282167514Skmacy}
2283167514Skmacy
2284167746Skmacystatic void
2285167746Skmacycxgb_tick(void *arg)
2286167746Skmacy{
2287167746Skmacy	adapter_t *sc = (adapter_t *)arg;
2288170869Skmacy
2289194521Skmacy	if (sc->flags & CXGB_SHUTDOWN)
2290176472Skmacy		return;
2291174708Skmacy
2292185508Skmacy	taskqueue_enqueue(sc->tq, &sc->tick_task);
2293209841Snp	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
2294170869Skmacy}
2295170869Skmacy
2296170869Skmacystatic void
2297170869Skmacycxgb_tick_handler(void *arg, int count)
2298170869Skmacy{
2299170869Skmacy	adapter_t *sc = (adapter_t *)arg;
2300167746Skmacy	const struct adapter_params *p = &sc->params;
2301181652Skmacy	int i;
2302189643Sgnn	uint32_t cause, reset;
2303167746Skmacy
2304194521Skmacy	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
2305176472Skmacy		return;
2306176472Skmacy
2307185508Skmacy	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2308185508Skmacy		check_t3b2_mac(sc);
2309185508Skmacy
2310206109Snp	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
2311206109Snp	if (cause) {
2312189643Sgnn		struct sge_qset *qs = &sc->sge.qs[0];
2313206109Snp		uint32_t mask, v;
2314189643Sgnn
2315206109Snp		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
2316189643Sgnn
2317206109Snp		mask = 1;
2318206109Snp		for (i = 0; i < SGE_QSETS; i++) {
2319206109Snp			if (v & mask)
2320206109Snp				qs[i].rspq.starved++;
2321206109Snp			mask <<= 1;
2322189643Sgnn		}
2323206109Snp
2324206109Snp		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */
2325206109Snp
2326206109Snp		for (i = 0; i < SGE_QSETS * 2; i++) {
2327206109Snp			if (v & mask) {
2328206109Snp				qs[i / 2].fl[i % 2].empty++;
2329206109Snp			}
2330206109Snp			mask <<= 1;
2331206109Snp		}
2332206109Snp
2333206109Snp		/* clear */
2334206109Snp		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
2335206109Snp		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
2336189643Sgnn	}
2337189643Sgnn
2338185506Skmacy	for (i = 0; i < sc->params.nports; i++) {
2339185506Skmacy		struct port_info *pi = &sc->port[i];
2340185506Skmacy		struct ifnet *ifp = pi->ifp;
2341189643Sgnn		struct cmac *mac = &pi->mac;
2342189643Sgnn		struct mac_stats *mstats = &mac->stats;
2343205948Snp		int drops, j;
2344194521Skmacy
2345194521Skmacy		if (!isset(&sc->open_device_map, pi->port_id))
2346194521Skmacy			continue;
2347194521Skmacy
2348185508Skmacy		PORT_LOCK(pi);
2349189643Sgnn		t3_mac_update_stats(mac);
2350185508Skmacy		PORT_UNLOCK(pi);
2351185508Skmacy
2352205948Snp		ifp->if_opackets = mstats->tx_frames;
2353205948Snp		ifp->if_ipackets = mstats->rx_frames;
2354185506Skmacy		ifp->if_obytes = mstats->tx_octets;
2355185506Skmacy		ifp->if_ibytes = mstats->rx_octets;
2356185506Skmacy		ifp->if_omcasts = mstats->tx_mcast_frames;
2357185506Skmacy		ifp->if_imcasts = mstats->rx_mcast_frames;
2358205948Snp		ifp->if_collisions = mstats->tx_total_collisions;
2359205948Snp		ifp->if_iqdrops = mstats->rx_cong_drops;
2360185506Skmacy
2361205948Snp		drops = 0;
2362205948Snp		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; j++)
2363205948Snp			drops += sc->sge.qs[j].txq[TXQ_ETH].txq_mr->br_drops;
2364205948Snp		ifp->if_snd.ifq_drops = drops;
2365205948Snp
2366185506Skmacy		ifp->if_oerrors =
2367185506Skmacy		    mstats->tx_excess_collisions +
2368185506Skmacy		    mstats->tx_underrun +
2369185506Skmacy		    mstats->tx_len_errs +
2370185506Skmacy		    mstats->tx_mac_internal_errs +
2371185506Skmacy		    mstats->tx_excess_deferral +
2372185506Skmacy		    mstats->tx_fcs_errs;
2373185506Skmacy		ifp->if_ierrors =
2374185506Skmacy		    mstats->rx_jabber +
2375185506Skmacy		    mstats->rx_data_errs +
2376185506Skmacy		    mstats->rx_sequence_errs +
2377185506Skmacy		    mstats->rx_runt +
2378185506Skmacy		    mstats->rx_too_long +
2379185506Skmacy		    mstats->rx_mac_internal_errs +
2380185506Skmacy		    mstats->rx_short +
2381185506Skmacy		    mstats->rx_fcs_errs;
2382189643Sgnn
2383189643Sgnn		if (mac->multiport)
2384189643Sgnn			continue;
2385189643Sgnn
2386189643Sgnn		/* Count rx fifo overflows, once per second */
2387189643Sgnn		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
2388189643Sgnn		reset = 0;
2389189643Sgnn		if (cause & F_RXFIFO_OVERFLOW) {
2390189643Sgnn			mac->stats.rx_fifo_ovfl++;
2391189643Sgnn			reset |= F_RXFIFO_OVERFLOW;
2392189643Sgnn		}
2393189643Sgnn		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
2394185506Skmacy	}
2395167746Skmacy}
2396167746Skmacy
/*
 * Placeholder for re-touching the 64-bit BARs on 32-bit platforms after a
 * chip reset.  The whole body is compiled out (`&& 0') and still uses the
 * Linux-style pci_*_config_dword()/pdev names, so it is a reminder only;
 * the function is currently a no-op.
 */
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}
2414171978Skmacy
2415167514Skmacystatic int
2416171471Skmacyset_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2417171471Skmacy{
2418171471Skmacy	uint8_t *buf;
2419171471Skmacy	int err = 0;
2420171471Skmacy	u32 aligned_offset, aligned_len, *p;
2421171471Skmacy	struct adapter *adapter = pi->adapter;
2422171471Skmacy
2423171471Skmacy
2424171471Skmacy	aligned_offset = offset & ~3;
2425171471Skmacy	aligned_len = (len + (offset & 3) + 3) & ~3;
2426171471Skmacy
2427171471Skmacy	if (aligned_offset != offset || aligned_len != len) {
2428171471Skmacy		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2429171471Skmacy		if (!buf)
2430171471Skmacy			return (ENOMEM);
2431171471Skmacy		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2432171471Skmacy		if (!err && aligned_len > 4)
2433171471Skmacy			err = t3_seeprom_read(adapter,
2434171471Skmacy					      aligned_offset + aligned_len - 4,
2435171471Skmacy					      (u32 *)&buf[aligned_len - 4]);
2436171471Skmacy		if (err)
2437171471Skmacy			goto out;
2438171471Skmacy		memcpy(buf + (offset & 3), data, len);
2439171471Skmacy	} else
2440171471Skmacy		buf = (uint8_t *)(uintptr_t)data;
2441171471Skmacy
2442171471Skmacy	err = t3_seeprom_wp(adapter, 0);
2443171471Skmacy	if (err)
2444171471Skmacy		goto out;
2445171471Skmacy
2446171471Skmacy	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2447171471Skmacy		err = t3_seeprom_write(adapter, aligned_offset, *p);
2448171471Skmacy		aligned_offset += 4;
2449171471Skmacy	}
2450171471Skmacy
2451171471Skmacy	if (!err)
2452171471Skmacy		err = t3_seeprom_wp(adapter, 1);
2453171471Skmacyout:
2454171471Skmacy	if (buf != data)
2455171471Skmacy		free(buf, M_DEVBUF);
2456171471Skmacy	return err;
2457171471Skmacy}
2458171471Skmacy
2459171471Skmacy
/*
 * Returns non-zero if val lies within [lo, hi].  A negative val means
 * "parameter not supplied" and is always accepted.
 */
static int
in_range(int val, int lo, int hi)
{

	if (val < 0)
		return (1);

	return (lo <= val && val <= hi);
}
2465167514Skmacy
/*
 * Open routine for the cxgbtool control character device.  All access
 * control is performed at ioctl time, so open always succeeds.
 * (Fixes: third parameter was misnamed `fmp' — cf. cxgb_extension_close —
 * and the body was space-indented instead of tab-indented.)
 */
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}
2471170654Skmacy
/*
 * Close routine for the cxgbtool control character device; nothing to
 * tear down.  (Fixes: body was space-indented instead of tab-indented.)
 */
static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}
2477170654Skmacy
/*
 * ioctl handler for the cxgbtool control device.  `data' is the kernel
 * copy of the ioctl argument (copied in/out by the cdev layer) and
 * dev->si_drv1 carries the port this node was created for.  Every command
 * is privileged (PRIV_DRIVER).  Returns 0 or an errno value; negative
 * returns from the common t3_* code are negated into errnos.
 */
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
	case CHELSIO_GET_MIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			/*
			 * Clause-45 access: the high byte of phy_id selects
			 * the MMD, 0 defaulting to the PCS device.
			 */
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
					     mid->reg_num, &val);
		} else
		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
					     mid->reg_num & 0x1f, &val);
		if (error == 0)
			mid->val_out = val;
		break;
	}
	case CHELSIO_SET_MIIREG: {
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_write)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			/* Same clause-45 MMD selection as GET_MIIREG. */
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
					      mmd, mid->reg_num, mid->val_in);
		} else
			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
					      mid->reg_num & 0x1f,
					      mid->val_in);
		break;
	}
	case CHELSIO_SETREG: {
		/* Raw register poke; address must be 32-bit aligned and
		 * inside the mapped BAR. */
		struct ch_reg *edata = (struct ch_reg *)data;
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		t3_write_reg(sc, edata->addr, edata->val);
		break;
	}
	case CHELSIO_GETREG: {
		/* Raw register peek, same validation as SETREG. */
		struct ch_reg *edata = (struct ch_reg *)data;
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		edata->val = t3_read_reg(sc, edata->addr);
		break;
	}
	case CHELSIO_GET_SGE_CONTEXT: {
		/* Dump one SGE context; reg_lock serializes the indirect
		 * context-access registers. */
		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
		mtx_lock_spin(&sc->sge.reg_lock);
		switch (ecntxt->cntxt_type) {
		case CNTXT_TYPE_EGRESS:
			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_FL:
			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_RSP:
			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_CQ:
			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		default:
			error = EINVAL;
			break;
		}
		mtx_unlock_spin(&sc->sge.reg_lock);
		break;
	}
	case CHELSIO_GET_SGE_DESC: {
		/* Fetch one descriptor; each qset exposes 6 rings. */
		struct ch_desc *edesc = (struct ch_desc *)data;
		int ret;
		if (edesc->queue_num >= SGE_QSETS * 6)
			return (EINVAL);
		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
		    edesc->queue_num % 6, edesc->idx, edesc->data);
		if (ret < 0)
			return (EINVAL);
		edesc->size = ret;
		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		/* Report the software parameters of one of this port's
		 * queue sets (qset_idx is relative to the port). */
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (t->qset_idx >= nqsets)
			return EINVAL;

		i = q1 + t->qset_idx;
		q = &sc->params.sge.qset[i];
		t->rspq_size   = q->rspq_size;
		t->txq_size[0] = q->txq_size[0];
		t->txq_size[1] = q->txq_size[1];
		t->txq_size[2] = q->txq_size[2];
		t->fl_size[0]  = q->fl_size;
		t->fl_size[1]  = q->jumbo_size;
		t->polling     = q->polling;
		t->lro         = q->lro;
		t->intr_lat    = q->coalesce_usecs;
		t->cong_thres  = q->cong_thres;
		t->qnum        = i;

		/* IRQ resources only exist once the adapter is fully up. */
		if ((sc->flags & FULL_INIT_DONE) == 0)
			t->vector = 0;
		else if (sc->flags & USING_MSIX)
			t->vector = rman_get_start(sc->msix_irq_res[i]);
		else
			t->vector = rman_get_start(sc->irq_res);

		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;
		edata->val = pi->nqsets;
		break;
	}
	case CHELSIO_LOAD_FW: {
		uint8_t *fw_data;
		uint32_t vers;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		/*
		 * You're allowed to load a firmware only before FULL_INIT_DONE
		 *
		 * FW_UPTODATE is also set so the rest of the initialization
		 * will not overwrite what was loaded here.  This gives you the
		 * flexibility to load any firmware (and maybe shoot yourself in
		 * the foot).
		 */

		ADAPTER_LOCK(sc);
		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
			ADAPTER_UNLOCK(sc);
			return (EBUSY);
		}

		/* t->len is user-supplied; an absurd size simply makes the
		 * M_NOWAIT allocation fail and we return ENOMEM. */
		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!fw_data)
			error = ENOMEM;
		else
			error = copyin(t->buf, fw_data, t->len);

		if (!error)
			error = -t3_load_fw(sc, fw_data, t->len);

		/* Refresh the cached version string regardless of outcome. */
		if (t3_get_fw_version(sc, &vers) == 0) {
			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
		}

		if (!error)
			sc->flags |= FW_UPTODATE;

		free(fw_data, M_DEVBUF);
		ADAPTER_UNLOCK(sc);
		break;
	}
	case CHELSIO_LOAD_BOOT: {
		/* Load a boot image into flash; unlike LOAD_FW this is
		 * allowed at any time. */
		uint8_t *boot_data;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!boot_data)
			return ENOMEM;

		error = copyin(t->buf, boot_data, t->len);
		if (!error)
			error = -t3_load_boot(sc, boot_data, t->len);

		free(boot_data, M_DEVBUF);
		break;
	}
	case CHELSIO_GET_PM: {
		/* Report payload-memory (PMTX/PMRX) page layout. */
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		m->tx_pg_sz = p->tx_pg_size;
		m->tx_num_pg = p->tx_num_pgs;
		m->rx_pg_sz  = p->rx_pg_size;
		m->rx_num_pg = p->rx_num_pgs;
		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;

		break;
	}
	case CHELSIO_SET_PM: {
		/* Reconfigure payload-memory page layout; only possible
		 * before the adapter is fully initialized. */
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);

		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
			return (EINVAL);	/* not power of 2 */
		if (!(m->rx_pg_sz & 0x14000))
			return (EINVAL);	/* not 16KB or 64KB */
		/* tx page size must be one of the sizes in this bitmask */
		if (!(m->tx_pg_sz & 0x1554000))
			return (EINVAL);
		/* -1 means "keep the current value" */
		if (m->tx_num_pg == -1)
			m->tx_num_pg = p->tx_num_pgs;
		if (m->rx_num_pg == -1)
			m->rx_num_pg = p->rx_num_pgs;
		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
			return (EINVAL);
		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
			return (EINVAL);

		p->rx_pg_size = m->rx_pg_sz;
		p->tx_pg_size = m->tx_pg_sz;
		p->rx_num_pgs = m->rx_num_pg;
		p->tx_num_pgs = m->tx_num_pg;
		break;
	}
	case CHELSIO_SETMTUTAB: {
		/* Replace the TP MTU table; only while offload is idle. */
		struct ch_mtus *m = (struct ch_mtus *)data;
		int i;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (offload_running(sc))
			return (EBUSY);
		if (m->nmtus != NMTUS)
			return (EINVAL);
		if (m->mtus[0] < 81)         /* accommodate SACK */
			return (EINVAL);

		/*
		 * MTUs must be in ascending order
		 */
		for (i = 1; i < NMTUS; ++i)
			if (m->mtus[i] < m->mtus[i - 1])
				return (EINVAL);

		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
		break;
	}
	case CHELSIO_GETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
		m->nmtus = NMTUS;
		break;
	}
	case CHELSIO_GET_MEM: {
		/* Stream the contents of one MC7 memory controller to
		 * userland in 256-byte chunks. */
		struct ch_mem_range *t = (struct ch_mem_range *)data;
		struct mc7 *mem;
		uint8_t *useraddr;
		u64 buf[32];

		/*
		 * Use these to avoid modifying len/addr in the return
		 * struct
		 */
		uint32_t len = t->len, addr = t->addr;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);         /* need the memory controllers */
		if ((addr & 0x7) || (len & 0x7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &sc->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &sc->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &sc->pmtx;
		else
			return (EINVAL);

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t->version = 3 | (sc->params.rev << 10);

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		/* NOTE(review): addr/len range is presumably validated by
		 * t3_mc7_bd_read against the controller size — confirm. */
		useraddr = (uint8_t *)t->buf;
		while (len) {
			unsigned int chunk = min(len, sizeof(buf));

			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
			if (error)
				return (-error);
			if (copyout(buf, useraddr, chunk))
				return (EFAULT);
			useraddr += chunk;
			addr += chunk;
			len -= chunk;
		}
		break;
	}
	case CHELSIO_READ_TCAM_WORD: {
		struct ch_tcam_word *t = (struct ch_tcam_word *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);         /* need MC5 */
		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
		break;
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace *t = (struct ch_trace *)data;
		const struct trace_params *tp;

		/* The trace_params fields are laid out to overlay the
		 * sip..proto members of ch_trace. */
		tp = (const struct trace_params *)&t->sip;
		if (t->config_tx)
			t3_config_trace_filter(sc, tp, 0, t->invert_match,
					       t->trace_tx);
		if (t->config_rx)
			t3_config_trace_filter(sc, tp, 1, t->invert_match,
					       t->trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED: {
		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
		if (sc->open_device_map == 0)
			return (EAGAIN);
		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
		    p->binding);
		break;
	}
	case CHELSIO_IFCONF_GETREGS: {
		/* Snapshot the register map into a temporary buffer, then
		 * copy it out; the caller's buffer must be large enough. */
		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
		int reglen = cxgb_get_regs_len();
		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			return (ENOMEM);
		}
		if (regs->len > reglen)
			regs->len = reglen;
		else if (regs->len < reglen)
			error = ENOBUFS;

		if (!error) {
			cxgb_get_regs(sc, regs, buf);
			error = copyout(buf, regs->data, reglen);
		}
		free(buf, M_DEVBUF);

		break;
	}
	case CHELSIO_SET_HW_SCHED: {
		/* Configure one of the NTX_SCHED Tx rate schedulers.
		 * Negative fields mean "leave unchanged" (see in_range). */
		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
		unsigned int ticks_per_usec = core_ticks_per_usec(sc);

		if ((sc->flags & FULL_INIT_DONE) == 0)
			return (EAGAIN);       /* need TP to be initialized */
		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
		    !in_range(t->channel, 0, 1) ||
		    !in_range(t->kbps, 0, 10000000) ||
		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
		    !in_range(t->flow_ipg, 0,
			      dack_ticks_to_usec(sc, 0x7ff)))
			return (EINVAL);

		if (t->kbps >= 0) {
			error = t3_config_sched(sc, t->kbps, t->sched);
			if (error < 0)
				return (-error);
		}
		if (t->class_ipg >= 0)
			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
		if (t->flow_ipg >= 0) {
			t->flow_ipg *= 1000;     /* us -> ns */
			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
		}
		if (t->mode >= 0) {
			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);

			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
					 bit, t->mode ? bit : 0);
		}
		if (t->channel >= 0)
			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
					 1 << t->sched, t->channel << t->sched);
		break;
	}
	case CHELSIO_GET_EEPROM: {
		int i;
		struct ch_eeprom *e = (struct ch_eeprom *)data;
		uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);

		if (buf == NULL) {
			return (ENOMEM);
		}
		e->magic = EEPROM_MAGIC;
		/*
		 * NOTE(review): e->offset and e->len come from userland and
		 * are not clamped here; offset + len beyond EEPROMSIZE would
		 * index past `buf' — confirm the caller/tool bounds them.
		 */
		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);

		if (!error)
			error = copyout(buf + e->offset, e->data, e->len);

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_CLEAR_STATS: {
		if (!(sc->flags & FULL_INIT_DONE))
			return EAGAIN;

		/* Drain the hardware counters, then zero the accumulators. */
		PORT_LOCK(pi);
		t3_mac_update_stats(&pi->mac);
		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
		PORT_UNLOCK(pi);
		break;
	}
	case CHELSIO_GET_UP_LA: {
		/* Dump the microprocessor logic analyzer buffer. */
		struct ch_up_la *la = (struct ch_up_la *)data;
		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
		if (buf == NULL) {
			return (ENOMEM);
		}
		if (la->bufsize < LA_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
					      &la->bufsize, buf);
		if (!error)
			error = copyout(buf, la->data, la->bufsize);

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_GET_UP_IOQS: {
		/* Dump the microprocessor IOQ state; the first four words
		 * are enable/status summaries, the rest is raw queue data. */
		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
		uint32_t *v;

		if (buf == NULL) {
			return (ENOMEM);
		}
		if (ioqs->bufsize < IOQS_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);

		if (!error) {
			v = (uint32_t *)buf;

			ioqs->ioq_rx_enable = *v++;
			ioqs->ioq_tx_enable = *v++;
			ioqs->ioq_rx_status = *v++;
			ioqs->ioq_tx_status = *v++;

			error = copyout(v, ioqs->data, ioqs->bufsize);
		}

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_SET_FILTER: {
		/* Install a TCAM packet filter.  A field's value is only
		 * meaningful when its mask is fully set. */
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);	/* No TCAM */
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);	/* mc5 not setup yet */
		if (nfilters == 0)
			return (EBUSY);		/* TOE will use TCAM */

		/* sanity checks */
		if (f->filter_id >= nfilters ||
		    (f->val.dip && f->mask.dip != 0xffffffff) ||
		    (f->val.sport && f->mask.sport != 0xffff) ||
		    (f->val.dport && f->mask.dport != 0xffff) ||
		    (f->val.vlan && f->mask.vlan != 0xfff) ||
		    (f->val.vlan_prio &&
			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
		    f->qset >= SGE_QSETS ||
		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
			return (EINVAL);

		/* Was allocated with M_WAITOK */
		KASSERT(sc->filters, ("filter table NULL\n"));

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);

		bzero(p, sizeof(*p));
		p->sip = f->val.sip;
		p->sip_mask = f->mask.sip;
		p->dip = f->val.dip;
		p->sport = f->val.sport;
		p->dport = f->val.dport;
		/* Unset vlan/prio become wildcard sentinels. */
		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
		p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
		    FILTER_NO_VLAN_PRI;
		p->mac_hit = f->mac_hit;
		p->mac_vld = f->mac_addr_idx != 0xffff;
		p->mac_idx = f->mac_addr_idx;
		p->pkt_type = f->proto;
		p->report_filter_id = f->want_filter_id;
		p->pass = f->pass;
		p->rss = f->rss;
		p->qset = f->qset;

		error = set_filter(sc, f->filter_id, p);
		if (error == 0)
			p->valid = 1;
		break;
	}
	case CHELSIO_DEL_FILTER: {
		/* Replace the slot with an inert all-wildcard filter. */
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);
		if (f->filter_id >= nfilters)
		       return (EINVAL);

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);
		if (!p->valid)
			return (EFAULT); /* Read "Bad address" as "Bad index" */

		bzero(p, sizeof(*p));
		p->sip = p->sip_mask = 0xffffffff;
		p->vlan = 0xfff;
		p->vlan_prio = FILTER_NO_VLAN_PRI;
		p->pkt_type = 1;
		error = set_filter(sc, f->filter_id, p);
		break;
	}
	case CHELSIO_GET_FILTER: {
		/* Iterator: return the first valid filter after f->filter_id
		 * (0xffffffff starts from the beginning and also marks the
		 * end of iteration). */
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int i, nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);

		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
		for (; i < nfilters; i++) {
			p = &sc->filters[i];
			if (!p->valid)
				continue;

			bzero(f, sizeof(*f));

			f->filter_id = i;
			f->val.sip = p->sip;
			f->mask.sip = p->sip_mask;
			f->val.dip = p->dip;
			f->mask.dip = p->dip ? 0xffffffff : 0;
			f->val.sport = p->sport;
			f->mask.sport = p->sport ? 0xffff : 0;
			f->val.dport = p->dport;
			f->mask.dport = p->dport ? 0xffff : 0;
			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : p->vlan_prio;
			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : FILTER_NO_VLAN_PRI;
			f->mac_hit = p->mac_hit;
			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
			f->proto = p->pkt_type;
			f->want_filter_id = p->report_filter_id;
			f->pass = p->pass;
			f->rss = p->rss;
			f->qset = p->qset;

			break;
		}

		if (i == nfilters)
			f->filter_id = 0xffffffff;
		break;
	}
	default:
		return (EOPNOTSUPP);
		break;
	}

	return (error);
}
3127167514Skmacy
3128167514Skmacystatic __inline void
3129167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3130167514Skmacy    unsigned int end)
3131167514Skmacy{
3132182679Skmacy	uint32_t *p = (uint32_t *)(buf + start);
3133167514Skmacy
3134167514Skmacy	for ( ; start <= end; start += sizeof(uint32_t))
3135167514Skmacy		*p++ = t3_read_reg(ap, start);
3136167514Skmacy}
3137167514Skmacy
#define T3_REGMAP_SIZE (3 * 1024)

/*
 * Size in bytes of the register snapshot produced by cxgb_get_regs().
 * (Fix: parenthesize the return value per the file's style.)
 */
static int
cxgb_get_regs_len(void)
{
	return (T3_REGMAP_SIZE);
}
3144167514Skmacy
/*
 * Fill `buf' (T3_REGMAP_SIZE bytes) with a snapshot of the adapter's
 * register map for the CHELSIO_IFCONF_GETREGS ioctl, and stamp the
 * version word in `regs'.
 */
static void
cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
{

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	/* NOTE(review): `is_pcie(sc) << 31' left-shifts into the sign bit of
	 * int — technically UB if is_pcie() returns a plain int 1; confirm. */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, cxgb_get_regs_len());
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
3173176572Skmacy
3174207643Snpstatic int
3175207643Snpalloc_filters(struct adapter *sc)
3176207643Snp{
3177207643Snp	struct filter_info *p;
3178207643Snp	unsigned int nfilters = sc->params.mc5.nfilters;
3179176572Skmacy
3180207643Snp	if (nfilters == 0)
3181207643Snp		return (0);
3182207643Snp
3183207643Snp	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
3184207643Snp	sc->filters = p;
3185207643Snp
3186207643Snp	p = &sc->filters[nfilters - 1];
3187207643Snp	p->vlan = 0xfff;
3188207643Snp	p->vlan_prio = FILTER_NO_VLAN_PRI;
3189207643Snp	p->pass = p->rss = p->valid = p->locked = 1;
3190207643Snp
3191207643Snp	return (0);
3192207643Snp}
3193207643Snp
3194207643Snpstatic int
3195207643Snpsetup_hw_filters(struct adapter *sc)
3196207643Snp{
3197207643Snp	int i, rc;
3198207643Snp	unsigned int nfilters = sc->params.mc5.nfilters;
3199207643Snp
3200207643Snp	if (!sc->filters)
3201207643Snp		return (0);
3202207643Snp
3203207643Snp	t3_enable_filters(sc);
3204207643Snp
3205207643Snp	for (i = rc = 0; i < nfilters && !rc; i++) {
3206207643Snp		if (sc->filters[i].locked)
3207207643Snp			rc = set_filter(sc, i, &sc->filters[i]);
3208207643Snp	}
3209207643Snp
3210207643Snp	return (rc);
3211207643Snp}
3212207643Snp
3213207643Snpstatic int
3214207643Snpset_filter(struct adapter *sc, int id, const struct filter_info *f)
3215207643Snp{
3216207643Snp	int len;
3217207643Snp	struct mbuf *m;
3218207643Snp	struct ulp_txpkt *txpkt;
3219207643Snp	struct work_request_hdr *wr;
3220207643Snp	struct cpl_pass_open_req *oreq;
3221207643Snp	struct cpl_set_tcb_field *sreq;
3222207643Snp
3223207643Snp	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
3224207643Snp	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
3225207643Snp
3226207643Snp	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
3227207643Snp	      sc->params.mc5.nfilters;
3228207643Snp
3229207643Snp	m = m_gethdr(M_WAITOK, MT_DATA);
3230207643Snp	m->m_len = m->m_pkthdr.len = len;
3231207643Snp	bzero(mtod(m, char *), len);
3232207643Snp
3233207643Snp	wr = mtod(m, struct work_request_hdr *);
3234207643Snp	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
3235207643Snp
3236207643Snp	oreq = (struct cpl_pass_open_req *)(wr + 1);
3237207643Snp	txpkt = (struct ulp_txpkt *)oreq;
3238207643Snp	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3239207643Snp	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
3240207643Snp	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
3241207643Snp	oreq->local_port = htons(f->dport);
3242207643Snp	oreq->peer_port = htons(f->sport);
3243207643Snp	oreq->local_ip = htonl(f->dip);
3244207643Snp	oreq->peer_ip = htonl(f->sip);
3245207643Snp	oreq->peer_netmask = htonl(f->sip_mask);
3246207643Snp	oreq->opt0h = 0;
3247207643Snp	oreq->opt0l = htonl(F_NO_OFFLOAD);
3248207643Snp	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
3249207643Snp			 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
3250207643Snp			 V_VLAN_PRI(f->vlan_prio >> 1) |
3251207643Snp			 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
3252207643Snp			 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
3253207643Snp			 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
3254207643Snp
3255207643Snp	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
3256207643Snp	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
3257207643Snp			  (f->report_filter_id << 15) | (1 << 23) |
3258207643Snp			  ((u64)f->pass << 35) | ((u64)!f->rss << 36));
3259207643Snp	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
3260207643Snp	t3_mgmt_tx(sc, m);
3261207643Snp
3262207643Snp	if (f->pass && !f->rss) {
3263207643Snp		len = sizeof(*sreq);
3264207643Snp		m = m_gethdr(M_WAITOK, MT_DATA);
3265207643Snp		m->m_len = m->m_pkthdr.len = len;
3266207643Snp		bzero(mtod(m, char *), len);
3267207643Snp		sreq = mtod(m, struct cpl_set_tcb_field *);
3268207643Snp		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
3269207643Snp		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
3270207643Snp				 (u64)sc->rrss_map[f->qset] << 19);
3271207643Snp		t3_mgmt_tx(sc, m);
3272207643Snp	}
3273207643Snp	return 0;
3274207643Snp}
3275207643Snp
3276207643Snpstatic inline void
3277207643Snpmk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
3278207643Snp    unsigned int word, u64 mask, u64 val)
3279207643Snp{
3280207643Snp	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
3281207643Snp	req->reply = V_NO_REPLY(1);
3282207643Snp	req->cpu_idx = 0;
3283207643Snp	req->word = htons(word);
3284207643Snp	req->mask = htobe64(mask);
3285207643Snp	req->val = htobe64(val);
3286207643Snp}
3287207643Snp
3288207643Snpstatic inline void
3289207643Snpset_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
3290207643Snp    unsigned int word, u64 mask, u64 val)
3291207643Snp{
3292207643Snp	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
3293207643Snp
3294207643Snp	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3295207643Snp	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3296207643Snp	mk_set_tcb_field(req, tid, word, mask, val);
3297207643Snp}
3298237920Snp
3299237920Snpvoid
3300237920Snpt3_iterate(void (*func)(struct adapter *, void *), void *arg)
3301237920Snp{
3302237920Snp	struct adapter *sc;
3303237920Snp
3304237920Snp	mtx_lock(&t3_list_lock);
3305237920Snp	SLIST_FOREACH(sc, &t3_list, link) {
3306237920Snp		/*
3307237920Snp		 * func should not make any assumptions about what state sc is
3308237920Snp		 * in - the only guarantee is that sc->sc_lock is a valid lock.
3309237920Snp		 */
3310237920Snp		func(sc, arg);
3311237920Snp	}
3312237920Snp	mtx_unlock(&t3_list_lock);
3313237920Snp}
3314237920Snp
3315237920Snp#ifdef TCP_OFFLOAD
3316237920Snpstatic int
3317237920Snptoe_capability(struct port_info *pi, int enable)
3318237920Snp{
3319237920Snp	int rc;
3320237920Snp	struct adapter *sc = pi->adapter;
3321237920Snp
3322237920Snp	ADAPTER_LOCK_ASSERT_OWNED(sc);
3323237920Snp
3324237920Snp	if (!is_offload(sc))
3325237920Snp		return (ENODEV);
3326237920Snp
3327237920Snp	if (enable) {
3328237920Snp		if (!(sc->flags & FULL_INIT_DONE)) {
3329237920Snp			log(LOG_WARNING,
3330237920Snp			    "You must enable a cxgb interface first\n");
3331237920Snp			return (EAGAIN);
3332237920Snp		}
3333237920Snp
3334237920Snp		if (isset(&sc->offload_map, pi->port_id))
3335237920Snp			return (0);
3336237920Snp
3337237920Snp		if (!(sc->flags & TOM_INIT_DONE)) {
3338237920Snp			rc = t3_activate_uld(sc, ULD_TOM);
3339237920Snp			if (rc == EAGAIN) {
3340237920Snp				log(LOG_WARNING,
3341237920Snp				    "You must kldload t3_tom.ko before trying "
3342237920Snp				    "to enable TOE on a cxgb interface.\n");
3343237920Snp			}
3344237920Snp			if (rc != 0)
3345237920Snp				return (rc);
3346237920Snp			KASSERT(sc->tom_softc != NULL,
3347237920Snp			    ("%s: TOM activated but softc NULL", __func__));
3348237920Snp			KASSERT(sc->flags & TOM_INIT_DONE,
3349237920Snp			    ("%s: TOM activated but flag not set", __func__));
3350237920Snp		}
3351237920Snp
3352237920Snp		setbit(&sc->offload_map, pi->port_id);
3353237920Snp
3354237920Snp		/*
3355237920Snp		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
3356237920Snp		 * enabled on any port.  Need to figure out how to enable,
3357237920Snp		 * disable, load, and unload iWARP cleanly.
3358237920Snp		 */
3359237920Snp		if (!isset(&sc->offload_map, MAX_NPORTS) &&
3360237920Snp		    t3_activate_uld(sc, ULD_IWARP) == 0)
3361237920Snp			setbit(&sc->offload_map, MAX_NPORTS);
3362237920Snp	} else {
3363237920Snp		if (!isset(&sc->offload_map, pi->port_id))
3364237920Snp			return (0);
3365237920Snp
3366237920Snp		KASSERT(sc->flags & TOM_INIT_DONE,
3367237920Snp		    ("%s: TOM never initialized?", __func__));
3368237920Snp		clrbit(&sc->offload_map, pi->port_id);
3369237920Snp	}
3370237920Snp
3371237920Snp	return (0);
3372237920Snp}
3373237920Snp
3374237920Snp/*
3375237920Snp * Add an upper layer driver to the global list.
3376237920Snp */
3377237920Snpint
3378237920Snpt3_register_uld(struct uld_info *ui)
3379237920Snp{
3380237920Snp	int rc = 0;
3381237920Snp	struct uld_info *u;
3382237920Snp
3383237920Snp	mtx_lock(&t3_uld_list_lock);
3384237920Snp	SLIST_FOREACH(u, &t3_uld_list, link) {
3385237920Snp	    if (u->uld_id == ui->uld_id) {
3386237920Snp		    rc = EEXIST;
3387237920Snp		    goto done;
3388237920Snp	    }
3389237920Snp	}
3390237920Snp
3391237920Snp	SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
3392237920Snp	ui->refcount = 0;
3393237920Snpdone:
3394237920Snp	mtx_unlock(&t3_uld_list_lock);
3395237920Snp	return (rc);
3396237920Snp}
3397237920Snp
3398237920Snpint
3399237920Snpt3_unregister_uld(struct uld_info *ui)
3400237920Snp{
3401237920Snp	int rc = EINVAL;
3402237920Snp	struct uld_info *u;
3403237920Snp
3404237920Snp	mtx_lock(&t3_uld_list_lock);
3405237920Snp
3406237920Snp	SLIST_FOREACH(u, &t3_uld_list, link) {
3407237920Snp	    if (u == ui) {
3408237920Snp		    if (ui->refcount > 0) {
3409237920Snp			    rc = EBUSY;
3410237920Snp			    goto done;
3411237920Snp		    }
3412237920Snp
3413237920Snp		    SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
3414237920Snp		    rc = 0;
3415237920Snp		    goto done;
3416237920Snp	    }
3417237920Snp	}
3418237920Snpdone:
3419237920Snp	mtx_unlock(&t3_uld_list_lock);
3420237920Snp	return (rc);
3421237920Snp}
3422237920Snp
3423237920Snpint
3424237920Snpt3_activate_uld(struct adapter *sc, int id)
3425237920Snp{
3426237920Snp	int rc = EAGAIN;
3427237920Snp	struct uld_info *ui;
3428237920Snp
3429237920Snp	mtx_lock(&t3_uld_list_lock);
3430237920Snp
3431237920Snp	SLIST_FOREACH(ui, &t3_uld_list, link) {
3432237920Snp		if (ui->uld_id == id) {
3433237920Snp			rc = ui->activate(sc);
3434237920Snp			if (rc == 0)
3435237920Snp				ui->refcount++;
3436237920Snp			goto done;
3437237920Snp		}
3438237920Snp	}
3439237920Snpdone:
3440237920Snp	mtx_unlock(&t3_uld_list_lock);
3441237920Snp
3442237920Snp	return (rc);
3443237920Snp}
3444237920Snp
3445237920Snpint
3446237920Snpt3_deactivate_uld(struct adapter *sc, int id)
3447237920Snp{
3448237920Snp	int rc = EINVAL;
3449237920Snp	struct uld_info *ui;
3450237920Snp
3451237920Snp	mtx_lock(&t3_uld_list_lock);
3452237920Snp
3453237920Snp	SLIST_FOREACH(ui, &t3_uld_list, link) {
3454237920Snp		if (ui->uld_id == id) {
3455237920Snp			rc = ui->deactivate(sc);
3456237920Snp			if (rc == 0)
3457237920Snp				ui->refcount--;
3458237920Snp			goto done;
3459237920Snp		}
3460237920Snp	}
3461237920Snpdone:
3462237920Snp	mtx_unlock(&t3_uld_list_lock);
3463237920Snp
3464237920Snp	return (rc);
3465237920Snp}
3466237920Snp
3467237920Snpstatic int
3468237920Snpcpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
3469237920Snp    struct mbuf *m)
3470237920Snp{
3471237920Snp	m_freem(m);
3472237920Snp	return (EDOOFUS);
3473237920Snp}
3474237920Snp
3475237920Snpint
3476237920Snpt3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3477237920Snp{
3478237920Snp	uintptr_t *loc, new;
3479237920Snp
3480237920Snp	if (opcode >= NUM_CPL_HANDLERS)
3481237920Snp		return (EINVAL);
3482237920Snp
3483237920Snp	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
3484237920Snp	loc = (uintptr_t *) &sc->cpl_handler[opcode];
3485237920Snp	atomic_store_rel_ptr(loc, new);
3486237920Snp
3487237920Snp	return (0);
3488237920Snp}
3489237920Snp#endif
3490237920Snp
3491237920Snpstatic int
3492237920Snpcxgbc_mod_event(module_t mod, int cmd, void *arg)
3493237920Snp{
3494237920Snp	int rc = 0;
3495237920Snp
3496237920Snp	switch (cmd) {
3497237920Snp	case MOD_LOAD:
3498237920Snp		mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
3499237920Snp		SLIST_INIT(&t3_list);
3500237920Snp#ifdef TCP_OFFLOAD
3501237920Snp		mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
3502237920Snp		SLIST_INIT(&t3_uld_list);
3503237920Snp#endif
3504237920Snp		break;
3505237920Snp
3506237920Snp	case MOD_UNLOAD:
3507237920Snp#ifdef TCP_OFFLOAD
3508237920Snp		mtx_lock(&t3_uld_list_lock);
3509237920Snp		if (!SLIST_EMPTY(&t3_uld_list)) {
3510237920Snp			rc = EBUSY;
3511237920Snp			mtx_unlock(&t3_uld_list_lock);
3512237920Snp			break;
3513237920Snp		}
3514237920Snp		mtx_unlock(&t3_uld_list_lock);
3515237920Snp		mtx_destroy(&t3_uld_list_lock);
3516237920Snp#endif
3517237920Snp		mtx_lock(&t3_list_lock);
3518237920Snp		if (!SLIST_EMPTY(&t3_list)) {
3519237920Snp			rc = EBUSY;
3520237920Snp			mtx_unlock(&t3_list_lock);
3521237920Snp			break;
3522237920Snp		}
3523237920Snp		mtx_unlock(&t3_list_lock);
3524237920Snp		mtx_destroy(&t3_list_lock);
3525237920Snp		break;
3526237920Snp	}
3527237920Snp
3528237920Snp	return (rc);
3529237920Snp}
3530