cxgb_main.c revision 181652
1167514Skmacy/**************************************************************************
2167514Skmacy
3178302SkmacyCopyright (c) 2007-2008, Chelsio Inc.
4167514SkmacyAll rights reserved.
5167514Skmacy
6167514SkmacyRedistribution and use in source and binary forms, with or without
7167514Skmacymodification, are permitted provided that the following conditions are met:
8167514Skmacy
9167514Skmacy 1. Redistributions of source code must retain the above copyright notice,
10167514Skmacy    this list of conditions and the following disclaimer.
11167514Skmacy
12178302Skmacy 2. Neither the name of the Chelsio Corporation nor the names of its
13167514Skmacy    contributors may be used to endorse or promote products derived from
14167514Skmacy    this software without specific prior written permission.
15167514Skmacy
16167514SkmacyTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17167514SkmacyAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18167514SkmacyIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19167514SkmacyARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20167514SkmacyLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21167514SkmacyCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22167514SkmacySUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23167514SkmacyINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24167514SkmacyCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25167514SkmacyARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26167514SkmacyPOSSIBILITY OF SUCH DAMAGE.
27167514Skmacy
28167514Skmacy***************************************************************************/
29167514Skmacy
30167514Skmacy#include <sys/cdefs.h>
31167514Skmacy__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 181652 2008-08-13 01:30:41Z kmacy $");
32167514Skmacy
33167514Skmacy#include <sys/param.h>
34167514Skmacy#include <sys/systm.h>
35167514Skmacy#include <sys/kernel.h>
36167514Skmacy#include <sys/bus.h>
37167514Skmacy#include <sys/module.h>
38167514Skmacy#include <sys/pciio.h>
39167514Skmacy#include <sys/conf.h>
40167514Skmacy#include <machine/bus.h>
41167514Skmacy#include <machine/resource.h>
42167514Skmacy#include <sys/bus_dma.h>
43176472Skmacy#include <sys/ktr.h>
44167514Skmacy#include <sys/rman.h>
45167514Skmacy#include <sys/ioccom.h>
46167514Skmacy#include <sys/mbuf.h>
47167514Skmacy#include <sys/linker.h>
48167514Skmacy#include <sys/firmware.h>
49167514Skmacy#include <sys/socket.h>
50167514Skmacy#include <sys/sockio.h>
51167514Skmacy#include <sys/smp.h>
52167514Skmacy#include <sys/sysctl.h>
53174708Skmacy#include <sys/syslog.h>
54167514Skmacy#include <sys/queue.h>
55167514Skmacy#include <sys/taskqueue.h>
56174708Skmacy#include <sys/proc.h>
57167514Skmacy
58167514Skmacy#include <net/bpf.h>
59167514Skmacy#include <net/ethernet.h>
60167514Skmacy#include <net/if.h>
61167514Skmacy#include <net/if_arp.h>
62167514Skmacy#include <net/if_dl.h>
63167514Skmacy#include <net/if_media.h>
64167514Skmacy#include <net/if_types.h>
65180583Skmacy#include <net/if_vlan_var.h>
66167514Skmacy
67167514Skmacy#include <netinet/in_systm.h>
68167514Skmacy#include <netinet/in.h>
69167514Skmacy#include <netinet/if_ether.h>
70167514Skmacy#include <netinet/ip.h>
72167514Skmacy#include <netinet/tcp.h>
73167514Skmacy#include <netinet/udp.h>
74167514Skmacy
75167514Skmacy#include <dev/pci/pcireg.h>
76167514Skmacy#include <dev/pci/pcivar.h>
77167514Skmacy#include <dev/pci/pci_private.h>
78167514Skmacy
79170076Skmacy#ifdef CONFIG_DEFINED
80170076Skmacy#include <cxgb_include.h>
81170076Skmacy#else
82170076Skmacy#include <dev/cxgb/cxgb_include.h>
83170076Skmacy#endif
84167514Skmacy
85167514Skmacy#ifdef PRIV_SUPPORTED
86167514Skmacy#include <sys/priv.h>
87167514Skmacy#endif
88167514Skmacy
89174726Skmacy#ifdef IFNET_MULTIQUEUE
90174708Skmacy#include <machine/intr_machdep.h>
91174726Skmacy#endif
92174708Skmacy
93167514Skmacystatic int cxgb_setup_msix(adapter_t *, int);
94170654Skmacystatic void cxgb_teardown_msix(adapter_t *);
95167514Skmacystatic void cxgb_init(void *);
96167514Skmacystatic void cxgb_init_locked(struct port_info *);
97167734Skmacystatic void cxgb_stop_locked(struct port_info *);
98167514Skmacystatic void cxgb_set_rxmode(struct port_info *);
99167514Skmacystatic int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
100167514Skmacystatic int cxgb_media_change(struct ifnet *);
101167514Skmacystatic void cxgb_media_status(struct ifnet *, struct ifmediareq *);
102167514Skmacystatic int setup_sge_qsets(adapter_t *);
103167514Skmacystatic void cxgb_async_intr(void *);
104167514Skmacystatic void cxgb_ext_intr_handler(void *, int);
105170869Skmacystatic void cxgb_tick_handler(void *, int);
106170869Skmacystatic void cxgb_down_locked(struct adapter *sc);
107167514Skmacystatic void cxgb_tick(void *);
108167514Skmacystatic void setup_rss(adapter_t *sc);
109167514Skmacy
110167514Skmacy/* Attachment glue for the PCI controller end of the device.  Each port of
111167514Skmacy * the device is attached separately, as defined later.
112167514Skmacy */
113167514Skmacystatic int cxgb_controller_probe(device_t);
114167514Skmacystatic int cxgb_controller_attach(device_t);
115167514Skmacystatic int cxgb_controller_detach(device_t);
116167514Skmacystatic void cxgb_free(struct adapter *);
117167514Skmacystatic __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
118167514Skmacy    unsigned int end);
119167514Skmacystatic void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
120167514Skmacystatic int cxgb_get_regs_len(void);
121169978Skmacystatic int offload_open(struct port_info *pi);
122171978Skmacystatic void touch_bars(device_t dev);
123174626Skmacystatic int offload_close(struct t3cdev *tdev);
124176472Skmacystatic void cxgb_link_start(struct port_info *p);
125167514Skmacy
126167514Skmacystatic device_method_t cxgb_controller_methods[] = {
127167514Skmacy	DEVMETHOD(device_probe,		cxgb_controller_probe),
128167514Skmacy	DEVMETHOD(device_attach,	cxgb_controller_attach),
129167514Skmacy	DEVMETHOD(device_detach,	cxgb_controller_detach),
130167514Skmacy
131167514Skmacy	/* bus interface */
132167514Skmacy	DEVMETHOD(bus_print_child,	bus_generic_print_child),
133167514Skmacy	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
134167514Skmacy
135167514Skmacy	{ 0, 0 }
136167514Skmacy};
137167514Skmacy
138167514Skmacystatic driver_t cxgb_controller_driver = {
139167514Skmacy	"cxgbc",
140167514Skmacy	cxgb_controller_methods,
141167514Skmacy	sizeof(struct adapter)
142167514Skmacy};
143167514Skmacy
144167514Skmacystatic devclass_t	cxgb_controller_devclass;
145167514SkmacyDRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
146167514Skmacy
147167514Skmacy/*
148167514Skmacy * Attachment glue for the ports.  Attachment is done directly to the
149167514Skmacy * controller device.
150167514Skmacy */
151167514Skmacystatic int cxgb_port_probe(device_t);
152167514Skmacystatic int cxgb_port_attach(device_t);
153167514Skmacystatic int cxgb_port_detach(device_t);
154167514Skmacy
155167514Skmacystatic device_method_t cxgb_port_methods[] = {
156167514Skmacy	DEVMETHOD(device_probe,		cxgb_port_probe),
157167514Skmacy	DEVMETHOD(device_attach,	cxgb_port_attach),
158167514Skmacy	DEVMETHOD(device_detach,	cxgb_port_detach),
159167514Skmacy	{ 0, 0 }
160167514Skmacy};
161167514Skmacy
162167514Skmacystatic driver_t cxgb_port_driver = {
163167514Skmacy	"cxgb",
164167514Skmacy	cxgb_port_methods,
165167514Skmacy	0
166167514Skmacy};
167167514Skmacy
168167514Skmacystatic d_ioctl_t cxgb_extension_ioctl;
169170654Skmacystatic d_open_t cxgb_extension_open;
170170654Skmacystatic d_close_t cxgb_extension_close;
171167514Skmacy
172170654Skmacystatic struct cdevsw cxgb_cdevsw = {
173170654Skmacy       .d_version =    D_VERSION,
174170654Skmacy       .d_flags =      0,
175170654Skmacy       .d_open =       cxgb_extension_open,
176170654Skmacy       .d_close =      cxgb_extension_close,
177170654Skmacy       .d_ioctl =      cxgb_extension_ioctl,
178170654Skmacy       .d_name =       "cxgb",
179170654Skmacy};
180170654Skmacy
181167514Skmacystatic devclass_t	cxgb_port_devclass;
182167514SkmacyDRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
183167514Skmacy
184167514Skmacy#define SGE_MSIX_COUNT (SGE_QSETS + 1)
185167514Skmacy
186167514Skmacy/*
187167514Skmacy * The driver uses the best interrupt scheme available on a platform in the
188167514Skmacy * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
189167514Skmacy * of these schemes the driver may consider as follows:
190167514Skmacy *
191167514Skmacy * msi = 2: choose from among all three options
192167514Skmacy * msi = 1 : only consider MSI and pin interrupts
193167514Skmacy * msi = 0: force pin interrupts
194167514Skmacy */
195167760Skmacystatic int msi_allowed = 2;
196170083Skmacy
197167514SkmacyTUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
198167514SkmacySYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
199167514SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
200167514Skmacy    "MSI-X, MSI, INTx selector");
201169978Skmacy
202169053Skmacy/*
203169978Skmacy * The driver enables offload as a default.
204169978Skmacy * To disable it, use ofld_disable = 1.
205169053Skmacy */
206169978Skmacystatic int ofld_disable = 0;
207169978SkmacyTUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
208169978SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
209169978Skmacy    "disable ULP offload");
210169978Skmacy
211169978Skmacy/*
212169978Skmacy * The driver uses an auto-queue algorithm by default.
213169978Skmacy * To disable it and force a single queue-set per port, use singleq = 1.
214169978Skmacy */
215174708Skmacystatic int singleq = 0;
216169978SkmacyTUNABLE_INT("hw.cxgb.singleq", &singleq);
217169978SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
218169978Skmacy    "use a single queue-set per port");
219167514Skmacy
220175200Skmacy
/*
 * By default the driver only updates the firmware when it is out of date.
 * To force an update even when the firmware is current, use
 * force_fw_update = 1.
 */
225176572Skmacystatic int force_fw_update = 0;
226176572SkmacyTUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
227176572SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
228176572Skmacy    "update firmware even if up to date");
229175200Skmacy
230175200Skmacyint cxgb_use_16k_clusters = 0;
231175200SkmacyTUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
232175200SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
233175200Skmacy    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
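
/*
 * Example (illustrative, not from the original file): the loader tunables
 * declared above are normally set at boot time from /boot/loader.conf,
 * for instance:
 *
 *	hw.cxgb.msi_allowed="1"		# consider only MSI and INTx
 *	hw.cxgb.singleq="1"		# one queue set per port
 *	hw.cxgb.force_fw_update="1"	# reload firmware even if current
 *
 * Because they are declared CTLFLAG_RDTUN, the same values can be read
 * back at runtime with sysctl(8), e.g. "sysctl hw.cxgb.msi_allowed".
 */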
234175200Skmacy
235167514Skmacyenum {
236167514Skmacy	MAX_TXQ_ENTRIES      = 16384,
237167514Skmacy	MAX_CTRL_TXQ_ENTRIES = 1024,
238167514Skmacy	MAX_RSPQ_ENTRIES     = 16384,
239167514Skmacy	MAX_RX_BUFFERS       = 16384,
240167514Skmacy	MAX_RX_JUMBO_BUFFERS = 16384,
241167514Skmacy	MIN_TXQ_ENTRIES      = 4,
242167514Skmacy	MIN_CTRL_TXQ_ENTRIES = 4,
243167514Skmacy	MIN_RSPQ_ENTRIES     = 32,
244172096Skmacy	MIN_FL_ENTRIES       = 32,
245172096Skmacy	MIN_FL_JUMBO_ENTRIES = 32
246167514Skmacy};
247167514Skmacy
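/*
 * Host-side shadow of one hardware packet filter: sip/dip and sport/dport
 * hold the source/destination IP addresses and TCP/UDP ports to match,
 * the vlan and mac_* fields further qualify the match, and pass, rss and
 * qset select what is done with a matching packet.  locked and valid track
 * the state of the table entry itself.
 */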
248171471Skmacystruct filter_info {
249171471Skmacy	u32 sip;
250171471Skmacy	u32 sip_mask;
251171471Skmacy	u32 dip;
252171471Skmacy	u16 sport;
253171471Skmacy	u16 dport;
254171471Skmacy	u32 vlan:12;
255171471Skmacy	u32 vlan_prio:3;
256171471Skmacy	u32 mac_hit:1;
257171471Skmacy	u32 mac_idx:4;
258171471Skmacy	u32 mac_vld:1;
259171471Skmacy	u32 pkt_type:2;
260171471Skmacy	u32 report_filter_id:1;
261171471Skmacy	u32 pass:1;
262171471Skmacy	u32 rss:1;
263171471Skmacy	u32 qset:3;
264171471Skmacy	u32 locked:1;
265171471Skmacy	u32 valid:1;
266171471Skmacy};
267171471Skmacy
268171471Skmacyenum { FILTER_NO_VLAN_PRI = 7 };
269171471Skmacy
270167514Skmacy#define PORT_MASK ((1 << MAX_NPORTS) - 1)
271167514Skmacy
272167514Skmacy/* Table for probing the cards.  The desc field isn't actually used */
273167514Skmacystruct cxgb_ident {
274167514Skmacy	uint16_t	vendor;
275167514Skmacy	uint16_t	device;
276167514Skmacy	int		index;
277167514Skmacy	char		*desc;
278167514Skmacy} cxgb_identifiers[] = {
279167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
280167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
281167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
282167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
283167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
284167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
285167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
286167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
287167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
288167514Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
289170654Skmacy	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
290167514Skmacy	{0, 0, 0, NULL}
291167514Skmacy};
292167514Skmacy
293171471Skmacystatic int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
294171471Skmacy
295176472Skmacy
296176472Skmacyvoid
297176472Skmacycxgb_log_tcb(struct adapter *sc, unsigned int tid)
298176472Skmacy{
299176472Skmacy	char buf[TCB_SIZE];
300176472Skmacy	uint64_t *tcb = (uint64_t *)buf;
301176472Skmacy	int i, error;
302176472Skmacy	struct mc7 *mem = &sc->cm;
303176472Skmacy
304176472Skmacy	error = t3_mc7_bd_read(mem, tid*TCB_SIZE/8, TCB_SIZE/8, tcb);
305176472Skmacy	if (error)
		printf("cxgb_log_tcb: t3_mc7_bd_read failed %d\n", error);
307176472Skmacy
308176472Skmacy	CTR1(KTR_CXGB, "TCB tid=%u", tid);
309176472Skmacy	for (i = 0; i < TCB_SIZE / 32; i++) {
310176472Skmacy		CTR5(KTR_CXGB, "%1d: %08x %08x %08x %08x",
311176472Skmacy		    i, (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32),
312176472Skmacy		    (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32));
313176472Skmacy		tcb += 2;
314176472Skmacy		CTR4(KTR_CXGB, "   %08x %08x %08x %08x",
315176472Skmacy		    (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32),
316176472Skmacy		    (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32));
317176472Skmacy		tcb += 2;
318176472Skmacy	}
319176472Skmacy}
320176472Skmacy
321174708Skmacystatic __inline char
322171471Skmacyt3rev2char(struct adapter *adapter)
323171471Skmacy{
324171471Skmacy	char rev = 'z';
325171471Skmacy
326171471Skmacy	switch(adapter->params.rev) {
327171471Skmacy	case T3_REV_A:
328171471Skmacy		rev = 'a';
329171471Skmacy		break;
330171471Skmacy	case T3_REV_B:
331171471Skmacy	case T3_REV_B2:
332171471Skmacy		rev = 'b';
333171471Skmacy		break;
334171471Skmacy	case T3_REV_C:
335171471Skmacy		rev = 'c';
336171471Skmacy		break;
337171471Skmacy	}
338171471Skmacy	return rev;
339171471Skmacy}
340171471Skmacy
341167514Skmacystatic struct cxgb_ident *
342167514Skmacycxgb_get_ident(device_t dev)
343167514Skmacy{
344167514Skmacy	struct cxgb_ident *id;
345167514Skmacy
346167514Skmacy	for (id = cxgb_identifiers; id->desc != NULL; id++) {
347167514Skmacy		if ((id->vendor == pci_get_vendor(dev)) &&
348167514Skmacy		    (id->device == pci_get_device(dev))) {
349167514Skmacy			return (id);
350167514Skmacy		}
351167514Skmacy	}
352167514Skmacy	return (NULL);
353167514Skmacy}
354167514Skmacy
355167514Skmacystatic const struct adapter_info *
356167514Skmacycxgb_get_adapter_info(device_t dev)
357167514Skmacy{
358167514Skmacy	struct cxgb_ident *id;
359167514Skmacy	const struct adapter_info *ai;
360167514Skmacy
361167514Skmacy	id = cxgb_get_ident(dev);
362167514Skmacy	if (id == NULL)
363167514Skmacy		return (NULL);
364167514Skmacy
365167514Skmacy	ai = t3_get_adapter_info(id->index);
366167514Skmacy
367167514Skmacy	return (ai);
368167514Skmacy}
369167514Skmacy
370167514Skmacystatic int
371167514Skmacycxgb_controller_probe(device_t dev)
372167514Skmacy{
373167514Skmacy	const struct adapter_info *ai;
374167514Skmacy	char *ports, buf[80];
375170654Skmacy	int nports;
376170654Skmacy
377167514Skmacy	ai = cxgb_get_adapter_info(dev);
378167514Skmacy	if (ai == NULL)
379167514Skmacy		return (ENXIO);
380167514Skmacy
381170654Skmacy	nports = ai->nports0 + ai->nports1;
382170654Skmacy	if (nports == 1)
383167514Skmacy		ports = "port";
384167514Skmacy	else
385167514Skmacy		ports = "ports";
386167514Skmacy
387170654Skmacy	snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, nports, ports);
388167514Skmacy	device_set_desc_copy(dev, buf);
389167514Skmacy	return (BUS_PROBE_DEFAULT);
390167514Skmacy}
391167514Skmacy
392176572Skmacy#define FW_FNAME "cxgb_t3fw"
393176613Skmacy#define TPEEPROM_NAME "t3b_tp_eeprom"
394176613Skmacy#define TPSRAM_NAME "t3b_protocol_sram"
395171471Skmacy
396167514Skmacystatic int
397169978Skmacyupgrade_fw(adapter_t *sc)
398167514Skmacy{
399167514Skmacy#ifdef FIRMWARE_LATEST
400167514Skmacy	const struct firmware *fw;
401167514Skmacy#else
402167514Skmacy	struct firmware *fw;
403167514Skmacy#endif
404167514Skmacy	int status;
405167514Skmacy
406176572Skmacy	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
407176572Skmacy		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
408169978Skmacy		return (ENOENT);
409171471Skmacy	} else
410176572Skmacy		device_printf(sc->dev, "updating firmware on card\n");
411167514Skmacy	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
412167514Skmacy
413171471Skmacy	device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
414171471Skmacy
415167514Skmacy	firmware_put(fw, FIRMWARE_UNLOAD);
416167514Skmacy
417167514Skmacy	return (status);
418167514Skmacy}
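
/*
 * upgrade_fw() relies on the firmware(9) framework: firmware_get() returns
 * the image registered under FW_FNAME, loading the corresponding firmware
 * module if it is not already present, so the image must be available as a
 * module (or compiled into the kernel) for the update to succeed.
 */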
419167514Skmacy
420167514Skmacystatic int
421167514Skmacycxgb_controller_attach(device_t dev)
422167514Skmacy{
423167514Skmacy	device_t child;
424167514Skmacy	const struct adapter_info *ai;
425167514Skmacy	struct adapter *sc;
426172109Skmacy	int i, error = 0;
427167514Skmacy	uint32_t vers;
428167760Skmacy	int port_qsets = 1;
429171868Skmacy#ifdef MSI_SUPPORTED
430172109Skmacy	int msi_needed, reg;
431176472Skmacy#endif
432176472Skmacy	int must_load = 0;
433167514Skmacy	sc = device_get_softc(dev);
434167514Skmacy	sc->dev = dev;
435169978Skmacy	sc->msi_count = 0;
436172109Skmacy	ai = cxgb_get_adapter_info(dev);
437172109Skmacy
438172109Skmacy	/*
439172109Skmacy	 * XXX not really related but a recent addition
440172109Skmacy	 */
441172109Skmacy#ifdef MSI_SUPPORTED
442167840Skmacy	/* find the PCIe link width and set max read request to 4KB*/
443167840Skmacy	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
444167840Skmacy		uint16_t lnk, pectl;
445167840Skmacy		lnk = pci_read_config(dev, reg + 0x12, 2);
446167840Skmacy		sc->link_width = (lnk >> 4) & 0x3f;
447167840Skmacy
448167840Skmacy		pectl = pci_read_config(dev, reg + 0x8, 2);
449167840Skmacy		pectl = (pectl & ~0x7000) | (5 << 12);
450167840Skmacy		pci_write_config(dev, reg + 0x8, pectl, 2);
451167840Skmacy	}
452171471Skmacy
453171471Skmacy	if (sc->link_width != 0 && sc->link_width <= 4 &&
454171471Skmacy	    (ai->nports0 + ai->nports1) <= 2) {
455167840Skmacy		device_printf(sc->dev,
456167862Skmacy		    "PCIe x%d Link, expect reduced performance\n",
457167840Skmacy		    sc->link_width);
458167840Skmacy	}
459172109Skmacy#endif
460171978Skmacy	touch_bars(dev);
461167514Skmacy	pci_enable_busmaster(dev);
462167514Skmacy	/*
463167514Skmacy	 * Allocate the registers and make them available to the driver.
464167514Skmacy	 * The registers that we care about for NIC mode are in BAR 0
465167514Skmacy	 */
466167514Skmacy	sc->regs_rid = PCIR_BAR(0);
467167514Skmacy	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
468167514Skmacy	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
469176472Skmacy		device_printf(dev, "Cannot allocate BAR region 0\n");
470167514Skmacy		return (ENXIO);
471167514Skmacy	}
	sc->udbs_rid = PCIR_BAR(2);
	if ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate BAR region 1\n");
		error = ENXIO;
		goto out;
	}
479167514Skmacy
480170869Skmacy	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
481170869Skmacy	    device_get_unit(dev));
482170869Skmacy	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
483170869Skmacy
484170869Skmacy	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
485170869Skmacy	    device_get_unit(dev));
486170869Skmacy	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
487170869Skmacy	    device_get_unit(dev));
488170869Skmacy	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
489170869Skmacy	    device_get_unit(dev));
490167514Skmacy
491176472Skmacy	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
492170869Skmacy	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
493170869Skmacy	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
494170869Skmacy
495167514Skmacy	sc->bt = rman_get_bustag(sc->regs_res);
496167514Skmacy	sc->bh = rman_get_bushandle(sc->regs_res);
497167514Skmacy	sc->mmio_len = rman_get_size(sc->regs_res);
498167769Skmacy
499167769Skmacy	if (t3_prep_adapter(sc, ai, 1) < 0) {
500170654Skmacy		printf("prep adapter failed\n");
501167769Skmacy		error = ENODEV;
502167769Skmacy		goto out;
503167769Skmacy	}
504177464Skmacy        /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
505167514Skmacy	 * enough messages for the queue sets.  If that fails, try falling
506167514Skmacy	 * back to MSI.  If that fails, then try falling back to the legacy
507167514Skmacy	 * interrupt pin model.
508167514Skmacy	 */
509167514Skmacy#ifdef MSI_SUPPORTED
510167760Skmacy
511167514Skmacy	sc->msix_regs_rid = 0x20;
512167514Skmacy	if ((msi_allowed >= 2) &&
513167514Skmacy	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
514167514Skmacy	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
515167514Skmacy
516169978Skmacy		msi_needed = sc->msi_count = SGE_MSIX_COUNT;
517167760Skmacy
518169978Skmacy		if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
519169978Skmacy		    (sc->msi_count != msi_needed)) {
520169978Skmacy			device_printf(dev, "msix allocation failed - msi_count = %d"
521169978Skmacy			    " msi_needed=%d will try msi err=%d\n", sc->msi_count,
522169978Skmacy			    msi_needed, error);
523169978Skmacy			sc->msi_count = 0;
524167514Skmacy			pci_release_msi(dev);
525167514Skmacy			bus_release_resource(dev, SYS_RES_MEMORY,
526167514Skmacy			    sc->msix_regs_rid, sc->msix_regs_res);
527167514Skmacy			sc->msix_regs_res = NULL;
528167514Skmacy		} else {
529167514Skmacy			sc->flags |= USING_MSIX;
530170081Skmacy			sc->cxgb_intr = t3_intr_msix;
531167514Skmacy		}
532167514Skmacy	}
533167514Skmacy
534169978Skmacy	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
535169978Skmacy		sc->msi_count = 1;
536169978Skmacy		if (pci_alloc_msi(dev, &sc->msi_count)) {
537167760Skmacy			device_printf(dev, "alloc msi failed - will try INTx\n");
538169978Skmacy			sc->msi_count = 0;
539167514Skmacy			pci_release_msi(dev);
540167514Skmacy		} else {
541167514Skmacy			sc->flags |= USING_MSI;
542167514Skmacy			sc->irq_rid = 1;
543170081Skmacy			sc->cxgb_intr = t3_intr_msi;
544167514Skmacy		}
545167514Skmacy	}
546167514Skmacy#endif
547169978Skmacy	if (sc->msi_count == 0) {
548167760Skmacy		device_printf(dev, "using line interrupts\n");
549167514Skmacy		sc->irq_rid = 0;
550170081Skmacy		sc->cxgb_intr = t3b_intr;
551167514Skmacy	}
552167514Skmacy
553177464Skmacy	if ((sc->flags & USING_MSIX) && !singleq)
554177464Skmacy		port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
555177464Skmacy
556167514Skmacy	/* Create a private taskqueue thread for handling driver events */
557167514Skmacy#ifdef TASKQUEUE_CURRENT
558167514Skmacy	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
559167514Skmacy	    taskqueue_thread_enqueue, &sc->tq);
560167514Skmacy#else
561167514Skmacy	sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
562167514Skmacy	    taskqueue_thread_enqueue, &sc->tq);
563167514Skmacy#endif
564167514Skmacy	if (sc->tq == NULL) {
565167514Skmacy		device_printf(dev, "failed to allocate controller task queue\n");
566167514Skmacy		goto out;
567167514Skmacy	}
568171804Skmacy
569167514Skmacy	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
570167514Skmacy	    device_get_nameunit(dev));
571167514Skmacy	TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
572170869Skmacy	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
573167514Skmacy
574167514Skmacy
575167514Skmacy	/* Create a periodic callout for checking adapter status */
576170869Skmacy	callout_init(&sc->cxgb_tick_ch, TRUE);
577167514Skmacy
578176572Skmacy	if ((t3_check_fw_version(sc, &must_load) != 0 && must_load) || force_fw_update) {
579167514Skmacy		/*
580167514Skmacy		 * Warn user that a firmware update will be attempted in init.
581167514Skmacy		 */
582169978Skmacy		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
583169978Skmacy		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
584167514Skmacy		sc->flags &= ~FW_UPTODATE;
585167514Skmacy	} else {
586167514Skmacy		sc->flags |= FW_UPTODATE;
587167514Skmacy	}
588171471Skmacy
589176472Skmacy	if (t3_check_tpsram_version(sc, &must_load) != 0 && must_load) {
590171471Skmacy		/*
		 * Warn user that the TP SRAM image will be updated in init.
592171471Skmacy		 */
593171471Skmacy		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
594171471Skmacy		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
595171471Skmacy		sc->flags &= ~TPS_UPTODATE;
596171471Skmacy	} else {
597171471Skmacy		sc->flags |= TPS_UPTODATE;
598171471Skmacy	}
599167514Skmacy
600167514Skmacy	/*
601167514Skmacy	 * Create a child device for each MAC.  The ethernet attachment
602167514Skmacy	 * will be done in these children.
603167760Skmacy	 */
604167760Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
605171978Skmacy		struct port_info *pi;
606171978Skmacy
607167514Skmacy		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
608167514Skmacy			device_printf(dev, "failed to add child port\n");
609167514Skmacy			error = EINVAL;
610167514Skmacy			goto out;
611167514Skmacy		}
612171978Skmacy		pi = &sc->port[i];
613171978Skmacy		pi->adapter = sc;
614171978Skmacy		pi->nqsets = port_qsets;
615171978Skmacy		pi->first_qset = i*port_qsets;
616171978Skmacy		pi->port_id = i;
617171978Skmacy		pi->tx_chan = i >= ai->nports0;
618171978Skmacy		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
619171978Skmacy		sc->rxpkt_map[pi->txpkt_intf] = i;
621171471Skmacy		sc->portdev[i] = child;
622171978Skmacy		device_set_softc(child, pi);
623167514Skmacy	}
624167514Skmacy	if ((error = bus_generic_attach(dev)) != 0)
625167514Skmacy		goto out;
626167514Skmacy
627167514Skmacy	/* initialize sge private state */
628170654Skmacy	t3_sge_init_adapter(sc);
629167514Skmacy
630167514Skmacy	t3_led_ready(sc);
631169978Skmacy
632169978Skmacy	cxgb_offload_init();
633169978Skmacy	if (is_offload(sc)) {
634169978Skmacy		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
635169978Skmacy		cxgb_adapter_ofld(sc);
636169978Skmacy        }
637167514Skmacy	error = t3_get_fw_version(sc, &vers);
638167514Skmacy	if (error)
639167514Skmacy		goto out;
640167514Skmacy
641169978Skmacy	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
642169978Skmacy	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
643169978Skmacy	    G_FW_VERSION_MICRO(vers));
644169978Skmacy
645176472Skmacy	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
646181652Skmacy	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
647174708Skmacy	t3_add_attach_sysctls(sc);
648167514Skmacyout:
649167514Skmacy	if (error)
650167514Skmacy		cxgb_free(sc);
651167514Skmacy
652167514Skmacy	return (error);
653167514Skmacy}
654167514Skmacy
655167514Skmacystatic int
656167514Skmacycxgb_controller_detach(device_t dev)
657167514Skmacy{
658167514Skmacy	struct adapter *sc;
659167514Skmacy
660167514Skmacy	sc = device_get_softc(dev);
661167514Skmacy
662167514Skmacy	cxgb_free(sc);
663167514Skmacy
664167514Skmacy	return (0);
665167514Skmacy}
666167514Skmacy
667167514Skmacystatic void
668167514Skmacycxgb_free(struct adapter *sc)
669167514Skmacy{
670167514Skmacy	int i;
671167514Skmacy
672176472Skmacy	ADAPTER_LOCK(sc);
673176472Skmacy	sc->flags |= CXGB_SHUTDOWN;
674176472Skmacy	ADAPTER_UNLOCK(sc);
675174708Skmacy	cxgb_pcpu_shutdown_threads(sc);
676170869Skmacy	ADAPTER_LOCK(sc);
677176472Skmacy
	/*
	 * cxgb_down_locked() drops the adapter lock before returning.
	 */
	cxgb_down_locked(sc);
682169978Skmacy
683169978Skmacy#ifdef MSI_SUPPORTED
684169978Skmacy	if (sc->flags & (USING_MSI | USING_MSIX)) {
685169978Skmacy		device_printf(sc->dev, "releasing msi message(s)\n");
686169978Skmacy		pci_release_msi(sc->dev);
687169978Skmacy	} else {
688169978Skmacy		device_printf(sc->dev, "no msi message to release\n");
689169978Skmacy	}
690169978Skmacy#endif
691169978Skmacy	if (sc->msix_regs_res != NULL) {
692169978Skmacy		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
693169978Skmacy		    sc->msix_regs_res);
694169978Skmacy	}
695176472Skmacy
696171978Skmacy	t3_sge_deinit_sw(sc);
697171978Skmacy	/*
698171978Skmacy	 * Wait for last callout
699171978Skmacy	 */
700171978Skmacy
701174708Skmacy	DELAY(hz*100);
702170869Skmacy
703167760Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
704167760Skmacy		if (sc->portdev[i] != NULL)
705167760Skmacy			device_delete_child(sc->dev, sc->portdev[i]);
706167760Skmacy	}
707167760Skmacy
708167514Skmacy	bus_generic_detach(sc->dev);
709176472Skmacy	if (sc->tq != NULL) {
710171978Skmacy		taskqueue_free(sc->tq);
711176472Skmacy		sc->tq = NULL;
712176472Skmacy	}
713176472Skmacy
714169978Skmacy	if (is_offload(sc)) {
715169978Skmacy		cxgb_adapter_unofld(sc);
716169978Skmacy		if (isset(&sc->open_device_map,	OFFLOAD_DEVMAP_BIT))
717169978Skmacy			offload_close(&sc->tdev);
718174708Skmacy		else
719174708Skmacy			printf("cxgb_free: DEVMAP_BIT not set\n");
720174708Skmacy	} else
		printf("cxgb_free: offload not enabled\n");
722178302Skmacy#ifdef notyet
723176472Skmacy	if (sc->flags & CXGB_OFLD_INIT)
724176472Skmacy		cxgb_offload_deactivate(sc);
725178302Skmacy#endif
726171471Skmacy	free(sc->filters, M_DEVBUF);
727167514Skmacy	t3_sge_free(sc);
728170869Skmacy
729170869Skmacy	cxgb_offload_exit();
730176472Skmacy
731176472Skmacy	if (sc->udbs_res != NULL)
732176472Skmacy		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
733176472Skmacy		    sc->udbs_res);
734176472Skmacy
735167514Skmacy	if (sc->regs_res != NULL)
736167514Skmacy		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
737167514Skmacy		    sc->regs_res);
738167514Skmacy
739170869Skmacy	MTX_DESTROY(&sc->mdio_lock);
740170869Skmacy	MTX_DESTROY(&sc->sge.reg_lock);
741170869Skmacy	MTX_DESTROY(&sc->elmer_lock);
742170869Skmacy	ADAPTER_LOCK_DEINIT(sc);
743167514Skmacy}
744167514Skmacy
745167514Skmacy/**
746167514Skmacy *	setup_sge_qsets - configure SGE Tx/Rx/response queues
747167514Skmacy *	@sc: the controller softc
748167514Skmacy *
749167514Skmacy *	Determines how many sets of SGE queues to use and initializes them.
750167514Skmacy *	We support multiple queue sets per port if we have MSI-X, otherwise
751167514Skmacy *	just one queue set per port.
752167514Skmacy */
753167514Skmacystatic int
754167514Skmacysetup_sge_qsets(adapter_t *sc)
755167514Skmacy{
756172096Skmacy	int i, j, err, irq_idx = 0, qset_idx = 0;
757169978Skmacy	u_int ntxq = SGE_TXQ_PER_SET;
758167514Skmacy
759167514Skmacy	if ((err = t3_sge_alloc(sc)) != 0) {
760167760Skmacy		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
761167514Skmacy		return (err);
762167514Skmacy	}
763167514Skmacy
764167514Skmacy	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
765167514Skmacy		irq_idx = -1;
766167514Skmacy
767172096Skmacy	for (i = 0; i < (sc)->params.nports; i++) {
768167514Skmacy		struct port_info *pi = &sc->port[i];
769167514Skmacy
770171978Skmacy		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
771167760Skmacy			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
772167514Skmacy			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
773167514Skmacy			    &sc->params.sge.qset[qset_idx], ntxq, pi);
774167514Skmacy			if (err) {
775167514Skmacy				t3_free_sge_resources(sc);
776171978Skmacy				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
777171978Skmacy				    err);
778167514Skmacy				return (err);
779167514Skmacy			}
780167514Skmacy		}
781167514Skmacy	}
782167514Skmacy
783167514Skmacy	return (0);
784167514Skmacy}
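
/*
 * Note on MSI-X routing: the vector index passed above (qset_idx + 1)
 * leaves the first message for asynchronous events such as link changes
 * and errors; the per-queue-set interrupts themselves are allocated and
 * wired up in cxgb_setup_msix() below.
 */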
785167514Skmacy
786170654Skmacystatic void
787170654Skmacycxgb_teardown_msix(adapter_t *sc)
788170654Skmacy{
789170654Skmacy	int i, nqsets;
790170654Skmacy
791170654Skmacy	for (nqsets = i = 0; i < (sc)->params.nports; i++)
792170654Skmacy		nqsets += sc->port[i].nqsets;
793170654Skmacy
794170654Skmacy	for (i = 0; i < nqsets; i++) {
795170654Skmacy		if (sc->msix_intr_tag[i] != NULL) {
796170654Skmacy			bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
797170654Skmacy			    sc->msix_intr_tag[i]);
798170654Skmacy			sc->msix_intr_tag[i] = NULL;
799170654Skmacy		}
800170654Skmacy		if (sc->msix_irq_res[i] != NULL) {
801170654Skmacy			bus_release_resource(sc->dev, SYS_RES_IRQ,
802170654Skmacy			    sc->msix_irq_rid[i], sc->msix_irq_res[i]);
803170654Skmacy			sc->msix_irq_res[i] = NULL;
804170654Skmacy		}
805170654Skmacy	}
806170654Skmacy}
807170654Skmacy
808167514Skmacystatic int
809167514Skmacycxgb_setup_msix(adapter_t *sc, int msix_count)
810167514Skmacy{
811167514Skmacy	int i, j, k, nqsets, rid;
812167514Skmacy
813167514Skmacy	/* The first message indicates link changes and error conditions */
814167514Skmacy	sc->irq_rid = 1;
815167514Skmacy	if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
816167514Skmacy	   &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
817167514Skmacy		device_printf(sc->dev, "Cannot allocate msix interrupt\n");
818167514Skmacy		return (EINVAL);
819167514Skmacy	}
820167760Skmacy
821167514Skmacy	if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
822167514Skmacy#ifdef INTR_FILTERS
823171978Skmacy		NULL,
824167514Skmacy#endif
825167514Skmacy		cxgb_async_intr, sc, &sc->intr_tag)) {
826167514Skmacy		device_printf(sc->dev, "Cannot set up interrupt\n");
827167514Skmacy		return (EINVAL);
828167514Skmacy	}
829170654Skmacy	for (i = k = 0; i < (sc)->params.nports; i++) {
830167514Skmacy		nqsets = sc->port[i].nqsets;
831170654Skmacy		for (j = 0; j < nqsets; j++, k++) {
832167514Skmacy			struct sge_qset *qs = &sc->sge.qs[k];
833171804Skmacy
834167514Skmacy			rid = k + 2;
835167514Skmacy			if (cxgb_debug)
836167514Skmacy				printf("rid=%d ", rid);
837167514Skmacy			if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
838167514Skmacy			    sc->dev, SYS_RES_IRQ, &rid,
839167514Skmacy			    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
840167514Skmacy				device_printf(sc->dev, "Cannot allocate "
841167514Skmacy				    "interrupt for message %d\n", rid);
842167514Skmacy				return (EINVAL);
843167514Skmacy			}
844167514Skmacy			sc->msix_irq_rid[k] = rid;
845170654Skmacy			if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
846174708Skmacy				INTR_MPSAFE|INTR_TYPE_NET,
847167514Skmacy#ifdef INTR_FILTERS
848171978Skmacy				NULL,
849167514Skmacy#endif
850167514Skmacy				t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
851167514Skmacy				device_printf(sc->dev, "Cannot set up "
852167514Skmacy				    "interrupt for message %d\n", rid);
853167514Skmacy				return (EINVAL);
854167514Skmacy			}
855174708Skmacy#ifdef IFNET_MULTIQUEUE
856174708Skmacy			if (singleq == 0) {
857174708Skmacy				int vector = rman_get_start(sc->msix_irq_res[k]);
858174708Skmacy				if (bootverbose)
859174708Skmacy					device_printf(sc->dev, "binding vector=%d to cpu=%d\n", vector, k % mp_ncpus);
860174708Skmacy				intr_bind(vector, k % mp_ncpus);
861174708Skmacy			}
862174708Skmacy#endif
863167514Skmacy		}
864167514Skmacy	}
865167760Skmacy
866167514Skmacy	return (0);
867167514Skmacy}
868167514Skmacy
869167514Skmacystatic int
870167514Skmacycxgb_port_probe(device_t dev)
871167514Skmacy{
872167514Skmacy	struct port_info *p;
873167514Skmacy	char buf[80];
874176472Skmacy	const char *desc;
875176472Skmacy
876167514Skmacy	p = device_get_softc(dev);
877176472Skmacy	desc = p->phy.desc;
878176472Skmacy	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
879167514Skmacy	device_set_desc_copy(dev, buf);
880167514Skmacy	return (0);
881167514Skmacy}
882167514Skmacy
883167514Skmacy
884167514Skmacystatic int
885167514Skmacycxgb_makedev(struct port_info *pi)
886167514Skmacy{
887167514Skmacy
888170654Skmacy	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
889170654Skmacy	    UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
890167514Skmacy
891167514Skmacy	if (pi->port_cdev == NULL)
892167514Skmacy		return (ENOMEM);
893167514Skmacy
894167514Skmacy	pi->port_cdev->si_drv1 = (void *)pi;
895167514Skmacy
896167514Skmacy	return (0);
897167514Skmacy}
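
/*
 * The character device created above is named after the interface (e.g.
 * /dev/cxgb0) and gives management tools access to the extension ioctls
 * handled by cxgb_extension_ioctl().
 */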
898167514Skmacy
899167514Skmacy
900167514Skmacy#ifdef TSO_SUPPORTED
901181616Skmacy#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO)
902167514Skmacy/* Don't enable TSO6 yet */
903181616Skmacy#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO)
904167514Skmacy#else
905167514Skmacy#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
906167514Skmacy/* Don't enable TSO6 yet */
907167514Skmacy#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
908167514Skmacy#define IFCAP_TSO4 0x0
909171868Skmacy#define IFCAP_TSO6 0x0
910167514Skmacy#define CSUM_TSO   0x0
911167514Skmacy#endif
912167514Skmacy
913167514Skmacy
914167514Skmacystatic int
915167514Skmacycxgb_port_attach(device_t dev)
916167514Skmacy{
917167514Skmacy	struct port_info *p;
918167514Skmacy	struct ifnet *ifp;
919170654Skmacy	int err, media_flags;
920176472Skmacy	struct adapter *sc;
921167514Skmacy
922176472Skmacy
923167514Skmacy	p = device_get_softc(dev);
924176472Skmacy	sc = p->adapter;
925170869Skmacy	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
926171803Skmacy	    device_get_unit(device_get_parent(dev)), p->port_id);
927170869Skmacy	PORT_LOCK_INIT(p, p->lockbuf);
928167514Skmacy
929167514Skmacy	/* Allocate an ifnet object and set it up */
930167514Skmacy	ifp = p->ifp = if_alloc(IFT_ETHER);
931167514Skmacy	if (ifp == NULL) {
932167514Skmacy		device_printf(dev, "Cannot allocate ifnet\n");
933167514Skmacy		return (ENOMEM);
934167514Skmacy	}
935167514Skmacy
936167514Skmacy	/*
937167514Skmacy	 * Note that there is currently no watchdog timer.
938167514Skmacy	 */
939167514Skmacy	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
940167514Skmacy	ifp->if_init = cxgb_init;
941167514Skmacy	ifp->if_softc = p;
942167514Skmacy	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
943167514Skmacy	ifp->if_ioctl = cxgb_ioctl;
944167514Skmacy	ifp->if_start = cxgb_start;
945174708Skmacy
946176472Skmacy#if 0
947174708Skmacy#ifdef IFNET_MULTIQUEUE
948174708Skmacy	ifp->if_flags |= IFF_MULTIQ;
949174708Skmacy	ifp->if_mq_start = cxgb_pcpu_start;
950174708Skmacy#endif
951176472Skmacy#endif
952167514Skmacy	ifp->if_timer = 0;	/* Disable ifnet watchdog */
953167514Skmacy	ifp->if_watchdog = NULL;
954167514Skmacy
955175312Skmacy	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
956167514Skmacy	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
957167514Skmacy	IFQ_SET_READY(&ifp->if_snd);
958167514Skmacy
959167514Skmacy	ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
960167514Skmacy	ifp->if_capabilities |= CXGB_CAP;
961167514Skmacy	ifp->if_capenable |= CXGB_CAP_ENABLE;
962167514Skmacy	ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
963171471Skmacy	/*
964171471Skmacy	 * disable TSO on 4-port - it isn't supported by the firmware yet
965171471Skmacy	 */
966171471Skmacy	if (p->adapter->params.nports > 2) {
967171471Skmacy		ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
968171471Skmacy		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
969171471Skmacy		ifp->if_hwassist &= ~CSUM_TSO;
970171471Skmacy	}
971171471Skmacy
972167514Skmacy	ether_ifattach(ifp, p->hw_addr);
973171471Skmacy	/*
974171471Skmacy	 * Only default to jumbo frames on 10GigE
975171471Skmacy	 */
976171471Skmacy	if (p->adapter->params.nports <= 2)
977180583Skmacy		ifp->if_mtu = ETHERMTU_JUMBO;
978167514Skmacy	if ((err = cxgb_makedev(p)) != 0) {
979167514Skmacy		printf("makedev failed %d\n", err);
980167514Skmacy		return (err);
981167514Skmacy	}
982167514Skmacy	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
983167514Skmacy	    cxgb_media_status);
984176472Skmacy
985176472Skmacy	if (!strcmp(p->phy.desc,	"10GBASE-CX4")) {
986170654Skmacy		media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
987176472Skmacy	} else if (!strcmp(p->phy.desc, "10GBASE-SR")) {
988170654Skmacy		media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
989177340Skmacy	} else if (!strcmp(p->phy.desc, "10GBASE-R")) {
990170654Skmacy		media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
991176472Skmacy	} else if (!strcmp(p->phy.desc, "10/100/1000BASE-T")) {
992170654Skmacy		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
993170654Skmacy		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
994170654Skmacy			    0, NULL);
995170654Skmacy		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
996170654Skmacy			    0, NULL);
997170654Skmacy		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
998170654Skmacy			    0, NULL);
999170654Skmacy		ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1000170654Skmacy			    0, NULL);
1001170654Skmacy		media_flags = 0;
1002170654Skmacy	} else {
1003176472Skmacy	        printf("unsupported media type %s\n", p->phy.desc);
1004167514Skmacy		return (ENXIO);
1005167514Skmacy	}
1006170654Skmacy	if (media_flags) {
1007170654Skmacy		ifmedia_add(&p->media, media_flags, 0, NULL);
1008170654Skmacy		ifmedia_set(&p->media, media_flags);
1009170654Skmacy	} else {
1010170654Skmacy		ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1011170654Skmacy		ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
1012170654Skmacy	}
1013167514Skmacy
1014170654Skmacy
1015171803Skmacy	snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port_id);
1016167514Skmacy#ifdef TASKQUEUE_CURRENT
1017167514Skmacy	/* Create a port for handling TX without starvation */
1018170869Skmacy	p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
1019167514Skmacy	    taskqueue_thread_enqueue, &p->tq);
1020167514Skmacy#else
1021167514Skmacy	/* Create a port for handling TX without starvation */
1022171868Skmacy	p->tq = taskqueue_create_fast(p->taskqbuf, M_NOWAIT,
1023167514Skmacy	    taskqueue_thread_enqueue, &p->tq);
1024177340Skmacy#endif
1025177340Skmacy	/* Get the latest mac address, User can use a LAA */
1026177340Skmacy	bcopy(IF_LLADDR(p->ifp), p->hw_addr, ETHER_ADDR_LEN);
1027170654Skmacy	t3_sge_init_port(p);
1028177415Skmacy#if defined(LINK_ATTACH)
1029176472Skmacy	cxgb_link_start(p);
1030176472Skmacy	t3_link_changed(sc, p->port_id);
1031177415Skmacy#endif
1032167514Skmacy	return (0);
1033167514Skmacy}
1034167514Skmacy
1035167514Skmacystatic int
1036167514Skmacycxgb_port_detach(device_t dev)
1037167514Skmacy{
1038167514Skmacy	struct port_info *p;
1039167514Skmacy
1040167514Skmacy	p = device_get_softc(dev);
1041169978Skmacy
1042169978Skmacy	PORT_LOCK(p);
1043170654Skmacy	if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
1044170654Skmacy		cxgb_stop_locked(p);
1045169978Skmacy	PORT_UNLOCK(p);
1046169978Skmacy
1047167514Skmacy	if (p->tq != NULL) {
1048167514Skmacy		taskqueue_drain(p->tq, &p->start_task);
1049167514Skmacy		taskqueue_free(p->tq);
1050167514Skmacy		p->tq = NULL;
1051167514Skmacy	}
1052170869Skmacy
1053171978Skmacy	ether_ifdetach(p->ifp);
1054174708Skmacy	printf("waiting for callout to stop ...");
1055174708Skmacy	DELAY(1000000);
1056174708Skmacy	printf("done\n");
1057171978Skmacy	/*
1058171978Skmacy	 * the lock may be acquired in ifdetach
1059171978Skmacy	 */
1060170869Skmacy	PORT_LOCK_DEINIT(p);
1061167514Skmacy	if_free(p->ifp);
1062167514Skmacy
1063170654Skmacy	if (p->port_cdev != NULL)
1064170654Skmacy		destroy_dev(p->port_cdev);
1065170654Skmacy
1066167514Skmacy	return (0);
1067167514Skmacy}
1068167514Skmacy
1069167514Skmacyvoid
1070167514Skmacyt3_fatal_err(struct adapter *sc)
1071167514Skmacy{
1072167514Skmacy	u_int fw_status[4];
1073172096Skmacy
1074172096Skmacy	if (sc->flags & FULL_INIT_DONE) {
1075172096Skmacy		t3_sge_stop(sc);
1076172096Skmacy		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1077172096Skmacy		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1078172096Skmacy		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1079172096Skmacy		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1080172096Skmacy		t3_intr_disable(sc);
1081172096Skmacy	}
1082167514Skmacy	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1083167514Skmacy	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1084167514Skmacy		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1085167514Skmacy		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1086167514Skmacy}
1087167514Skmacy
1088167514Skmacyint
1089167514Skmacyt3_os_find_pci_capability(adapter_t *sc, int cap)
1090167514Skmacy{
1091167514Skmacy	device_t dev;
1092167514Skmacy	struct pci_devinfo *dinfo;
1093167514Skmacy	pcicfgregs *cfg;
1094167514Skmacy	uint32_t status;
1095167514Skmacy	uint8_t ptr;
1096167514Skmacy
1097167514Skmacy	dev = sc->dev;
1098167514Skmacy	dinfo = device_get_ivars(dev);
1099167514Skmacy	cfg = &dinfo->cfg;
1100167514Skmacy
1101167514Skmacy	status = pci_read_config(dev, PCIR_STATUS, 2);
1102167514Skmacy	if (!(status & PCIM_STATUS_CAPPRESENT))
1103167514Skmacy		return (0);
1104167514Skmacy
1105167514Skmacy	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1106167514Skmacy	case 0:
1107167514Skmacy	case 1:
1108167514Skmacy		ptr = PCIR_CAP_PTR;
1109167514Skmacy		break;
1110167514Skmacy	case 2:
1111167514Skmacy		ptr = PCIR_CAP_PTR_2;
1112167514Skmacy		break;
1113167514Skmacy	default:
1114167514Skmacy		return (0);
1115167514Skmacy		break;
1116167514Skmacy	}
1117167514Skmacy	ptr = pci_read_config(dev, ptr, 1);
1118167514Skmacy
1119167514Skmacy	while (ptr != 0) {
1120167514Skmacy		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1121167514Skmacy			return (ptr);
1122167514Skmacy		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1123167514Skmacy	}
1124167514Skmacy
1125167514Skmacy	return (0);
1126167514Skmacy}
1127167514Skmacy
1128167514Skmacyint
1129167514Skmacyt3_os_pci_save_state(struct adapter *sc)
1130167514Skmacy{
1131167514Skmacy	device_t dev;
1132167514Skmacy	struct pci_devinfo *dinfo;
1133167514Skmacy
1134167514Skmacy	dev = sc->dev;
1135167514Skmacy	dinfo = device_get_ivars(dev);
1136167514Skmacy
1137167514Skmacy	pci_cfg_save(dev, dinfo, 0);
1138167514Skmacy	return (0);
1139167514Skmacy}
1140167514Skmacy
1141167514Skmacyint
1142167514Skmacyt3_os_pci_restore_state(struct adapter *sc)
1143167514Skmacy{
1144167514Skmacy	device_t dev;
1145167514Skmacy	struct pci_devinfo *dinfo;
1146167514Skmacy
1147167514Skmacy	dev = sc->dev;
1148167514Skmacy	dinfo = device_get_ivars(dev);
1149167514Skmacy
1150167514Skmacy	pci_cfg_restore(dev, dinfo);
1151167514Skmacy	return (0);
1152167514Skmacy}
1153167514Skmacy
1154167514Skmacy/**
1155167514Skmacy *	t3_os_link_changed - handle link status changes
1156167514Skmacy *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
1158177340Skmacy *	@link_status: the new status of the link
1159167514Skmacy *	@speed: the new speed setting
1160167514Skmacy *	@duplex: the new duplex setting
1161167514Skmacy *	@fc: the new flow-control setting
1162167514Skmacy *
1163167514Skmacy *	This is the OS-dependent handler for link status changes.  The OS
1164167514Skmacy *	neutral handler takes care of most of the processing for these events,
1165167514Skmacy *	then calls this handler for any OS-specific processing.
1166167514Skmacy */
1167167514Skmacyvoid
1168167514Skmacyt3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1169167514Skmacy     int duplex, int fc)
1170167514Skmacy{
1171167514Skmacy	struct port_info *pi = &adapter->port[port_id];
1172169978Skmacy	struct cmac *mac = &adapter->port[port_id].mac;
1173167514Skmacy
1174169978Skmacy	if (link_status) {
1175177340Skmacy		DELAY(10);
1176177340Skmacy		t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
		/* Clear errors created by MAC enable */
		t3_set_reg_field(adapter, A_XGM_STAT_CTRL + pi->mac.offset,
		    F_CLRSTATS, 1);
		if_link_state_change(pi->ifp, LINK_STATE_UP);
1183169978Skmacy	} else {
1184169978Skmacy		pi->phy.ops->power_down(&pi->phy, 1);
1185169978Skmacy		t3_mac_disable(mac, MAC_DIRECTION_RX);
1186169978Skmacy		t3_link_start(&pi->phy, mac, &pi->link_config);
1187177340Skmacy		t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1188176472Skmacy		if_link_state_change(pi->ifp, LINK_STATE_DOWN);
1189169978Skmacy	}
1190167514Skmacy}
1191167514Skmacy
1192181614Skmacy/**
1193181614Skmacy *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY module changed
 *	@port_id: the index of the port whose PHY module changed
1196181614Skmacy *
1197181614Skmacy *	This is the OS-dependent handler for PHY module changes.  It is
1198181614Skmacy *	invoked when a PHY module is removed or inserted for any OS-specific
1199181614Skmacy *	processing.
1200181614Skmacy */
1201181614Skmacyvoid t3_os_phymod_changed(struct adapter *adap, int port_id)
1202181614Skmacy{
1203181614Skmacy	static const char *mod_str[] = {
1204181614Skmacy		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
1205181614Skmacy	};
1206181614Skmacy
1207181614Skmacy	struct port_info *pi = &adap->port[port_id];
1208181614Skmacy
1209181614Skmacy	if (pi->phy.modtype == phy_modtype_none)
1210181614Skmacy		device_printf(adap->dev, "PHY module unplugged\n");
1211181614Skmacy	else {
1212181614Skmacy		KASSERT(pi->phy.modtype < ARRAY_SIZE(mod_str),
1213181614Skmacy		    ("invalid PHY module type %d", pi->phy.modtype));
1214181614Skmacy		device_printf(adap->dev, "%s PHY module inserted\n",
1215181614Skmacy		    mod_str[pi->phy.modtype]);
1216181614Skmacy	}
1217181614Skmacy}
1218181614Skmacy
1219167514Skmacy/*
1220167514Skmacy * Interrupt-context handler for external (PHY) interrupts.
1221167514Skmacy */
1222167514Skmacyvoid
1223167514Skmacyt3_os_ext_intr_handler(adapter_t *sc)
1224167514Skmacy{
1225167514Skmacy	if (cxgb_debug)
1226167514Skmacy		printf("t3_os_ext_intr_handler\n");
1227167514Skmacy	/*
1228167514Skmacy	 * Schedule a task to handle external interrupts as they may be slow
1229167514Skmacy	 * and we use a mutex to protect MDIO registers.  We disable PHY
1230167514Skmacy	 * interrupts in the meantime and let the task reenable them when
1231167514Skmacy	 * it's done.
1232167514Skmacy	 */
1233169978Skmacy	ADAPTER_LOCK(sc);
1234167514Skmacy	if (sc->slow_intr_mask) {
1235167514Skmacy		sc->slow_intr_mask &= ~F_T3DBG;
1236167514Skmacy		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1237167514Skmacy		taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
1238167514Skmacy	}
1239169978Skmacy	ADAPTER_UNLOCK(sc);
1240167514Skmacy}
1241167514Skmacy
1242167514Skmacyvoid
1243167514Skmacyt3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1244167514Skmacy{
1245167514Skmacy
1246167514Skmacy	/*
1247167514Skmacy	 * The ifnet might not be allocated before this gets called,
1248167514Skmacy	 * as this is called early on in attach by t3_prep_adapter
1249167514Skmacy	 * save the address off in the port structure
1250167514Skmacy	 */
1251167514Skmacy	if (cxgb_debug)
1252167514Skmacy		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1253167514Skmacy	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1254167514Skmacy}
1255167514Skmacy
1256167514Skmacy/**
1257167514Skmacy *	link_start - enable a port
1258167514Skmacy *	@p: the port to enable
1259167514Skmacy *
1260167514Skmacy *	Performs the MAC and PHY actions needed to enable a port.
1261167514Skmacy */
1262167514Skmacystatic void
1263167514Skmacycxgb_link_start(struct port_info *p)
1264167514Skmacy{
1265167514Skmacy	struct ifnet *ifp;
1266167514Skmacy	struct t3_rx_mode rm;
1267167514Skmacy	struct cmac *mac = &p->mac;
1268180583Skmacy	int mtu, hwtagging;
1269167514Skmacy
1270167514Skmacy	ifp = p->ifp;
1271167514Skmacy
1272180583Skmacy	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
1273180583Skmacy
1274180583Skmacy	mtu = ifp->if_mtu;
1275180583Skmacy	if (ifp->if_capenable & IFCAP_VLAN_MTU)
1276180583Skmacy		mtu += ETHER_VLAN_ENCAP_LEN;
1277180583Skmacy
1278180583Skmacy	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
1279180583Skmacy
1280167514Skmacy	t3_init_rx_mode(&rm, p);
1281172096Skmacy	if (!mac->multiport)
1282171978Skmacy		t3_mac_reset(mac);
1283180583Skmacy	t3_mac_set_mtu(mac, mtu);
1284180583Skmacy	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1285167514Skmacy	t3_mac_set_address(mac, 0, p->hw_addr);
1286167514Skmacy	t3_mac_set_rx_mode(mac, &rm);
1287167514Skmacy	t3_link_start(&p->phy, mac, &p->link_config);
1288167514Skmacy	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1289167514Skmacy}
1290167514Skmacy
1291176472Skmacy
1292176472Skmacystatic int
1293176472Skmacyawait_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1294176472Skmacy			      unsigned long n)
1295176472Skmacy{
1296176472Skmacy	int attempts = 5;
1297176472Skmacy
1298176472Skmacy	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1299176472Skmacy		if (!--attempts)
1300176472Skmacy			return (ETIMEDOUT);
1301176472Skmacy		t3_os_sleep(10);
1302176472Skmacy	}
1303176472Skmacy	return 0;
1304176472Skmacy}
1305176472Skmacy
1306176472Skmacystatic int
1307176472Skmacyinit_tp_parity(struct adapter *adap)
1308176472Skmacy{
1309176472Skmacy	int i;
1310176472Skmacy	struct mbuf *m;
1311176472Skmacy	struct cpl_set_tcb_field *greq;
1312176472Skmacy	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1313176472Skmacy
1314176472Skmacy	t3_tp_set_offload_mode(adap, 1);
1315176472Skmacy
1316176472Skmacy	for (i = 0; i < 16; i++) {
1317176472Skmacy		struct cpl_smt_write_req *req;
1318176472Skmacy
1319176472Skmacy		m = m_gethdr(M_WAITOK, MT_DATA);
1320176472Skmacy		req = mtod(m, struct cpl_smt_write_req *);
1321176472Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1322176472Skmacy		memset(req, 0, sizeof(*req));
1323176472Skmacy		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1324176472Skmacy		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1325176472Skmacy		req->iff = i;
1326176472Skmacy		t3_mgmt_tx(adap, m);
1327176472Skmacy	}
1328176472Skmacy
1329176472Skmacy	for (i = 0; i < 2048; i++) {
1330176472Skmacy		struct cpl_l2t_write_req *req;
1331176472Skmacy
1332176472Skmacy		m = m_gethdr(M_WAITOK, MT_DATA);
1333176472Skmacy		req = mtod(m, struct cpl_l2t_write_req *);
1334176472Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1335176472Skmacy		memset(req, 0, sizeof(*req));
1336176472Skmacy		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1337176472Skmacy		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1338176472Skmacy		req->params = htonl(V_L2T_W_IDX(i));
1339176472Skmacy		t3_mgmt_tx(adap, m);
1340176472Skmacy	}
1341176472Skmacy
1342176472Skmacy	for (i = 0; i < 2048; i++) {
1343176472Skmacy		struct cpl_rte_write_req *req;
1344176472Skmacy
1345176472Skmacy		m = m_gethdr(M_WAITOK, MT_DATA);
1346176472Skmacy		req = mtod(m, struct cpl_rte_write_req *);
1347176472Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1348176472Skmacy		memset(req, 0, sizeof(*req));
1349176472Skmacy		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1350176472Skmacy		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1351176472Skmacy		req->l2t_idx = htonl(V_L2T_W_IDX(i));
1352176472Skmacy		t3_mgmt_tx(adap, m);
1353176472Skmacy	}
1354176472Skmacy
1355176472Skmacy	m = m_gethdr(M_WAITOK, MT_DATA);
1356176472Skmacy	greq = mtod(m, struct cpl_set_tcb_field *);
1357176472Skmacy	m->m_len = m->m_pkthdr.len = sizeof(*greq);
1358176472Skmacy	memset(greq, 0, sizeof(*greq));
1359176472Skmacy	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1360176472Skmacy	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1361176472Skmacy	greq->mask = htobe64(1);
1362176472Skmacy	t3_mgmt_tx(adap, m);
1363176472Skmacy
1364176472Skmacy	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1365176472Skmacy	t3_tp_set_offload_mode(adap, 0);
1366176472Skmacy	return (i);
1367176472Skmacy}
1368176472Skmacy
1369167514Skmacy/**
1370167514Skmacy *	setup_rss - configure Receive Side Steering (per-queue connection demux)
1371167514Skmacy *	@adap: the adapter
1372167514Skmacy *
1373167514Skmacy *	Sets up RSS to distribute packets to multiple receive queues.  We
1374167514Skmacy *	configure the RSS CPU lookup table to distribute to the number of HW
1375167514Skmacy *	receive queues, and the response queue lookup table to narrow that
1376167514Skmacy *	down to the response queues actually configured for each port.
1377167514Skmacy *	We always configure the RSS mapping for two ports since the mapping
1378167514Skmacy *	table has plenty of entries.
1379167514Skmacy */
1380167514Skmacystatic void
1381167514Skmacysetup_rss(adapter_t *adap)
1382167514Skmacy{
1383167514Skmacy	int i;
1384171471Skmacy	u_int nq[2];
1385167514Skmacy	uint8_t cpus[SGE_QSETS + 1];
1386167514Skmacy	uint16_t rspq_map[RSS_TABLE_SIZE];
1387171471Skmacy
1388167514Skmacy	for (i = 0; i < SGE_QSETS; ++i)
1389167514Skmacy		cpus[i] = i;
1390167514Skmacy	cpus[SGE_QSETS] = 0xff;
1391167514Skmacy
1392171978Skmacy	nq[0] = nq[1] = 0;
1393171978Skmacy	for_each_port(adap, i) {
1394171978Skmacy		const struct port_info *pi = adap2pinfo(adap, i);
1395171978Skmacy
1396171978Skmacy		nq[pi->tx_chan] += pi->nqsets;
1397171978Skmacy	}
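	/*
	 * The first half of the lookup table feeds channel 0's queue sets,
	 * the second half channel 1's (offset by nq[0]); a channel with no
	 * queue sets maps everything to entry 0.
	 */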
1398167514Skmacy	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1399176472Skmacy		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1400176472Skmacy		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1401167514Skmacy	}
1402171471Skmacy	/* Calculate the reverse RSS map table */
1403171471Skmacy	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1404171471Skmacy		if (adap->rrss_map[rspq_map[i]] == 0xff)
1405171471Skmacy			adap->rrss_map[rspq_map[i]] = i;
1406167514Skmacy
1407167514Skmacy	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1408171471Skmacy		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1409176472Skmacy	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1410176472Skmacy	              cpus, rspq_map);
1411171471Skmacy
1412167514Skmacy}
1413167514Skmacy
1414169978Skmacy/*
1415169978Skmacy * Sends an mbuf to an offload queue driver.
1417169978Skmacy */
1418169978Skmacystatic inline int
1419174626Skmacyoffload_tx(struct t3cdev *tdev, struct mbuf *m)
1420169978Skmacy{
1421169978Skmacy	int ret;
1422169978Skmacy
1423169978Skmacy	ret = t3_offload_tx(tdev, m);
1424170654Skmacy	return (ret);
1425169978Skmacy}
1426169978Skmacy
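/*
 * Program source MAC table entry 'idx' with the corresponding port's
 * Ethernet address by sending a CPL_SMT_WRITE_REQ down the offload path.
 */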
1427169978Skmacystatic int
1428169978Skmacywrite_smt_entry(struct adapter *adapter, int idx)
1429169978Skmacy{
1430169978Skmacy	struct port_info *pi = &adapter->port[idx];
1431169978Skmacy	struct cpl_smt_write_req *req;
1432169978Skmacy	struct mbuf *m;
1433169978Skmacy
1434169978Skmacy	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1435169978Skmacy		return (ENOMEM);
1436169978Skmacy
1437169978Skmacy	req = mtod(m, struct cpl_smt_write_req *);
1438174708Skmacy	m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
1439174708Skmacy
1440169978Skmacy	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1441169978Skmacy	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1442169978Skmacy	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
1443169978Skmacy	req->iff = idx;
1444169978Skmacy	memset(req->src_mac1, 0, sizeof(req->src_mac1));
1445169978Skmacy	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1446169978Skmacy
1447169978Skmacy	m_set_priority(m, 1);
1448169978Skmacy
1449169978Skmacy	offload_tx(&adapter->tdev, m);
1450169978Skmacy
1451169978Skmacy	return (0);
1452169978Skmacy}
1453169978Skmacy
1454169978Skmacystatic int
1455169978Skmacyinit_smt(struct adapter *adapter)
1456169978Skmacy{
1457169978Skmacy	int i;
1458169978Skmacy
1459169978Skmacy	for_each_port(adapter, i)
1460169978Skmacy		write_smt_entry(adapter, i);
1461169978Skmacy	return 0;
1462169978Skmacy}
1463169978Skmacy
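/*
 * Write the ports' current MTUs into the TP per-port MTU table register,
 * port 0 in the low 16 bits and port 1 (if present) in the high 16 bits.
 */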
1464167514Skmacystatic void
1465169978Skmacyinit_port_mtus(adapter_t *adapter)
1466169978Skmacy{
1467169978Skmacy	unsigned int mtus = adapter->port[0].ifp->if_mtu;
1468169978Skmacy
1469169978Skmacy	if (adapter->port[1].ifp)
1470169978Skmacy		mtus |= adapter->port[1].ifp->if_mtu << 16;
1471169978Skmacy	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1472169978Skmacy}
1473169978Skmacy
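/*
 * Issue a packet-scheduler management work request binding queue 'qidx' of
 * scheduler 'sched' to 'port' with the given lo/hi parameters.  The request
 * is silently dropped if no mbuf can be allocated.
 */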
1474169978Skmacystatic void
1475167514Skmacysend_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1476167514Skmacy			      int hi, int port)
1477167514Skmacy{
1478167514Skmacy	struct mbuf *m;
1479167514Skmacy	struct mngt_pktsched_wr *req;
1480167514Skmacy
1481171471Skmacy	m = m_gethdr(M_DONTWAIT, MT_DATA);
1482167848Skmacy	if (m) {
1483169978Skmacy		req = mtod(m, struct mngt_pktsched_wr *);
1484167848Skmacy		req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1485167848Skmacy		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1486167848Skmacy		req->sched = sched;
1487167848Skmacy		req->idx = qidx;
1488167848Skmacy		req->min = lo;
1489167848Skmacy		req->max = hi;
1490167848Skmacy		req->binding = port;
1491167848Skmacy		m->m_len = m->m_pkthdr.len = sizeof(*req);
1492167848Skmacy		t3_mgmt_tx(adap, m);
1493167848Skmacy	}
1494167514Skmacy}
1495167514Skmacy
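/*
 * Start the driver's per-queue-set service threads and bind each port's
 * queue sets to that port's TX channel via packet-scheduler commands.
 */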
1496167514Skmacystatic void
1497167514Skmacybind_qsets(adapter_t *sc)
1498167514Skmacy{
1499167514Skmacy	int i, j;
1500167514Skmacy
1501174708Skmacy	cxgb_pcpu_startup_threads(sc);
1502167514Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
1503167514Skmacy		const struct port_info *pi = adap2pinfo(sc, i);
1504167514Skmacy
1505172096Skmacy		for (j = 0; j < pi->nqsets; ++j) {
1506167514Skmacy			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1507172096Skmacy					  -1, pi->tx_chan);
1508172096Skmacy
1509172096Skmacy		}
1510167514Skmacy	}
1511167514Skmacy}
1512167514Skmacy
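/*
 * If the protocol SRAM image stored in the adapter's EEPROM does not match
 * the version this driver expects, rewrite it from the TPEEPROM_NAME
 * firmware image.
 */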
1513171471Skmacystatic void
1514171471Skmacyupdate_tpeeprom(struct adapter *adap)
1515171471Skmacy{
1516172109Skmacy#ifdef FIRMWARE_LATEST
1517171471Skmacy	const struct firmware *tpeeprom;
1518172109Skmacy#else
1519172109Skmacy	struct firmware *tpeeprom;
1520172109Skmacy#endif
1521172109Skmacy
1522171471Skmacy	uint32_t version;
1523171471Skmacy	unsigned int major, minor;
1524171471Skmacy	int ret, len;
1525171471Skmacy	char rev;
1526171471Skmacy
1527171471Skmacy	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1528171471Skmacy
1529171471Skmacy	major = G_TP_VERSION_MAJOR(version);
1530171471Skmacy	minor = G_TP_VERSION_MINOR(version);
1531171471Skmacy	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1532171471Skmacy		return;
1533171471Skmacy
1534171471Skmacy	rev = t3rev2char(adap);
1535171471Skmacy
1536176613Skmacy	tpeeprom = firmware_get(TPEEPROM_NAME);
1537171471Skmacy	if (tpeeprom == NULL) {
1538171471Skmacy		device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n",
1539176613Skmacy		    TPEEPROM_NAME);
1540171471Skmacy		return;
1541171471Skmacy	}
1542171471Skmacy
1543171471Skmacy	len = tpeeprom->datasize - 4;
1544171471Skmacy
1545171471Skmacy	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1546171471Skmacy	if (ret)
1547171471Skmacy		goto release_tpeeprom;
1548171471Skmacy
1549171471Skmacy	if (len != TP_SRAM_LEN) {
1550176613Skmacy		device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", TPEEPROM_NAME, len, TP_SRAM_LEN);
1551171471Skmacy		goto release_tpeeprom;
1552171471Skmacy	}
1553171471Skmacy
1554171471Skmacy	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1555171471Skmacy	    TP_SRAM_OFFSET);
1556171471Skmacy
1557171471Skmacy	if (!ret) {
1558171471Skmacy		device_printf(adap->dev,
1559171471Skmacy			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1560171471Skmacy			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1561171471Skmacy	} else
1562171471Skmacy		device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n");
1563171471Skmacy
1564171471Skmacyrelease_tpeeprom:
1565171471Skmacy	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1566171471Skmacy
1567171471Skmacy	return;
1568171471Skmacy}
1569171471Skmacy
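/*
 * Refresh the protocol SRAM: bring the EEPROM copy up to date if needed,
 * then validate and load the TPSRAM_NAME firmware image into the TP.
 */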
1570171471Skmacystatic int
1571171471Skmacyupdate_tpsram(struct adapter *adap)
1572171471Skmacy{
1573172109Skmacy#ifdef FIRMWARE_LATEST
1574171471Skmacy	const struct firmware *tpsram;
1575172109Skmacy#else
1576172109Skmacy	struct firmware *tpsram;
1577172109Skmacy#endif
1578171471Skmacy	int ret;
1579171471Skmacy	char rev;
1580171471Skmacy
1581171471Skmacy	rev = t3rev2char(adap);
1582171471Skmacy	if (!rev)
1583171471Skmacy		return 0;
1584171471Skmacy
1585171471Skmacy	update_tpeeprom(adap);
1586171471Skmacy
1587176613Skmacy	tpsram = firmware_get(TPSRAM_NAME);
1588171471Skmacy	if (tpsram == NULL){
1589176613Skmacy		device_printf(adap->dev, "could not load TP SRAM\n");
1590171471Skmacy		return (EINVAL);
1591171471Skmacy	} else
1592176613Skmacy		device_printf(adap->dev, "updating TP SRAM\n");
1593171471Skmacy
1594171471Skmacy	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1595171471Skmacy	if (ret)
1596171471Skmacy		goto release_tpsram;
1597171471Skmacy
1598171471Skmacy	ret = t3_set_proto_sram(adap, tpsram->data);
1599171471Skmacy	if (ret)
1600171471Skmacy		device_printf(adap->dev, "loading protocol SRAM failed\n");
1601171471Skmacy
1602171471Skmacyrelease_tpsram:
1603171471Skmacy	firmware_put(tpsram, FIRMWARE_UNLOAD);
1604171471Skmacy
1605171471Skmacy	return ret;
1606171471Skmacy}
1607171471Skmacy
1608169978Skmacy/**
1609169978Skmacy *	cxgb_up - enable the adapter
1610169978Skmacy *	@adap: adapter being enabled
1611169978Skmacy *
1612169978Skmacy *	Called when the first port is enabled, this function performs the
1613169978Skmacy *	actions necessary to make an adapter operational, such as completing
1614169978Skmacy *	the initialization of HW modules, and enabling interrupts.
1615169978Skmacy *
1616169978Skmacy */
1617169978Skmacystatic int
1618169978Skmacycxgb_up(struct adapter *sc)
1619169978Skmacy{
1620169978Skmacy	int err = 0;
1621169978Skmacy
1622169978Skmacy	if ((sc->flags & FULL_INIT_DONE) == 0) {
1623169978Skmacy
1624169978Skmacy		if ((sc->flags & FW_UPTODATE) == 0)
1625171471Skmacy			if ((err = upgrade_fw(sc)))
1626171471Skmacy				goto out;
1627171471Skmacy		if ((sc->flags & TPS_UPTODATE) == 0)
1628171471Skmacy			if ((err = update_tpsram(sc)))
1629171471Skmacy				goto out;
1630169978Skmacy		err = t3_init_hw(sc, 0);
1631169978Skmacy		if (err)
1632169978Skmacy			goto out;
1633169978Skmacy
1634176472Skmacy		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1635169978Skmacy		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1636169978Skmacy
1637169978Skmacy		err = setup_sge_qsets(sc);
1638169978Skmacy		if (err)
1639169978Skmacy			goto out;
1640169978Skmacy
1641169978Skmacy		setup_rss(sc);
1642174708Skmacy		t3_add_configured_sysctls(sc);
1643169978Skmacy		sc->flags |= FULL_INIT_DONE;
1644169978Skmacy	}
1645169978Skmacy
1646169978Skmacy	t3_intr_clear(sc);
1647169978Skmacy
1648169978Skmacy	/* If it's MSI or INTx, allocate a single interrupt for everything */
1649169978Skmacy	if ((sc->flags & USING_MSIX) == 0) {
1650169978Skmacy		if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
1651169978Skmacy		   &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1652171978Skmacy			device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n",
1653171978Skmacy			    sc->irq_rid);
1654169978Skmacy			err = EINVAL;
1655169978Skmacy			goto out;
1656169978Skmacy		}
1657169978Skmacy		device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);
1658169978Skmacy
1659169978Skmacy		if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
1660169978Skmacy#ifdef INTR_FILTERS
1661169978Skmacy			NULL,
1662169978Skmacy#endif
1663169978Skmacy			sc->cxgb_intr, sc, &sc->intr_tag)) {
1664169978Skmacy			device_printf(sc->dev, "Cannot set up interrupt\n");
1665169978Skmacy			err = EINVAL;
1666169978Skmacy			goto irq_err;
1667169978Skmacy		}
1668169978Skmacy	} else {
1669169978Skmacy		cxgb_setup_msix(sc, sc->msi_count);
1670169978Skmacy	}
1671169978Skmacy
1672169978Skmacy	t3_sge_start(sc);
1673169978Skmacy	t3_intr_enable(sc);
1674169978Skmacy
1675176472Skmacy	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
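	/*
	 * On rev C and later offload-capable parts, initialize TP memory
	 * parity once per attach and then unmask the TP parity interrupts.
	 */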
1676176472Skmacy	    is_offload(sc) && init_tp_parity(sc) == 0)
1677176472Skmacy		sc->flags |= TP_PARITY_INIT;
1678176472Skmacy
1679176472Skmacy	if (sc->flags & TP_PARITY_INIT) {
1680176472Skmacy		t3_write_reg(sc, A_TP_INT_CAUSE,
1681176472Skmacy				F_CMCACHEPERR | F_ARPLUTPERR);
1682176472Skmacy		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1683176472Skmacy	}
1684176472Skmacy
1685176472Skmacy
1686172096Skmacy	if (!(sc->flags & QUEUES_BOUND)) {
1687169978Skmacy		bind_qsets(sc);
1688171471Skmacy		sc->flags |= QUEUES_BOUND;
1689171471Skmacy	}
1690169978Skmacyout:
1691169978Skmacy	return (err);
1692169978Skmacyirq_err:
1693169978Skmacy	CH_ERR(sc, "request_irq failed, err %d\n", err);
1694169978Skmacy	goto out;
1695169978Skmacy}
1696169978Skmacy
1697169978Skmacy
1698169978Skmacy/*
1699169978Skmacy * Release resources when all the ports and offloading have been stopped.
1700169978Skmacy */
1701167514Skmacystatic void
1702170869Skmacycxgb_down_locked(struct adapter *sc)
1703169978Skmacy{
1704170654Skmacy
1705169978Skmacy	t3_sge_stop(sc);
1706169978Skmacy	t3_intr_disable(sc);
1707170654Skmacy
1708169978Skmacy	if (sc->intr_tag != NULL) {
1709169978Skmacy		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
1710169978Skmacy		sc->intr_tag = NULL;
1711169978Skmacy	}
1712169978Skmacy	if (sc->irq_res != NULL) {
1713169978Skmacy		device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
1714169978Skmacy		    sc->irq_rid, sc->irq_res);
1715169978Skmacy		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
1716169978Skmacy		    sc->irq_res);
1717169978Skmacy		sc->irq_res = NULL;
1718169978Skmacy	}
1719170654Skmacy
1720176472Skmacy	if (sc->flags & USING_MSIX)
1721170654Skmacy		cxgb_teardown_msix(sc);
1722176472Skmacy
1723174708Skmacy	callout_stop(&sc->cxgb_tick_ch);
1724174708Skmacy	callout_stop(&sc->sge_timer_ch);
1725170869Skmacy	callout_drain(&sc->cxgb_tick_ch);
1726169978Skmacy	callout_drain(&sc->sge_timer_ch);
1727170869Skmacy
1728171978Skmacy	if (sc->tq != NULL) {
1729176472Skmacy		printf("draining slow intr\n");
1730176472Skmacy
1731170654Skmacy		taskqueue_drain(sc->tq, &sc->slow_intr_task);
1732176472Skmacy		printf("draining ext intr\n");
1733176472Skmacy		taskqueue_drain(sc->tq, &sc->ext_intr_task);
1734176472Skmacy		printf("draining tick task\n");
1735176472Skmacy		taskqueue_drain(sc->tq, &sc->tick_task);
1736171978Skmacy	}
1737176472Skmacy	ADAPTER_UNLOCK(sc);
1738169978Skmacy}
1739169978Skmacy
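/*
 * Bring up the offload side of the adapter: mark the offload device open,
 * bring the adapter up if no port has yet, switch the TP into offload mode,
 * program MTU and SMT state, and notify registered ULP clients.
 */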
1740169978Skmacystatic int
1741169978Skmacyoffload_open(struct port_info *pi)
1742169978Skmacy{
1743169978Skmacy	struct adapter *adapter = pi->adapter;
1744174708Skmacy	struct t3cdev *tdev = &adapter->tdev;
1745174708Skmacy#ifdef notyet
1746174708Skmacy	    T3CDEV(pi->ifp);
1747174708Skmacy#endif
1748169978Skmacy	int adap_up = adapter->open_device_map & PORT_MASK;
1749169978Skmacy	int err = 0;
1750169978Skmacy
1751176472Skmacy	CTR1(KTR_CXGB, "device_map=0x%x", adapter->open_device_map);
1752169978Skmacy	if (atomic_cmpset_int(&adapter->open_device_map,
1753174708Skmacy		(adapter->open_device_map & ~(1<<OFFLOAD_DEVMAP_BIT)),
1754174708Skmacy		(adapter->open_device_map | (1<<OFFLOAD_DEVMAP_BIT))) == 0)
1755169978Skmacy		return (0);
1756169978Skmacy
1757174708Skmacy
1758174708Skmacy	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
1759174708Skmacy		printf("offload_open: DEVMAP_BIT did not get set 0x%x\n", adapter->open_device_map);
1760169978Skmacy	ADAPTER_LOCK(pi->adapter);
1761169978Skmacy	if (!adap_up)
1762169978Skmacy		err = cxgb_up(adapter);
1763169978Skmacy	ADAPTER_UNLOCK(pi->adapter);
1764171471Skmacy	if (err)
1765169978Skmacy		return (err);
1766169978Skmacy
1767169978Skmacy	t3_tp_set_offload_mode(adapter, 1);
1768174708Skmacy	tdev->lldev = pi->ifp;
1769169978Skmacy
1770169978Skmacy	init_port_mtus(adapter);
1771169978Skmacy	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1772169978Skmacy		     adapter->params.b_wnd,
1773169978Skmacy		     adapter->params.rev == 0 ?
1774169978Skmacy		       adapter->port[0].ifp->if_mtu : 0xffff);
1775169978Skmacy	init_smt(adapter);
1776169978Skmacy
1777178767Skmacy	/* Call back all registered clients */
1778178767Skmacy	cxgb_add_clients(tdev);
1779178767Skmacy
1780178767Skmacy
1781169978Skmacy	/* restore them in case the offload module has changed them */
1782169978Skmacy	if (err) {
1783169978Skmacy		t3_tp_set_offload_mode(adapter, 0);
1784169978Skmacy		clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1785169978Skmacy		cxgb_set_dummy_ops(tdev);
1786169978Skmacy	}
1787169978Skmacy	return (err);
1788169978Skmacy}
1789174708Skmacy
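/*
 * Undo offload_open(): detach ULP clients, restore non-offload mode, and
 * take the adapter down if no ports remain open.
 */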
1790169978Skmacystatic int
1791174708Skmacyoffload_close(struct t3cdev *tdev)
1792169978Skmacy{
1793169978Skmacy	struct adapter *adapter = tdev2adap(tdev);
1794169978Skmacy
1795176472Skmacy	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
1796170654Skmacy		return (0);
1797178767Skmacy
1798178767Skmacy	/* Call back all registered clients */
1799178767Skmacy	cxgb_remove_clients(tdev);
1800178767Skmacy
1801169978Skmacy	tdev->lldev = NULL;
1802169978Skmacy	cxgb_set_dummy_ops(tdev);
1803169978Skmacy	t3_tp_set_offload_mode(adapter, 0);
1804169978Skmacy	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1805169978Skmacy
1806174708Skmacy	ADAPTER_LOCK(adapter);
1807169978Skmacy	if (!adapter->open_device_map)
1808174708Skmacy		cxgb_down_locked(adapter);
1809174708Skmacy	else
1810174708Skmacy		ADAPTER_UNLOCK(adapter);
1811170654Skmacy	return (0);
1812169978Skmacy}
1813169978Skmacy
1814174708Skmacy
1815169978Skmacystatic void
1816167514Skmacycxgb_init(void *arg)
1817167514Skmacy{
1818167514Skmacy	struct port_info *p = arg;
1819167514Skmacy
1820167514Skmacy	PORT_LOCK(p);
1821167514Skmacy	cxgb_init_locked(p);
1822167514Skmacy	PORT_UNLOCK(p);
1823167514Skmacy}
1824167514Skmacy
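/*
 * Bring a port up: bring the adapter up on first use, mark the port open,
 * start offload if available, program the link, and enable the port's
 * interrupts.  Called with the port lock held.
 */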
1825167514Skmacystatic void
1826167514Skmacycxgb_init_locked(struct port_info *p)
1827167514Skmacy{
1828167514Skmacy	struct ifnet *ifp;
1829167514Skmacy	adapter_t *sc = p->adapter;
1830169978Skmacy	int err;
1831167514Skmacy
1832170869Skmacy	PORT_LOCK_ASSERT_OWNED(p);
1833167514Skmacy	ifp = p->ifp;
1834167514Skmacy
1835167514Skmacy	ADAPTER_LOCK(p->adapter);
1836171471Skmacy	if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
1837169978Skmacy		ADAPTER_UNLOCK(p->adapter);
1838169978Skmacy		cxgb_stop_locked(p);
1839169978Skmacy		return;
1840169978Skmacy	}
1841170869Skmacy	if (p->adapter->open_device_map == 0) {
1842167514Skmacy		t3_intr_clear(sc);
1843170869Skmacy	}
1844171803Skmacy	setbit(&p->adapter->open_device_map, p->port_id);
1845170654Skmacy	ADAPTER_UNLOCK(p->adapter);
1846169978Skmacy
1847169978Skmacy	if (is_offload(sc) && !ofld_disable) {
1848169978Skmacy		err = offload_open(p);
1849169978Skmacy		if (err)
1850169978Skmacy			log(LOG_WARNING,
1851169978Skmacy			    "Could not initialize offload capabilities\n");
1852169978Skmacy	}
1853177415Skmacy#if !defined(LINK_ATTACH)
1854177415Skmacy	cxgb_link_start(p);
1855177415Skmacy	t3_link_changed(sc, p->port_id);
1856177415Skmacy#endif
1857170654Skmacy	ifp->if_baudrate = p->link_config.speed * 1000000;
1858171978Skmacy
1859172096Skmacy	device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
1860171803Skmacy	t3_port_intr_enable(sc, p->port_id);
1861167760Skmacy
1862175224Skmacy	t3_sge_reset_adapter(sc);
1863170869Skmacy
1864167514Skmacy	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1865167514Skmacy	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1866167514Skmacy}
1867167514Skmacy
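/*
 * Push the interface's current promiscuous/allmulti/multicast state to the
 * MAC, serialized by the MDIO lock.
 */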
1868167514Skmacystatic void
1869167514Skmacycxgb_set_rxmode(struct port_info *p)
1870167514Skmacy{
1871167514Skmacy	struct t3_rx_mode rm;
1872167514Skmacy	struct cmac *mac = &p->mac;
1873167760Skmacy
1874167514Skmacy	t3_init_rx_mode(&rm, p);
1875176472Skmacy	mtx_lock(&p->adapter->mdio_lock);
1876167514Skmacy	t3_mac_set_rx_mode(mac, &rm);
1877176472Skmacy	mtx_unlock(&p->adapter->mdio_lock);
1878167514Skmacy}
1879167514Skmacy
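/*
 * Take a port down: disable its interrupts and pause frames, mark it
 * closed, and bring the adapter down if it was the last open port.
 * Called with the port lock held and the adapter lock not held.
 */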
1880167514Skmacystatic void
1881177340Skmacycxgb_stop_locked(struct port_info *pi)
1882167514Skmacy{
1883167514Skmacy	struct ifnet *ifp;
1884167514Skmacy
1885177340Skmacy	PORT_LOCK_ASSERT_OWNED(pi);
1886177340Skmacy	ADAPTER_LOCK_ASSERT_NOTOWNED(pi->adapter);
1887170654Skmacy
1888177340Skmacy	ifp = pi->ifp;
1889177340Skmacy	t3_port_intr_disable(pi->adapter, pi->port_id);
1890169978Skmacy	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1891169978Skmacy
1892177340Skmacy	/* disable pause frames */
1893177340Skmacy	t3_set_reg_field(pi->adapter, A_XGM_TX_CFG + pi->mac.offset,
1894177340Skmacy			 F_TXPAUSEEN, 0);
1895170869Skmacy
1896177340Skmacy	/* Reset RX FIFO HWM */
1897177340Skmacy	t3_set_reg_field(pi->adapter, A_XGM_RXFIFO_CFG + pi->mac.offset,
1898177340Skmacy			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1899177340Skmacy
1900177340Skmacy
1901177340Skmacy	ADAPTER_LOCK(pi->adapter);
1902177340Skmacy	clrbit(&pi->adapter->open_device_map, pi->port_id);
1903177340Skmacy
1904177340Skmacy	if (pi->adapter->open_device_map == 0) {
1905177340Skmacy		cxgb_down_locked(pi->adapter);
1906170869Skmacy	} else
1907177340Skmacy		ADAPTER_UNLOCK(pi->adapter);
1908170869Skmacy
1909177415Skmacy#if !defined(LINK_ATTACH)
1910177340Skmacy	DELAY(100);
1911177340Skmacy
1912177340Skmacy	/* Wait for TXFIFO empty */
1913177340Skmacy	t3_wait_op_done(pi->adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
1914177340Skmacy			F_TXFIFO_EMPTY, 1, 20, 5);
1915177340Skmacy
1916177340Skmacy	DELAY(100);
1917177340Skmacy	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1918177340Skmacy
1919177340Skmacy	pi->phy.ops->power_down(&pi->phy, 1);
1920177415Skmacy#endif
1921177340Skmacy
1922167514Skmacy}
1923167514Skmacy
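/*
 * Validate and apply a new MTU, restarting the port if it is running so
 * that the hardware picks up the new size.
 */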
1924167514Skmacystatic int
1925170654Skmacycxgb_set_mtu(struct port_info *p, int mtu)
1926170654Skmacy{
1927170654Skmacy	struct ifnet *ifp = p->ifp;
1928170654Skmacy	int error = 0;
1929170654Skmacy
1930180583Skmacy	if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1931170654Skmacy		error = EINVAL;
1932170654Skmacy	else if (ifp->if_mtu != mtu) {
1933170654Skmacy		PORT_LOCK(p);
1934170654Skmacy		ifp->if_mtu = mtu;
1935170654Skmacy		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1936170654Skmacy			cxgb_stop_locked(p);
1937170654Skmacy			cxgb_init_locked(p);
1938170654Skmacy		}
1939170654Skmacy		PORT_UNLOCK(p);
1940170654Skmacy	}
1941170654Skmacy	return (error);
1942170654Skmacy}
1943170654Skmacy
1944181616Skmacy/*
1945181616Skmacy * Mark lro enabled or disabled in all qsets for this port
1946181616Skmacy */
1947170654Skmacystatic int
1948181616Skmacycxgb_set_lro(struct port_info *p, int enabled)
1949181616Skmacy{
1950181616Skmacy	int i;
1951181616Skmacy	struct adapter *adp = p->adapter;
1952181616Skmacy	struct sge_qset *q;
1953181616Skmacy
1954181616Skmacy	PORT_LOCK_ASSERT_OWNED(p);
1955181616Skmacy	for (i = 0; i < p->nqsets; i++) {
1956181616Skmacy		q = &adp->sge.qs[p->first_qset + i];
1957181616Skmacy		q->lro.enabled = (enabled != 0);
1958181616Skmacy	}
1959181616Skmacy	return (0);
1960181616Skmacy}
1961181616Skmacy
1962181616Skmacystatic int
1963167514Skmacycxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1964167514Skmacy{
1965167514Skmacy	struct port_info *p = ifp->if_softc;
1966167514Skmacy	struct ifaddr *ifa = (struct ifaddr *)data;
1967167514Skmacy	struct ifreq *ifr = (struct ifreq *)data;
1968180583Skmacy	int flags, error = 0, reinit = 0;
1969167514Skmacy	uint32_t mask;
1970167514Skmacy
1971168737Skmacy	/*
1972168737Skmacy	 * XXX need to check that we aren't in the middle of an unload
1973168737Skmacy	 */
1974167514Skmacy	switch (command) {
1975167514Skmacy	case SIOCSIFMTU:
1976170654Skmacy		error = cxgb_set_mtu(p, ifr->ifr_mtu);
1977167514Skmacy		break;
1978167514Skmacy	case SIOCSIFADDR:
1979167514Skmacy		if (ifa->ifa_addr->sa_family == AF_INET) {
1980167514Skmacy			ifp->if_flags |= IFF_UP;
1981176472Skmacy			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1982176472Skmacy				PORT_LOCK(p);
1983170654Skmacy				cxgb_init_locked(p);
1984176472Skmacy				PORT_UNLOCK(p);
1985176472Skmacy			}
1986167514Skmacy			arp_ifinit(ifp, ifa);
1987167514Skmacy		} else
1988167514Skmacy			error = ether_ioctl(ifp, command, data);
1989167514Skmacy		break;
1990167514Skmacy	case SIOCSIFFLAGS:
1991170869Skmacy		PORT_LOCK(p);
1992167514Skmacy		if (ifp->if_flags & IFF_UP) {
1993167514Skmacy			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1994167514Skmacy				flags = p->if_flags;
1995167514Skmacy				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
1996167514Skmacy				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
1997167514Skmacy					cxgb_set_rxmode(p);
1998167514Skmacy			} else
1999167514Skmacy				cxgb_init_locked(p);
2000167760Skmacy			p->if_flags = ifp->if_flags;
2001170869Skmacy		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2002170869Skmacy			cxgb_stop_locked(p);
2003170869Skmacy
2004176472Skmacy		PORT_UNLOCK(p);
2005176472Skmacy		break;
2006176472Skmacy	case SIOCADDMULTI:
2007176472Skmacy	case SIOCDELMULTI:
2008170869Skmacy		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2009176472Skmacy			cxgb_set_rxmode(p);
2010167514Skmacy		}
2011167514Skmacy		break;
2012167514Skmacy	case SIOCSIFMEDIA:
2013167514Skmacy	case SIOCGIFMEDIA:
2014167514Skmacy		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2015167514Skmacy		break;
2016167514Skmacy	case SIOCSIFCAP:
2017167514Skmacy		PORT_LOCK(p);
2018167514Skmacy		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2019167514Skmacy		if (mask & IFCAP_TXCSUM) {
2020167514Skmacy			if (IFCAP_TXCSUM & ifp->if_capenable) {
2021167514Skmacy				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
2022167514Skmacy				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
2023180583Skmacy				    | CSUM_IP | CSUM_TSO);
2024167514Skmacy			} else {
2025167514Skmacy				ifp->if_capenable |= IFCAP_TXCSUM;
2026180583Skmacy				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
2027180583Skmacy				    | CSUM_IP);
2028167514Skmacy			}
2029167514Skmacy		}
2030180583Skmacy		if (mask & IFCAP_RXCSUM) {
2031180583Skmacy			ifp->if_capenable ^= IFCAP_RXCSUM;
2032180583Skmacy		}
2033167514Skmacy		if (mask & IFCAP_TSO4) {
2034167514Skmacy			if (IFCAP_TSO4 & ifp->if_capenable) {
2035167514Skmacy				ifp->if_capenable &= ~IFCAP_TSO4;
2036167514Skmacy				ifp->if_hwassist &= ~CSUM_TSO;
2037167514Skmacy			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
2038167514Skmacy				ifp->if_capenable |= IFCAP_TSO4;
2039167514Skmacy				ifp->if_hwassist |= CSUM_TSO;
2040167514Skmacy			} else {
2041167514Skmacy				if (cxgb_debug)
2042167514Skmacy					printf("cxgb requires tx checksum offload"
2043167514Skmacy					    " be enabled to use TSO\n");
2044167514Skmacy				error = EINVAL;
2045167514Skmacy			}
2046167514Skmacy		}
2047181616Skmacy		if (mask & IFCAP_LRO) {
2048181616Skmacy			ifp->if_capenable ^= IFCAP_LRO;
2049181616Skmacy
2050181616Skmacy			/* Safe to do this even if cxgb_up not called yet */
2051181616Skmacy			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
2052181616Skmacy		}
2053180583Skmacy		if (mask & IFCAP_VLAN_HWTAGGING) {
2054180583Skmacy			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2055180583Skmacy			reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
2056180583Skmacy		}
2057180583Skmacy		if (mask & IFCAP_VLAN_MTU) {
2058180583Skmacy			ifp->if_capenable ^= IFCAP_VLAN_MTU;
2059180583Skmacy			reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
2060180583Skmacy		}
2061180583Skmacy		if (mask & IFCAP_VLAN_HWCSUM) {
2062180583Skmacy			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2063180583Skmacy		}
2064180583Skmacy		if (reinit) {
2065180583Skmacy			cxgb_stop_locked(p);
2066180583Skmacy			cxgb_init_locked(p);
2067180583Skmacy		}
2068167514Skmacy		PORT_UNLOCK(p);
2069180583Skmacy
2070180583Skmacy#ifdef VLAN_CAPABILITIES
2071180583Skmacy		VLAN_CAPABILITIES(ifp);
2072180583Skmacy#endif
2073167514Skmacy		break;
2074167514Skmacy	default:
2075167514Skmacy		error = ether_ioctl(ifp, command, data);
2076167514Skmacy		break;
2077167514Skmacy	}
2078167514Skmacy	return (error);
2079167514Skmacy}
2080167514Skmacy
2081174708Skmacystatic int
2082167514Skmacycxgb_media_change(struct ifnet *ifp)
2083167514Skmacy{
2084167514Skmacy	if_printf(ifp, "media change not supported\n");
2085167514Skmacy	return (ENXIO);
2086167514Skmacy}
2087167514Skmacy
2088167514Skmacystatic void
2089167514Skmacycxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2090167514Skmacy{
2091167514Skmacy	struct port_info *p = ifp->if_softc;
2092167514Skmacy
2093167514Skmacy	ifmr->ifm_status = IFM_AVALID;
2094167514Skmacy	ifmr->ifm_active = IFM_ETHER;
2095167514Skmacy
2096167514Skmacy	if (!p->link_config.link_ok)
2097167514Skmacy		return;
2098167514Skmacy
2099167514Skmacy	ifmr->ifm_status |= IFM_ACTIVE;
2100167514Skmacy
2101170654Skmacy	switch (p->link_config.speed) {
2102170654Skmacy	case 10:
2103170654Skmacy		ifmr->ifm_active |= IFM_10_T;
2104170654Skmacy		break;
2105170654Skmacy	case 100:
2106170654Skmacy		ifmr->ifm_active |= IFM_100_TX;
2107170654Skmacy			break;
2108170654Skmacy		break;
2109170654Skmacy		ifmr->ifm_active |= IFM_1000_T;
2110170654Skmacy		break;
2111170654Skmacy	}
2112170654Skmacy
2113167514Skmacy	if (p->link_config.duplex)
2114167514Skmacy		ifmr->ifm_active |= IFM_FDX;
2115167514Skmacy	else
2116167514Skmacy		ifmr->ifm_active |= IFM_HDX;
2117167514Skmacy}
2118167514Skmacy
2119167514Skmacystatic void
2120167514Skmacycxgb_async_intr(void *data)
2121167514Skmacy{
2122167760Skmacy	adapter_t *sc = data;
2123167760Skmacy
2124167514Skmacy	if (cxgb_debug)
2125167760Skmacy		device_printf(sc->dev, "cxgb_async_intr\n");
2126170869Skmacy	/*
2127170869Skmacy	 * May need to sleep - defer to taskqueue
2128170869Skmacy	 */
2129170869Skmacy	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2130167514Skmacy}
2131167514Skmacy
2132167514Skmacystatic void
2133167514Skmacycxgb_ext_intr_handler(void *arg, int count)
2134167514Skmacy{
2135167514Skmacy	adapter_t *sc = (adapter_t *)arg;
2136167514Skmacy
2137167514Skmacy	if (cxgb_debug)
2138167514Skmacy		printf("cxgb_ext_intr_handler\n");
2139167514Skmacy
2140167514Skmacy	t3_phy_intr_handler(sc);
2141167514Skmacy
2142167514Skmacy	/* Now reenable external interrupts */
2143169978Skmacy	ADAPTER_LOCK(sc);
2144167514Skmacy	if (sc->slow_intr_mask) {
2145167514Skmacy		sc->slow_intr_mask |= F_T3DBG;
2146167514Skmacy		t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
2147167514Skmacy		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
2148167514Skmacy	}
2149169978Skmacy	ADAPTER_UNLOCK(sc);
2150167514Skmacy}
2151167514Skmacy
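/*
 * Poll for link changes on PHYs that cannot generate interrupts and
 * refresh each port's reported baudrate.
 */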
2152167514Skmacystatic void
2153167746Skmacycheck_link_status(adapter_t *sc)
2154167514Skmacy{
2155167746Skmacy	int i;
2156167514Skmacy
2157167746Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
2158167746Skmacy		struct port_info *p = &sc->port[i];
2159167514Skmacy
2160176472Skmacy		if (!(p->phy.caps & SUPPORTED_IRQ))
2161167746Skmacy			t3_link_changed(sc, i);
2162170654Skmacy		p->ifp->if_baudrate = p->link_config.speed * 1000000;
2163167746Skmacy	}
2164167514Skmacy}
2165167514Skmacy
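/*
 * T3B2 MAC watchdog: check each running port's MAC and, depending on the
 * watchdog status, count a toggle or fully reprogram and re-enable the MAC.
 */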
2166167514Skmacystatic void
2167167746Skmacycheck_t3b2_mac(struct adapter *adapter)
2168167514Skmacy{
2169167514Skmacy	int i;
2170167514Skmacy
2171176472Skmacy	if (adapter->flags & CXGB_SHUTDOWN)
2172176472Skmacy		return;
2173176472Skmacy
2174167746Skmacy	for_each_port(adapter, i) {
2175167746Skmacy		struct port_info *p = &adapter->port[i];
2176167746Skmacy		struct ifnet *ifp = p->ifp;
2177167746Skmacy		int status;
2178176472Skmacy
2179176472Skmacy		if (adapter->flags & CXGB_SHUTDOWN)
2180176472Skmacy			return;
2181176472Skmacy
2182167746Skmacy		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2183167746Skmacy			continue;
2184167746Skmacy
2185167746Skmacy		status = 0;
2186167746Skmacy		PORT_LOCK(p);
2187167746Skmacy		if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
2188167746Skmacy			status = t3b2_mac_watchdog_task(&p->mac);
2189167746Skmacy		if (status == 1)
2190167746Skmacy			p->mac.stats.num_toggled++;
2191167746Skmacy		else if (status == 2) {
2192167746Skmacy			struct cmac *mac = &p->mac;
2193180583Skmacy			int mtu = ifp->if_mtu;
2194167746Skmacy
2195180583Skmacy			if (ifp->if_capenable & IFCAP_VLAN_MTU)
2196180583Skmacy				mtu += ETHER_VLAN_ENCAP_LEN;
2197180583Skmacy			t3_mac_set_mtu(mac, mtu);
2198167746Skmacy			t3_mac_set_address(mac, 0, p->hw_addr);
2199167746Skmacy			cxgb_set_rxmode(p);
2200167746Skmacy			t3_link_start(&p->phy, mac, &p->link_config);
2201167746Skmacy			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2202171803Skmacy			t3_port_intr_enable(adapter, p->port_id);
2203167746Skmacy			p->mac.stats.num_resets++;
2204167746Skmacy		}
2205167746Skmacy		PORT_UNLOCK(p);
2206167514Skmacy	}
2207167514Skmacy}
2208167514Skmacy
2209167746Skmacystatic void
2210167746Skmacycxgb_tick(void *arg)
2211167746Skmacy{
2212167746Skmacy	adapter_t *sc = (adapter_t *)arg;
2213170869Skmacy
2214176472Skmacy	if (sc->flags & CXGB_SHUTDOWN)
2215176472Skmacy		return;
2216174708Skmacy
2217170869Skmacy	taskqueue_enqueue(sc->tq, &sc->tick_task);
2218181652Skmacy	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
2219170869Skmacy}
2220170869Skmacy
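/*
 * Taskqueue half of the adapter tick: poll link state when link polling is
 * configured, run the T3B2 MAC watchdog, and periodically fold the hardware
 * MAC statistics into the driver's counters.
 */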
2221170869Skmacystatic void
2222170869Skmacycxgb_tick_handler(void *arg, int count)
2223170869Skmacy{
2224170869Skmacy	adapter_t *sc = (adapter_t *)arg;
2225167746Skmacy	const struct adapter_params *p = &sc->params;
2226181652Skmacy	int i;
2227167746Skmacy
2228176472Skmacy	if (sc->flags & CXGB_SHUTDOWN)
2229176472Skmacy		return;
2230176472Skmacy
2231170869Skmacy	ADAPTER_LOCK(sc);
2232167746Skmacy	if (p->linkpoll_period)
2233167746Skmacy		check_link_status(sc);
2234167746Skmacy
2235181652Skmacy	sc->check_task_cnt++;
2236181652Skmacy
2237167746Skmacy	/*
2238176472Skmacy	 * adapter lock can currently only be acquired after the
2239167746Skmacy	 * port lock
2240167746Skmacy	 */
2241167746Skmacy	ADAPTER_UNLOCK(sc);
2242170654Skmacy
2243176472Skmacy	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2244167746Skmacy		check_t3b2_mac(sc);
2245181652Skmacy
2246181652Skmacy	/* Update MAC stats if it's time to do so */
2247181652Skmacy	if (!p->linkpoll_period ||
2248181652Skmacy	    (sc->check_task_cnt * p->linkpoll_period) / 10 >=
2249181652Skmacy	    p->stats_update_period) {
2250181652Skmacy		for_each_port(sc, i) {
2251181652Skmacy			struct port_info *port = &sc->port[i];
2252181652Skmacy			PORT_LOCK(port);
2253181652Skmacy			t3_mac_update_stats(&port->mac);
2254181652Skmacy			PORT_UNLOCK(port);
2255181652Skmacy		}
2256181652Skmacy		sc->check_task_cnt = 0;
2257181652Skmacy	}
2258167746Skmacy}
2259167746Skmacy
2260171978Skmacystatic void
2261171978Skmacytouch_bars(device_t dev)
2262171978Skmacy{
2263171978Skmacy	/*
2264171978Skmacy	 * Don't enable yet
2265171978Skmacy	 */
2266171978Skmacy#if !defined(__LP64__) && 0
2267171978Skmacy	u32 v;
2268171978Skmacy
2269171978Skmacy	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2270171978Skmacy	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2271171978Skmacy	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2272171978Skmacy	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2273171978Skmacy	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2274171978Skmacy	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2275171978Skmacy#endif
2276171978Skmacy}
2277171978Skmacy
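/*
 * Write 'len' bytes at 'offset' into the adapter's serial EEPROM, using a
 * bounce buffer for unaligned offsets or lengths and toggling write
 * protection around the update.
 */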
2278167514Skmacystatic int
2279171471Skmacyset_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2280171471Skmacy{
2281171471Skmacy	uint8_t *buf;
2282171471Skmacy	int err = 0;
2283171471Skmacy	u32 aligned_offset, aligned_len, *p;
2284171471Skmacy	struct adapter *adapter = pi->adapter;
2285171471Skmacy
2286171471Skmacy
2287171471Skmacy	aligned_offset = offset & ~3;
2288171471Skmacy	aligned_len = (len + (offset & 3) + 3) & ~3;
2289171471Skmacy
2290171471Skmacy	if (aligned_offset != offset || aligned_len != len) {
2291171471Skmacy		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2292171471Skmacy		if (!buf)
2293171471Skmacy			return (ENOMEM);
2294171471Skmacy		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2295171471Skmacy		if (!err && aligned_len > 4)
2296171471Skmacy			err = t3_seeprom_read(adapter,
2297171471Skmacy					      aligned_offset + aligned_len - 4,
2298171471Skmacy					      (u32 *)&buf[aligned_len - 4]);
2299171471Skmacy		if (err)
2300171471Skmacy			goto out;
2301171471Skmacy		memcpy(buf + (offset & 3), data, len);
2302171471Skmacy	} else
2303171471Skmacy		buf = (uint8_t *)(uintptr_t)data;
2304171471Skmacy
2305171471Skmacy	err = t3_seeprom_wp(adapter, 0);
2306171471Skmacy	if (err)
2307171471Skmacy		goto out;
2308171471Skmacy
2309171471Skmacy	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2310171471Skmacy		err = t3_seeprom_write(adapter, aligned_offset, *p);
2311171471Skmacy		aligned_offset += 4;
2312171471Skmacy	}
2313171471Skmacy
2314171471Skmacy	if (!err)
2315171471Skmacy		err = t3_seeprom_wp(adapter, 1);
2316171471Skmacyout:
2317171471Skmacy	if (buf != data)
2318171471Skmacy		free(buf, M_DEVBUF);
2319171471Skmacy	return err;
2320171471Skmacy}
2321171471Skmacy
2322171471Skmacy
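/*
 * Negative values mean "leave unchanged"; otherwise require lo <= val <= hi.
 */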
2323171471Skmacystatic int
2324167514Skmacyin_range(int val, int lo, int hi)
2325167514Skmacy{
2326167514Skmacy	return val < 0 || (val <= hi && val >= lo);
2327167514Skmacy}
2328167514Skmacy
2329167514Skmacystatic int
2330170654Skmacycxgb_extension_open(struct cdev *dev, int flags, int fmp, d_thread_t *td)
2331170654Skmacy{
2332170654Skmacy	return (0);
2333170654Skmacy}
2334170654Skmacy
2335170654Skmacystatic int
2336170654Skmacycxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
2337170654Skmacy{
2338170654Skmacy	return (0);
2339170654Skmacy}
2340170654Skmacy
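/*
 * Character-device ioctl handler implementing the CHELSIO_* management
 * interface: MDIO and register access, SGE context/descriptor dumps, queue
 * set and scheduler configuration, MTU tables, memory reads, and trace
 * filters.
 */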
2341170654Skmacystatic int
2342167514Skmacycxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2343167514Skmacy    int fflag, struct thread *td)
2344167514Skmacy{
2345167514Skmacy	int mmd, error = 0;
2346167514Skmacy	struct port_info *pi = dev->si_drv1;
2347167514Skmacy	adapter_t *sc = pi->adapter;
2348167514Skmacy
2349167514Skmacy#ifdef PRIV_SUPPORTED
2350167514Skmacy	if (priv_check(td, PRIV_DRIVER)) {
2351167514Skmacy		if (cxgb_debug)
2352167514Skmacy			printf("user does not have access to privileged ioctls\n");
2353167514Skmacy		return (EPERM);
2354167514Skmacy	}
2355167514Skmacy#else
2356167514Skmacy	if (suser(td)) {
2357167514Skmacy		if (cxgb_debug)
2358167514Skmacy			printf("user does not have access to privileged ioctls\n");
2359167514Skmacy		return (EPERM);
2360167514Skmacy	}
2361167514Skmacy#endif
2362167514Skmacy
2363167514Skmacy	switch (cmd) {
2364167514Skmacy	case SIOCGMIIREG: {
2365167514Skmacy		uint32_t val;
2366167514Skmacy		struct cphy *phy = &pi->phy;
2367167514Skmacy		struct mii_data *mid = (struct mii_data *)data;
2368167514Skmacy
2369167514Skmacy		if (!phy->mdio_read)
2370167514Skmacy			return (EOPNOTSUPP);
2371167514Skmacy		if (is_10G(sc)) {
2372167514Skmacy			mmd = mid->phy_id >> 8;
2373167514Skmacy			if (!mmd)
2374167514Skmacy				mmd = MDIO_DEV_PCS;
2375167514Skmacy			else if (mmd > MDIO_DEV_XGXS)
2376171471Skmacy				return (EINVAL);
2377167514Skmacy
2378167514Skmacy			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2379167514Skmacy					     mid->reg_num, &val);
2380167514Skmacy		} else
2381167514Skmacy		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2382167514Skmacy					     mid->reg_num & 0x1f, &val);
2383167514Skmacy		if (error == 0)
2384167514Skmacy			mid->val_out = val;
2385167514Skmacy		break;
2386167514Skmacy	}
2387167514Skmacy	case SIOCSMIIREG: {
2388167514Skmacy		struct cphy *phy = &pi->phy;
2389167514Skmacy		struct mii_data *mid = (struct mii_data *)data;
2390167514Skmacy
2391167514Skmacy		if (!phy->mdio_write)
2392167514Skmacy			return (EOPNOTSUPP);
2393167514Skmacy		if (is_10G(sc)) {
2394167514Skmacy			mmd = mid->phy_id >> 8;
2395167514Skmacy			if (!mmd)
2396167514Skmacy				mmd = MDIO_DEV_PCS;
2397167514Skmacy			else if (mmd > MDIO_DEV_XGXS)
2398167514Skmacy				return (EINVAL);
2399167514Skmacy
2400167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2401167514Skmacy					      mmd, mid->reg_num, mid->val_in);
2402167514Skmacy		} else
2403167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2404167514Skmacy					      mid->reg_num & 0x1f,
2405167514Skmacy					      mid->val_in);
2406167514Skmacy		break;
2407167514Skmacy	}
2408167514Skmacy	case CHELSIO_SETREG: {
2409167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2410167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2411167514Skmacy			return (EFAULT);
2412167514Skmacy		t3_write_reg(sc, edata->addr, edata->val);
2413167514Skmacy		break;
2414167514Skmacy	}
2415167514Skmacy	case CHELSIO_GETREG: {
2416167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2417167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2418167514Skmacy			return (EFAULT);
2419167514Skmacy		edata->val = t3_read_reg(sc, edata->addr);
2420167514Skmacy		break;
2421167514Skmacy	}
2422167514Skmacy	case CHELSIO_GET_SGE_CONTEXT: {
2423167514Skmacy		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2424176472Skmacy		mtx_lock_spin(&sc->sge.reg_lock);
2425167514Skmacy		switch (ecntxt->cntxt_type) {
2426167514Skmacy		case CNTXT_TYPE_EGRESS:
2427167514Skmacy			error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2428167514Skmacy			    ecntxt->data);
2429167514Skmacy			break;
2430167514Skmacy		case CNTXT_TYPE_FL:
2431167514Skmacy			error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
2432167514Skmacy			    ecntxt->data);
2433167514Skmacy			break;
2434167514Skmacy		case CNTXT_TYPE_RSP:
2435167514Skmacy			error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2436167514Skmacy			    ecntxt->data);
2437167514Skmacy			break;
2438167514Skmacy		case CNTXT_TYPE_CQ:
2439167514Skmacy			error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
2440167514Skmacy			    ecntxt->data);
2441167514Skmacy			break;
2442167514Skmacy		default:
2443167514Skmacy			error = EINVAL;
2444167514Skmacy			break;
2445167514Skmacy		}
2446176472Skmacy		mtx_unlock_spin(&sc->sge.reg_lock);
2447167514Skmacy		break;
2448167514Skmacy	}
2449167514Skmacy	case CHELSIO_GET_SGE_DESC: {
2450167514Skmacy		struct ch_desc *edesc = (struct ch_desc *)data;
2451167514Skmacy		int ret;
2452167514Skmacy		if (edesc->queue_num >= SGE_QSETS * 6)
2453167514Skmacy			return (EINVAL);
2454167514Skmacy		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2455167514Skmacy		    edesc->queue_num % 6, edesc->idx, edesc->data);
2456167514Skmacy		if (ret < 0)
2457167514Skmacy			return (EINVAL);
2458167514Skmacy		edesc->size = ret;
2459167514Skmacy		break;
2460167514Skmacy	}
2461167514Skmacy	case CHELSIO_SET_QSET_PARAMS: {
2462167514Skmacy		struct qset_params *q;
2463167514Skmacy		struct ch_qset_params *t = (struct ch_qset_params *)data;
2464176472Skmacy		int i;
2465176472Skmacy
2466167514Skmacy		if (t->qset_idx >= SGE_QSETS)
2467171471Skmacy			return (EINVAL);
2468167514Skmacy		if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
2469167514Skmacy		    !in_range(t->cong_thres, 0, 255) ||
2470167514Skmacy		    !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
2471167514Skmacy			      MAX_TXQ_ENTRIES) ||
2472167514Skmacy		    !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
2473167514Skmacy			      MAX_TXQ_ENTRIES) ||
2474167514Skmacy		    !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2475167514Skmacy			      MAX_CTRL_TXQ_ENTRIES) ||
2476167514Skmacy		    !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
2477167514Skmacy		    !in_range(t->fl_size[1], MIN_FL_ENTRIES,
2478167514Skmacy			      MAX_RX_JUMBO_BUFFERS) ||
2479167514Skmacy		    !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
2480171471Skmacy			return (EINVAL);
2481176472Skmacy
2482176472Skmacy		if ((sc->flags & FULL_INIT_DONE) && t->lro > 0)
2483176472Skmacy			for_each_port(sc, i) {
2484176472Skmacy				pi = adap2pinfo(sc, i);
2485176472Skmacy				if (t->qset_idx >= pi->first_qset &&
2486176472Skmacy				    t->qset_idx < pi->first_qset + pi->nqsets
2487176472Skmacy#if 0
2488176472Skmacy					&& !pi->rx_csum_offload
2489176472Skmacy#endif
2490176472Skmacy					)
2491176472Skmacy					return (EINVAL);
2492176472Skmacy			}
2493167514Skmacy		if ((sc->flags & FULL_INIT_DONE) &&
2494167514Skmacy		    (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
2495167514Skmacy		     t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
2496167514Skmacy		     t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
2497167514Skmacy		     t->polling >= 0 || t->cong_thres >= 0))
2498171471Skmacy			return (EBUSY);
2499167514Skmacy
2500167514Skmacy		q = &sc->params.sge.qset[t->qset_idx];
2501167514Skmacy
2502167514Skmacy		if (t->rspq_size >= 0)
2503167514Skmacy			q->rspq_size = t->rspq_size;
2504167514Skmacy		if (t->fl_size[0] >= 0)
2505167514Skmacy			q->fl_size = t->fl_size[0];
2506167514Skmacy		if (t->fl_size[1] >= 0)
2507167514Skmacy			q->jumbo_size = t->fl_size[1];
2508167514Skmacy		if (t->txq_size[0] >= 0)
2509167514Skmacy			q->txq_size[0] = t->txq_size[0];
2510167514Skmacy		if (t->txq_size[1] >= 0)
2511167514Skmacy			q->txq_size[1] = t->txq_size[1];
2512167514Skmacy		if (t->txq_size[2] >= 0)
2513167514Skmacy			q->txq_size[2] = t->txq_size[2];
2514167514Skmacy		if (t->cong_thres >= 0)
2515167514Skmacy			q->cong_thres = t->cong_thres;
2516167514Skmacy		if (t->intr_lat >= 0) {
2517167514Skmacy			struct sge_qset *qs = &sc->sge.qs[t->qset_idx];
2518167514Skmacy
2519180583Skmacy			q->coalesce_usecs = t->intr_lat;
2520167514Skmacy			t3_update_qset_coalesce(qs, q);
2521167514Skmacy		}
2522167514Skmacy		break;
2523167514Skmacy	}
2524167514Skmacy	case CHELSIO_GET_QSET_PARAMS: {
2525167514Skmacy		struct qset_params *q;
2526167514Skmacy		struct ch_qset_params *t = (struct ch_qset_params *)data;
2527167514Skmacy
2528167514Skmacy		if (t->qset_idx >= SGE_QSETS)
2529167514Skmacy			return (EINVAL);
2530167514Skmacy
2531167514Skmacy		q = &(sc)->params.sge.qset[t->qset_idx];
2532167514Skmacy		t->rspq_size   = q->rspq_size;
2533167514Skmacy		t->txq_size[0] = q->txq_size[0];
2534167514Skmacy		t->txq_size[1] = q->txq_size[1];
2535167514Skmacy		t->txq_size[2] = q->txq_size[2];
2536167514Skmacy		t->fl_size[0]  = q->fl_size;
2537167514Skmacy		t->fl_size[1]  = q->jumbo_size;
2538167514Skmacy		t->polling     = q->polling;
2539180583Skmacy		t->intr_lat    = q->coalesce_usecs;
2540167514Skmacy		t->cong_thres  = q->cong_thres;
2541167514Skmacy		break;
2542167514Skmacy	}
2543167514Skmacy	case CHELSIO_SET_QSET_NUM: {
2544167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2545171803Skmacy		unsigned int port_idx = pi->port_id;
2546167514Skmacy
2547167514Skmacy		if (sc->flags & FULL_INIT_DONE)
2548167514Skmacy			return (EBUSY);
2549167514Skmacy		if (edata->val < 1 ||
2550167514Skmacy		    (edata->val > 1 && !(sc->flags & USING_MSIX)))
2551167514Skmacy			return (EINVAL);
2552167514Skmacy		if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
2553167514Skmacy			return (EINVAL);
2554167514Skmacy		sc->port[port_idx].nqsets = edata->val;
2555169978Skmacy		sc->port[0].first_qset = 0;
2556167514Skmacy		/*
2557169978Skmacy		 * XXX hardcode ourselves to 2 ports just like LEEENUX
2558167514Skmacy		 */
2559167514Skmacy		sc->port[1].first_qset = sc->port[0].nqsets;
2560167514Skmacy		break;
2561167514Skmacy	}
2562167514Skmacy	case CHELSIO_GET_QSET_NUM: {
2563167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2564167514Skmacy		edata->val = pi->nqsets;
2565167514Skmacy		break;
2566167514Skmacy	}
2567169978Skmacy#ifdef notyet
2568167514Skmacy	case CHELSIO_LOAD_FW:
2569167514Skmacy	case CHELSIO_GET_PM:
2570167514Skmacy	case CHELSIO_SET_PM:
2571167514Skmacy		return (EOPNOTSUPP);
2572167514Skmacy		break;
2573167514Skmacy#endif
2574169978Skmacy	case CHELSIO_SETMTUTAB: {
2575169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2576169978Skmacy		int i;
2577169978Skmacy
2578169978Skmacy		if (!is_offload(sc))
2579169978Skmacy			return (EOPNOTSUPP);
2580169978Skmacy		if (offload_running(sc))
2581169978Skmacy			return (EBUSY);
2582169978Skmacy		if (m->nmtus != NMTUS)
2583169978Skmacy			return (EINVAL);
2584169978Skmacy		if (m->mtus[0] < 81)         /* accommodate SACK */
2585169978Skmacy			return (EINVAL);
2586169978Skmacy
2587169978Skmacy		/*
2588169978Skmacy		 * MTUs must be in ascending order
2589169978Skmacy		 */
2590169978Skmacy		for (i = 1; i < NMTUS; ++i)
2591169978Skmacy			if (m->mtus[i] < m->mtus[i - 1])
2592169978Skmacy				return (EINVAL);
2593169978Skmacy
2594169978Skmacy		memcpy(sc->params.mtus, m->mtus,
2595169978Skmacy		       sizeof(sc->params.mtus));
2596169978Skmacy		break;
2597169978Skmacy	}
2598169978Skmacy	case CHELSIO_GETMTUTAB: {
2599169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2600169978Skmacy
2601169978Skmacy		if (!is_offload(sc))
2602169978Skmacy			return (EOPNOTSUPP);
2603169978Skmacy
2604169978Skmacy		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2605169978Skmacy		m->nmtus = NMTUS;
2606169978Skmacy		break;
2607171471Skmacy	}
2608169978Skmacy	case CHELSIO_DEVUP:
2609169978Skmacy		if (!is_offload(sc))
2610169978Skmacy			return (EOPNOTSUPP);
2611169978Skmacy		return offload_open(pi);
2612169978Skmacy		break;
2613167514Skmacy	case CHELSIO_GET_MEM: {
2614167514Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2615167514Skmacy		struct mc7 *mem;
2616167514Skmacy		uint8_t *useraddr;
2617167514Skmacy		u64 buf[32];
2618167514Skmacy
2619167514Skmacy		if (!is_offload(sc))
2620167514Skmacy			return (EOPNOTSUPP);
2621167514Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2622167514Skmacy			return (EIO);         /* need the memory controllers */
2623167514Skmacy		if ((t->addr & 0x7) || (t->len & 0x7))
2624167514Skmacy			return (EINVAL);
2625167514Skmacy		if (t->mem_id == MEM_CM)
2626167514Skmacy			mem = &sc->cm;
2627167514Skmacy		else if (t->mem_id == MEM_PMRX)
2628167514Skmacy			mem = &sc->pmrx;
2629167514Skmacy		else if (t->mem_id == MEM_PMTX)
2630167514Skmacy			mem = &sc->pmtx;
2631167514Skmacy		else
2632167514Skmacy			return (EINVAL);
2633167514Skmacy
2634167514Skmacy		/*
2635167514Skmacy		 * Version scheme:
2636167514Skmacy		 * bits 0..9: chip version
2637167514Skmacy		 * bits 10..15: chip revision
2638167514Skmacy		 */
2639167514Skmacy		t->version = 3 | (sc->params.rev << 10);
2640167514Skmacy
2641167514Skmacy		/*
2642167514Skmacy		 * Read 256 bytes at a time as len can be large and we don't
2643167514Skmacy		 * want to use huge intermediate buffers.
2644167514Skmacy		 */
2645174708Skmacy		useraddr = (uint8_t *)t->buf;
2646167514Skmacy		while (t->len) {
2647167514Skmacy			unsigned int chunk = min(t->len, sizeof(buf));
2648167514Skmacy
2649167514Skmacy			error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
2650167514Skmacy			if (error)
2651167514Skmacy				return (-error);
2652167514Skmacy			if (copyout(buf, useraddr, chunk))
2653167514Skmacy				return (EFAULT);
2654167514Skmacy			useraddr += chunk;
2655167514Skmacy			t->addr += chunk;
2656167514Skmacy			t->len -= chunk;
2657167514Skmacy		}
2658167514Skmacy		break;
2659167514Skmacy	}
2660169978Skmacy	case CHELSIO_READ_TCAM_WORD: {
2661169978Skmacy		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2662169978Skmacy
2663169978Skmacy		if (!is_offload(sc))
2664169978Skmacy			return (EOPNOTSUPP);
2665171471Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2666171471Skmacy			return (EIO);         /* need MC5 */
2667169978Skmacy		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2668169978Skmacy		break;
2669169978Skmacy	}
2670167514Skmacy	case CHELSIO_SET_TRACE_FILTER: {
2671167514Skmacy		struct ch_trace *t = (struct ch_trace *)data;
2672167514Skmacy		const struct trace_params *tp;
2673167514Skmacy
2674167514Skmacy		tp = (const struct trace_params *)&t->sip;
2675167514Skmacy		if (t->config_tx)
2676167514Skmacy			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2677167514Skmacy					       t->trace_tx);
2678167514Skmacy		if (t->config_rx)
2679167514Skmacy			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2680167514Skmacy					       t->trace_rx);
2681167514Skmacy		break;
2682167514Skmacy	}
2683167514Skmacy	case CHELSIO_SET_PKTSCHED: {
2684167514Skmacy		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2685167514Skmacy		if (sc->open_device_map == 0)
2686167514Skmacy			return (EAGAIN);
2687167514Skmacy		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2688167514Skmacy		    p->binding);
2689167514Skmacy		break;
2690167514Skmacy	}
2691167514Skmacy	case CHELSIO_IFCONF_GETREGS: {
2692167514Skmacy		struct ifconf_regs *regs = (struct ifconf_regs *)data;
2693167514Skmacy		int reglen = cxgb_get_regs_len();
2694167514Skmacy		uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);
2695167514Skmacy		if (buf == NULL) {
2696167514Skmacy			return (ENOMEM);
2697167514Skmacy		}
2697167514Skmacy		if (regs->len > reglen)
2698167514Skmacy			regs->len = reglen;
2699167514Skmacy		else if (regs->len < reglen) {
2700167514Skmacy			error = E2BIG;
2701167514Skmacy			goto done;
2702167514Skmacy		}
2703167514Skmacy		cxgb_get_regs(sc, regs, buf);
2704167514Skmacy		error = copyout(buf, regs->data, reglen);
2705167514Skmacy
2706167514Skmacy		done:
2707167514Skmacy		free(buf, M_DEVBUF);
2708167514Skmacy
2709167514Skmacy		break;
2710167514Skmacy	}
2711169978Skmacy	case CHELSIO_SET_HW_SCHED: {
2712169978Skmacy		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2713169978Skmacy		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2714169978Skmacy
2715169978Skmacy		if ((sc->flags & FULL_INIT_DONE) == 0)
2716169978Skmacy			return (EAGAIN);       /* need TP to be initialized */
2717169978Skmacy		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2718169978Skmacy		    !in_range(t->channel, 0, 1) ||
2719169978Skmacy		    !in_range(t->kbps, 0, 10000000) ||
2720169978Skmacy		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2721169978Skmacy		    !in_range(t->flow_ipg, 0,
2722169978Skmacy			      dack_ticks_to_usec(sc, 0x7ff)))
2723169978Skmacy			return (EINVAL);
2724169978Skmacy
2725169978Skmacy		if (t->kbps >= 0) {
2726169978Skmacy			error = t3_config_sched(sc, t->kbps, t->sched);
2727169978Skmacy			if (error < 0)
2728169978Skmacy				return (-error);
2729169978Skmacy		}
2730169978Skmacy		if (t->class_ipg >= 0)
2731169978Skmacy			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2732169978Skmacy		if (t->flow_ipg >= 0) {
2733169978Skmacy			t->flow_ipg *= 1000;     /* us -> ns */
2734169978Skmacy			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2735169978Skmacy		}
2736169978Skmacy		if (t->mode >= 0) {
2737169978Skmacy			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2738169978Skmacy
2739169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2740169978Skmacy					 bit, t->mode ? bit : 0);
2741169978Skmacy		}
2742169978Skmacy		if (t->channel >= 0)
2743169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2744169978Skmacy					 1 << t->sched, t->channel << t->sched);
2745169978Skmacy		break;
2746169978Skmacy	}
2747167514Skmacy	default:
2748167514Skmacy		return (EOPNOTSUPP);
2749167514Skmacy		break;
2750167514Skmacy	}
2751167514Skmacy
2752167514Skmacy	return (error);
2753167514Skmacy}
2754167514Skmacy
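/*
 * Read each 32-bit register in [start, end] into the register dump buffer.
 */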
2755167514Skmacystatic __inline void
2756167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
2757167514Skmacy    unsigned int end)
2758167514Skmacy{
2759167514Skmacy	uint32_t *p = (uint32_t *)buf + start;
2760167514Skmacy
2761167514Skmacy	for ( ; start <= end; start += sizeof(uint32_t))
2762167514Skmacy		*p++ = t3_read_reg(ap, start);
2763167514Skmacy}
2764167514Skmacy
2765167514Skmacy#define T3_REGMAP_SIZE (3 * 1024)
2766167514Skmacystatic int
2767167514Skmacycxgb_get_regs_len(void)
2768167514Skmacy{
2769167514Skmacy	return T3_REGMAP_SIZE;
2770167514Skmacy}
2771167514Skmacy#undef T3_REGMAP_SIZE
2772167514Skmacy
2773167514Skmacystatic void
2774167514Skmacycxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
2775167514Skmacy{
2776167514Skmacy
2777167514Skmacy	/*
2778167514Skmacy	 * Version scheme:
2779167514Skmacy	 * bits 0..9: chip version
2780167514Skmacy	 * bits 10..15: chip revision
2781167514Skmacy	 * bit 31: set for PCIe cards
2782167514Skmacy	 */
2783167514Skmacy	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
2784167514Skmacy
2785167514Skmacy	/*
2786167514Skmacy	 * We skip the MAC statistics registers because they are clear-on-read.
2787167514Skmacy	 * Also reading multi-register stats would need to synchronize with the
2788167514Skmacy	 * periodic mac stats accumulation.  Hard to justify the complexity.
2789167514Skmacy	 */
2790167514Skmacy	memset(buf, 0, REGDUMP_SIZE);
2791167514Skmacy	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
2792167514Skmacy	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
2793167514Skmacy	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
2794167514Skmacy	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
2795167514Skmacy	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
2796167514Skmacy	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
2797167514Skmacy		       XGM_REG(A_XGM_SERDES_STAT3, 1));
2798167514Skmacy	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
2799167514Skmacy		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
2800167514Skmacy}
2801176572Skmacy
2802176572Skmacy
2803176572SkmacyMODULE_DEPEND(if_cxgb, cxgb_t3fw, 1, 1, 1);
2804