cxgb_main.c revision 170869
/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 170869 2007-06-17 04:33:38Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif

#ifdef PRIV_SUPPORTED
#include <sys/priv.h>
#endif

static int cxgb_setup_msix(adapter_t *, int);
static void cxgb_teardown_msix(adapter_t *);
static void cxgb_init(void *);
static void cxgb_init_locked(struct port_info *);
static void cxgb_stop_locked(struct port_info *);
static void cxgb_set_rxmode(struct port_info *);
static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
static void cxgb_start(struct ifnet *);
static void cxgb_start_proc(void *, int ncount);
static int cxgb_media_change(struct ifnet *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_ext_intr_handler(void *, int);
static void cxgb_tick_handler(void *, int);
static void cxgb_down_locked(struct adapter *sc);
static void cxgb_tick(void *);
static void setup_rss(adapter_t *sc);

/* Attachment glue for the PCI controller end of the device.  Each port of
 * the device is attached separately, as defined later.
 */
static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end);
static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
static int cxgb_get_regs_len(void);
static int offload_open(struct port_info *pi);
#ifdef notyet
static int offload_close(struct toedev *tdev);
#endif


static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe,		cxgb_controller_probe),
	DEVMETHOD(device_attach,	cxgb_controller_attach),
	DEVMETHOD(device_detach,	cxgb_controller_detach),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t cxgb_controller_driver = {
	"cxgbc",
	cxgb_controller_methods,
	sizeof(struct adapter)
};

static devclass_t	cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);

/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);

static device_method_t cxgb_port_methods[] = {
	DEVMETHOD(device_probe,		cxgb_port_probe),
	DEVMETHOD(device_attach,	cxgb_port_attach),
	DEVMETHOD(device_detach,	cxgb_port_detach),
	{ 0, 0 }
};

static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

static struct cdevsw cxgb_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	cxgb_extension_open,
	.d_close =	cxgb_extension_close,
	.d_ioctl =	cxgb_extension_ioctl,
	.d_name =	"cxgb",
};

static devclass_t	cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);

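/*
 * MSI-X vector count: one vector per queue set plus one for async events
 * (link changes and error conditions); see cxgb_setup_msix() below.
 */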
#define SGE_MSIX_COUNT (SGE_QSETS + 1)

extern int collapse_mbufs;
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");

/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
 */
static int ofld_disable = 0;
TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
    "disable ULP offload");

/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use singleq = 1.
 */
static int singleq = 1;
TUNABLE_INT("hw.cxgb.singleq", &singleq);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
    "use a single queue-set per port");

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32
};

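/* Bits of open_device_map that correspond to ports (see offload_open()). */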
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Table for probing the cards.  The desc field isn't actually used */
struct cxgb_ident {
	uint16_t	vendor;
	uint16_t	device;
	int		index;
	char		*desc;
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{0, 0, 0, NULL}
};

static struct cxgb_ident *
cxgb_get_ident(device_t dev)
{
	struct cxgb_ident *id;

	for (id = cxgb_identifiers; id->desc != NULL; id++) {
		if ((id->vendor == pci_get_vendor(dev)) &&
		    (id->device == pci_get_device(dev))) {
			return (id);
		}
	}
	return (NULL);
}

static const struct adapter_info *
cxgb_get_adapter_info(device_t dev)
{
	struct cxgb_ident *id;
	const struct adapter_info *ai;

	id = cxgb_get_ident(dev);
	if (id == NULL)
		return (NULL);

	ai = t3_get_adapter_info(id->index);

	return (ai);
}

static int
cxgb_controller_probe(device_t dev)
{
	const struct adapter_info *ai;
	char *ports, buf[80];
	int nports;

	ai = cxgb_get_adapter_info(dev);
	if (ai == NULL)
		return (ENXIO);

	nports = ai->nports0 + ai->nports1;
	if (nports == 1)
		ports = "port";
	else
		ports = "ports";

	snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, nports, ports);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_DEFAULT);
}

static int
upgrade_fw(adapter_t *sc)
{
	char buf[32];
#ifdef FIRMWARE_LATEST
	const struct firmware *fw;
#else
	struct firmware *fw;
#endif
	int status;

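	/* The firmware(9) image name encodes the required version: t3fw<maj><min><micro>. */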
	snprintf(&buf[0], sizeof(buf), "t3fw%d%d%d", FW_VERSION_MAJOR,
	    FW_VERSION_MINOR, FW_VERSION_MICRO);

	fw = firmware_get(buf);

	if (fw == NULL) {
		device_printf(sc->dev, "Could not find firmware image %s\n", buf);
		return (ENOENT);
	}
	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);

	firmware_put(fw, FIRMWARE_UNLOAD);

	return (status);
}

static int
cxgb_controller_attach(device_t dev)
{
	device_t child;
	const struct adapter_info *ai;
	struct adapter *sc;
	int i, reg, msi_needed, error = 0;
	uint32_t vers;
	int port_qsets = 1;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_count = 0;

	/* find the PCIe link width and set max read request to 4KB */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t lnk, pectl;
		lnk = pci_read_config(dev, reg + 0x12, 2);
		sc->link_width = (lnk >> 4) & 0x3f;

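		/*
		 * Max_Read_Request_Size is bits 14:12 of the PCIe device
		 * control register; the value 5 selects 4096-byte reads.
		 */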
		pectl = pci_read_config(dev, reg + 0x8, 2);
		pectl = (pectl & ~0x7000) | (5 << 12);
		pci_write_config(dev, reg + 0x8, pectl, 2);
	}
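	/* Warn if the slot negotiated a narrow (x4 or smaller) link. */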
	if (sc->link_width != 0 && sc->link_width <= 4) {
		device_printf(sc->dev,
		    "PCIe x%d Link, expect reduced performance\n",
		    sc->link_width);
	}

	pci_enable_busmaster(dev);
	/*
	 * Allocate the registers and make them available to the driver.
	 * The registers that we care about for NIC mode are in BAR 0
	 */
	sc->regs_rid = PCIR_BAR(0);
	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate BAR\n");
		return (ENXIO);
	}

	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
	    device_get_unit(dev));
	ADAPTER_LOCK_INIT(sc, sc->lockbuf);

	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
	    device_get_unit(dev));
	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
	    device_get_unit(dev));
	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
	    device_get_unit(dev));

	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);

	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	ai = cxgb_get_adapter_info(dev);
	if (t3_prep_adapter(sc, ai, 1) < 0) {
		printf("prep adapter failed\n");
		error = ENODEV;
		goto out;
	}
	/* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
	 * enough messages for the queue sets.  If that fails, try falling
	 * back to MSI.  If that fails, then try falling back to the legacy
	 * interrupt pin model.
	 */
#ifdef MSI_SUPPORTED

	sc->msix_regs_rid = 0x20;
	if ((msi_allowed >= 2) &&
	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {

		msi_needed = sc->msi_count = SGE_MSIX_COUNT;

		if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
		    (sc->msi_count != msi_needed)) {
			device_printf(dev, "msix allocation failed - msi_count = %d"
			    " msi_needed=%d will try msi err=%d\n", sc->msi_count,
			    msi_needed, error);
			sc->msi_count = 0;
			pci_release_msi(dev);
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->msix_regs_rid, sc->msix_regs_res);
			sc->msix_regs_res = NULL;
		} else {
			sc->flags |= USING_MSIX;
			sc->cxgb_intr = t3_intr_msix;
		}
	}

	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
		sc->msi_count = 1;
		if (pci_alloc_msi(dev, &sc->msi_count)) {
			device_printf(dev, "alloc msi failed - will try INTx\n");
			sc->msi_count = 0;
			pci_release_msi(dev);
		} else {
			sc->flags |= USING_MSI;
			sc->irq_rid = 1;
			sc->cxgb_intr = t3_intr_msi;
		}
	}
#endif
	if (sc->msi_count == 0) {
		device_printf(dev, "using line interrupts\n");
		sc->irq_rid = 0;
		sc->cxgb_intr = t3b_intr;
	}


	/* Create a private taskqueue thread for handling driver events */
#ifdef TASKQUEUE_CURRENT
	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
#else
	sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
#endif
	if (sc->tq == NULL) {
		device_printf(dev, "failed to allocate controller task queue\n");
		goto out;
	}

	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));
	TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);


	/* Create a periodic callout for checking adapter status */
	callout_init(&sc->cxgb_tick_ch, TRUE);

	if (t3_check_fw_version(sc) != 0) {
		/*
		 * Warn user that a firmware update will be attempted in init.
		 */
		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
		sc->flags &= ~FW_UPTODATE;
	} else {
		sc->flags |= FW_UPTODATE;
	}

	if ((sc->flags & USING_MSIX) && !singleq)
		port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);

	/*
	 * Create a child device for each MAC.  The ethernet attachment
	 * will be done in these children.
	 */
	for (i = 0; i < (sc)->params.nports; i++) {
		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
			device_printf(dev, "failed to add child port\n");
			error = EINVAL;
			goto out;
		}
		sc->portdev[i] = child;
		sc->port[i].adapter = sc;
		sc->port[i].nqsets = port_qsets;
		sc->port[i].first_qset = i*port_qsets;
		sc->port[i].port = i;
		device_set_softc(child, &sc->port[i]);
	}
	if ((error = bus_generic_attach(dev)) != 0)
		goto out;

	/*
	 * XXX need to poll for link status
	 */
	sc->params.stats_update_period = 1;

	/* initialize sge private state */
	t3_sge_init_adapter(sc);

	t3_led_ready(sc);

	cxgb_offload_init();
	if (is_offload(sc)) {
		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_adapter_ofld(sc);
	}
	error = t3_get_fw_version(sc, &vers);
	if (error)
		goto out;

	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
	    G_FW_VERSION_MICRO(vers));

	t3_add_sysctls(sc);
out:
	if (error)
		cxgb_free(sc);

	return (error);
}

static int
cxgb_controller_detach(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);

	cxgb_free(sc);

	return (0);
}

static void
cxgb_free(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK(sc);
	/*
	 * drops the lock
	 */
	cxgb_down_locked(sc);

#ifdef MSI_SUPPORTED
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}
#endif
	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}

	t3_sge_deinit_sw(sc);

	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->ext_intr_task);
		taskqueue_drain(sc->tq, &sc->tick_task);
		taskqueue_free(sc->tq);
	}

	tsleep(&sc, 0, "cxgb unload", hz);

	for (i = 0; i < (sc)->params.nports; ++i) {
		if (sc->portdev[i] != NULL)
			device_delete_child(sc->dev, sc->portdev[i]);
	}

	bus_generic_detach(sc->dev);
#ifdef notyet
	if (is_offload(sc)) {
		cxgb_adapter_unofld(sc);
		if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
			offload_close(&sc->tdev);
	}
#endif
	t3_free_sge_resources(sc);
	t3_sge_free(sc);

	cxgb_offload_exit();

	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	ADAPTER_LOCK_DEINIT(sc);

	return;
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx, qset_idx;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;
	else
		irq_idx = 0;

	for (qset_idx = 0, i = 0; i < (sc)->params.nports; ++i) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				t3_free_sge_resources(sc);
				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n", err);
				return (err);
			}
		}
	}

	return (0);
}

static void
cxgb_teardown_msix(adapter_t *sc)
{
	int i, nqsets;

	for (nqsets = i = 0; i < (sc)->params.nports; i++)
		nqsets += sc->port[i].nqsets;

	for (i = 0; i < nqsets; i++) {
		if (sc->msix_intr_tag[i] != NULL) {
			bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
			    sc->msix_intr_tag[i]);
			sc->msix_intr_tag[i] = NULL;
		}
		if (sc->msix_irq_res[i] != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    sc->msix_irq_rid[i], sc->msix_irq_res[i]);
			sc->msix_irq_res[i] = NULL;
		}
	}
}

static int
cxgb_setup_msix(adapter_t *sc, int msix_count)
{
	int i, j, k, nqsets, rid;

	/* The first message indicates link changes and error conditions */
	sc->irq_rid = 1;
	if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	   &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->dev, "Cannot allocate msix interrupt\n");
		return (EINVAL);
	}

	if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
			NULL,
#endif
		cxgb_async_intr, sc, &sc->intr_tag)) {
		device_printf(sc->dev, "Cannot set up interrupt\n");
		return (EINVAL);
	}
	for (i = k = 0; i < (sc)->params.nports; i++) {
		nqsets = sc->port[i].nqsets;
		for (j = 0; j < nqsets; j++, k++) {
			struct sge_qset *qs = &sc->sge.qs[k];

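			/* rid 1 is the async vector; queue set k uses rid k + 2. */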
			rid = k + 2;
			if (cxgb_debug)
				printf("rid=%d ", rid);
			if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
			    sc->dev, SYS_RES_IRQ, &rid,
			    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
				device_printf(sc->dev, "Cannot allocate "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
			sc->msix_irq_rid[k] = rid;
			if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
			    INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
			NULL,
#endif
				t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
				device_printf(sc->dev, "Cannot set up "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
		}
	}


	return (0);
}

static int
cxgb_port_probe(device_t dev)
{
	struct port_info *p;
	char buf[80];

	p = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "Port %d %s", p->port, p->port_type->desc);
	device_set_desc_copy(dev, buf);
	return (0);
}


static int
cxgb_makedev(struct port_info *pi)
{

	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
	    UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));

	if (pi->port_cdev == NULL)
		return (ENOMEM);

	pi->port_cdev->si_drv1 = (void *)pi;

	return (0);
}


#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
#else
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
#define IFCAP_TSO4 0x0
#define CSUM_TSO   0x0
#endif


static int
cxgb_port_attach(device_t dev)
{
	struct port_info *p;
	struct ifnet *ifp;
	int err, media_flags;

	p = device_get_softc(dev);

	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
	    device_get_unit(device_get_parent(dev)), p->port);
	PORT_LOCK_INIT(p, p->lockbuf);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}

	/*
	 * Note that there is currently no watchdog timer.
	 */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = cxgb_init;
	ifp->if_softc = p;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cxgb_ioctl;
	ifp->if_start = cxgb_start;
	ifp->if_timer = 0;	/* Disable ifnet watchdog */
	ifp->if_watchdog = NULL;

	ifp->if_snd.ifq_drv_maxlen = TX_ETH_Q_SIZE;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
	ifp->if_capabilities |= CXGB_CAP;
	ifp->if_capenable |= CXGB_CAP_ENABLE;
	ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);

	ether_ifattach(ifp, p->hw_addr);
#ifdef DEFAULT_JUMBO
	ifp->if_mtu = 9000;
#endif
	if ((err = cxgb_makedev(p)) != 0) {
		printf("makedev failed %d\n", err);
		return (err);
	}
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);

	if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
		media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-SR")) {
		media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-XR")) {
		media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) {
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		media_flags = 0;
	} else {
		printf("unsupported media type %s\n", p->port_type->desc);
		return (ENXIO);
	}
	if (media_flags) {
		ifmedia_add(&p->media, media_flags, 0, NULL);
		ifmedia_set(&p->media, media_flags);
	} else {
		ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
	}


	snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port);
#ifdef TASKQUEUE_CURRENT
	/* Create a taskqueue for handling TX without starvation */
	p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
	    taskqueue_thread_enqueue, &p->tq);
#else
	/* Create a taskqueue for handling TX without starvation */
	p->tq = taskqueue_create_fast(p->taskqbuf, M_NOWAIT,
	    taskqueue_thread_enqueue, &p->tq);
#endif

	if (p->tq == NULL) {
		device_printf(dev, "failed to allocate port task queue\n");
		return (ENOMEM);
	}
	taskqueue_start_threads(&p->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));
	TASK_INIT(&p->start_task, 0, cxgb_start_proc, ifp);

	t3_sge_init_port(p);

	return (0);
}

static int
cxgb_port_detach(device_t dev)
{
	struct port_info *p;

	p = device_get_softc(dev);

	PORT_LOCK(p);
	if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
		cxgb_stop_locked(p);
	PORT_UNLOCK(p);

	if (p->tq != NULL) {
		taskqueue_drain(p->tq, &p->start_task);
		taskqueue_free(p->tq);
		p->tq = NULL;
	}

	PORT_LOCK_DEINIT(p);
	ether_ifdetach(p->ifp);
	if_free(p->ifp);

	if (p->port_cdev != NULL)
		destroy_dev(p->port_cdev);

	return (0);
}

void
t3_fatal_err(struct adapter *sc)
{
	u_int fw_status[4];

	device_printf(sc->dev, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}

int
t3_os_find_pci_capability(adapter_t *sc, int cap)
{
	device_t dev;
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	uint32_t status;
	uint8_t ptr;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);
	cfg = &dinfo->cfg;

	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & PCIM_STATUS_CAPPRESENT))
		return (0);

	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptr = PCIR_CAP_PTR_2;
		break;
	default:
		return (0);
		break;
	}
	ptr = pci_read_config(dev, ptr, 1);

	while (ptr != 0) {
		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
			return (ptr);
		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
	}

	return (0);
}

int
t3_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t3_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_status: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
     int duplex, int fc)
{
	struct port_info *pi = &adapter->port[port_id];
	struct cmac *mac = &adapter->port[port_id].mac;

	if ((pi->ifp->if_flags & IFF_UP) == 0)
		return;

	if (link_status) {
		t3_mac_enable(mac, MAC_DIRECTION_RX);
		if_link_state_change(pi->ifp, LINK_STATE_UP);
	} else {
		if_link_state_change(pi->ifp, LINK_STATE_DOWN);
		pi->phy.ops->power_down(&pi->phy, 1);
		t3_mac_disable(mac, MAC_DIRECTION_RX);
		t3_link_start(&pi->phy, mac, &pi->link_config);
	}
}


/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void
t3_os_ext_intr_handler(adapter_t *sc)
{
	if (cxgb_debug)
		printf("t3_os_ext_intr_handler\n");
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
		taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
	}
	ADAPTER_UNLOCK(sc);
}

void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called,
	 * as this is called early in attach by t3_prep_adapter,
	 * so save the address off in the port structure.
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}

/**
 *	link_start - enable a port
 *	@p: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void
cxgb_link_start(struct port_info *p)
{
	struct ifnet *ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	ifp = p->ifp;

	t3_init_rx_mode(&rm, p);
	t3_mac_reset(mac);
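	/* Allow for the Ethernet header and a VLAN tag on top of the MTU. */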
	t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq0 = adap->port[0].nqsets;
	u_int nq1 = max((u_int)adap->port[1].nqsets, 1U);
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
	    F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
	    V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * Sends an mbuf to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int
offload_tx(struct toedev *tdev, struct mbuf *m)
{
	int ret;

	critical_enter();
	ret = t3_offload_tx(tdev, m);
	critical_exit();
	return (ret);
}

static int
write_smt_entry(struct adapter *adapter, int idx)
{
	struct port_info *pi = &adapter->port[idx];
	struct cpl_smt_write_req *req;
	struct mbuf *m;

	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return (ENOMEM);

	req = mtod(m, struct cpl_smt_write_req *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);

	m_set_priority(m, 1);

	offload_tx(&adapter->tdev, m);

	return (0);
}

static int
init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return (0);
}

static void
init_port_mtus(adapter_t *adapter)
{
	unsigned int mtus = adapter->port[0].ifp->if_mtu;

	if (adapter->port[1].ifp)
		mtus |= adapter->port[1].ifp->if_mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct mbuf *m;
	struct mngt_pktsched_wr *req;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m) {
		req = mtod(m, struct mngt_pktsched_wr *);
		req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
		req->sched = sched;
		req->idx = qidx;
		req->min = lo;
		req->max = hi;
		req->binding = port;
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		t3_mgmt_tx(adap, m);
	}
}

static void
bind_qsets(adapter_t *sc)
{
	int i, j;

	for (i = 0; i < (sc)->params.nports; ++i) {
		const struct port_info *pi = adap2pinfo(sc, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;

	if ((sc->flags & FULL_INIT_DONE) == 0) {

		if ((sc->flags & FW_UPTODATE) == 0)
			err = upgrade_fw(sc);

		if (err)
			goto out;

		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		setup_rss(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);

	/* If it's MSI or INTx, allocate a single interrupt for everything */
	if ((sc->flags & USING_MSIX) == 0) {
		if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		   &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n", sc->irq_rid);
			err = EINVAL;
			goto out;
		}
		device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);

		if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
			NULL,
#endif
			sc->cxgb_intr, sc, &sc->intr_tag)) {
			device_printf(sc->dev, "Cannot set up interrupt\n");
			err = EINVAL;
			goto irq_err;
		}
	} else {
		cxgb_setup_msix(sc, sc->msi_count);
	}

	t3_sge_start(sc);
	t3_intr_enable(sc);

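	/* Queue sets are bound to their ports only once, and only with MSI-X. */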
	if ((sc->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(sc);
	sc->flags |= QUEUES_BOUND;
out:
	return (err);
irq_err:
	CH_ERR(sc, "request_irq failed, err %d\n", err);
	goto out;
}


/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void
cxgb_down_locked(struct adapter *sc)
{
	int i;

	t3_sge_stop(sc);
	t3_intr_disable(sc);

	if (sc->intr_tag != NULL) {
		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		sc->intr_tag = NULL;
	}
	if (sc->irq_res != NULL) {
		device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
		    sc->irq_rid, sc->irq_res);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);
		sc->irq_res = NULL;
	}

	if (sc->flags & USING_MSIX)
		cxgb_teardown_msix(sc);
	ADAPTER_UNLOCK(sc);

	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	if (sc->tq != NULL)
		taskqueue_drain(sc->tq, &sc->slow_intr_task);
	for (i = 0; i < sc->params.nports; i++)
		if (sc->port[i].tq != NULL)
			taskqueue_drain(sc->port[i].tq, &sc->port[i].timer_reclaim_task);

}

static int
offload_open(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	struct toedev *tdev = TOEDEV(pi->ifp);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

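	/*
	 * Atomically set OFFLOAD_DEVMAP_BIT; if it was already set,
	 * the offload device is already open.
	 */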
1328169978Skmacy	if (atomic_cmpset_int(&adapter->open_device_map,
1329169978Skmacy		(adapter->open_device_map & ~OFFLOAD_DEVMAP_BIT),
1330169978Skmacy		(adapter->open_device_map | OFFLOAD_DEVMAP_BIT)) == 0)
1331169978Skmacy		return (0);
1332169978Skmacy
1333169978Skmacy	ADAPTER_LOCK(pi->adapter);
1334169978Skmacy	if (!adap_up)
1335169978Skmacy		err = cxgb_up(adapter);
1336169978Skmacy	ADAPTER_UNLOCK(pi->adapter);
1337169978Skmacy	if (err < 0)
1338169978Skmacy		return (err);
1339169978Skmacy
1340169978Skmacy	t3_tp_set_offload_mode(adapter, 1);
1341169978Skmacy	tdev->lldev = adapter->port[0].ifp;
1342169978Skmacy	err = cxgb_offload_activate(adapter);
1343169978Skmacy	if (err)
1344169978Skmacy		goto out;
1345169978Skmacy
1346169978Skmacy	init_port_mtus(adapter);
1347169978Skmacy	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1348169978Skmacy		     adapter->params.b_wnd,
1349169978Skmacy		     adapter->params.rev == 0 ?
1350169978Skmacy		       adapter->port[0].ifp->if_mtu : 0xffff);
1351169978Skmacy	init_smt(adapter);
1352169978Skmacy
1353169978Skmacy	/* Call back all registered clients */
1354169978Skmacy	cxgb_add_clients(tdev);
1355169978Skmacy
1356169978Skmacyout:
1357169978Skmacy	/* restore them in case the offload module has changed them */
1358169978Skmacy	if (err) {
1359169978Skmacy		t3_tp_set_offload_mode(adapter, 0);
1360169978Skmacy		clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1361169978Skmacy		cxgb_set_dummy_ops(tdev);
1362169978Skmacy	}
1363169978Skmacy	return (err);
1364169978Skmacy}
1365170789Skmacy#ifdef notyet
1366169978Skmacystatic int
1367169978Skmacyoffload_close(struct toedev *tdev)
1368169978Skmacy{
1369169978Skmacy	struct adapter *adapter = tdev2adap(tdev);
1370169978Skmacy
1371169978Skmacy	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
1372170654Skmacy		return (0);
1373169978Skmacy
1374169978Skmacy	/* Call back all registered clients */
1375169978Skmacy	cxgb_remove_clients(tdev);
1376169978Skmacy	tdev->lldev = NULL;
1377169978Skmacy	cxgb_set_dummy_ops(tdev);
1378169978Skmacy	t3_tp_set_offload_mode(adapter, 0);
1379169978Skmacy	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1380169978Skmacy
1381169978Skmacy	if (!adapter->open_device_map)
1382169978Skmacy		cxgb_down(adapter);
1383169978Skmacy
1384169978Skmacy	cxgb_offload_deactivate(adapter);
1385170654Skmacy	return (0);
1386169978Skmacy}
1387170789Skmacy#endif
1388169978Skmacy
1389169978Skmacystatic void
1390167514Skmacycxgb_init(void *arg)
1391167514Skmacy{
1392167514Skmacy	struct port_info *p = arg;
1393167514Skmacy
1394167514Skmacy	PORT_LOCK(p);
1395167514Skmacy	cxgb_init_locked(p);
1396167514Skmacy	PORT_UNLOCK(p);
1397167514Skmacy}
1398167514Skmacy
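/*
 * Bring one port up.  Called with the port lock held.  Adapter-wide state
 * (cxgb_up(), interrupt clear, SGE init) is only touched when this is the
 * first port to open; the port is then marked in open_device_map, the
 * offload side is opened when available, the link is started and the
 * periodic tick callout armed before the interface is marked RUNNING.
 */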
1399167514Skmacystatic void
1400167514Skmacycxgb_init_locked(struct port_info *p)
1401167514Skmacy{
1402167514Skmacy	struct ifnet *ifp;
1403167514Skmacy	adapter_t *sc = p->adapter;
1404169978Skmacy	int err;
1405167514Skmacy
1406170869Skmacy	PORT_LOCK_ASSERT_OWNED(p);
1407167514Skmacy	ifp = p->ifp;
1408167514Skmacy
1409167514Skmacy	ADAPTER_LOCK(p->adapter);
1410169978Skmacy	if ((sc->open_device_map == 0) && ((err = cxgb_up(sc)) < 0)) {
1411169978Skmacy		ADAPTER_UNLOCK(p->adapter);
1412169978Skmacy		cxgb_stop_locked(p);
1413169978Skmacy		return;
1414169978Skmacy	}
1415170869Skmacy	if (p->adapter->open_device_map == 0) {
1416167514Skmacy		t3_intr_clear(sc);
1417170869Skmacy		t3_sge_init_adapter(sc);
1418170869Skmacy	}
1419169978Skmacy	setbit(&p->adapter->open_device_map, p->port);
1420170654Skmacy	ADAPTER_UNLOCK(p->adapter);
1421169978Skmacy
1422169978Skmacy	if (is_offload(sc) && !ofld_disable) {
1423169978Skmacy		err = offload_open(p);
1424169978Skmacy		if (err)
1425169978Skmacy			log(LOG_WARNING,
1426169978Skmacy			    "Could not initialize offload capabilities\n");
1427169978Skmacy	}
1428169978Skmacy	cxgb_link_start(p);
1429170654Skmacy	t3_link_changed(sc, p->port);
1430170654Skmacy	ifp->if_baudrate = p->link_config.speed * 1000000;
1431170654Skmacy
1432167514Skmacy	t3_port_intr_enable(sc, p->port);
1433167760Skmacy
1434167514Skmacy	callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
1435167514Skmacy	    cxgb_tick, sc);
1436170869Skmacy
1437167514Skmacy	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1438167514Skmacy	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1439167514Skmacy}
1440167514Skmacy
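/*
 * Push the interface's promiscuous/multicast settings down to the MAC.
 * Called with the port lock held.
 */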
1441167514Skmacystatic void
1442167514Skmacycxgb_set_rxmode(struct port_info *p)
1443167514Skmacy{
1444167514Skmacy	struct t3_rx_mode rm;
1445167514Skmacy	struct cmac *mac = &p->mac;
1446167760Skmacy
1447170869Skmacy	PORT_LOCK_ASSERT_OWNED(p);
1448170654Skmacy
1449167514Skmacy	t3_init_rx_mode(&rm, p);
1450167514Skmacy	t3_mac_set_rx_mode(mac, &rm);
1451167514Skmacy}
1452167514Skmacy
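/*
 * Take one port down: disable its interrupts, clear RUNNING/OACTIVE, power
 * down the PHY and disable the MAC.  If this was the last open port the
 * whole adapter is torn down through cxgb_down_locked(), which also drops
 * the adapter lock acquired here.
 */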
1453167514Skmacystatic void
1454167734Skmacycxgb_stop_locked(struct port_info *p)
1455167514Skmacy{
1456167514Skmacy	struct ifnet *ifp;
1457167514Skmacy
1458170869Skmacy	PORT_LOCK_ASSERT_OWNED(p);
1459170869Skmacy	ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);
1460170654Skmacy
1461167514Skmacy	ifp = p->ifp;
1462167514Skmacy
1463169978Skmacy	t3_port_intr_disable(p->adapter, p->port);
1464169978Skmacy	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1465169978Skmacy	p->phy.ops->power_down(&p->phy, 1);
1466169978Skmacy	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1467169978Skmacy
1468167514Skmacy	ADAPTER_LOCK(p->adapter);
1469169978Skmacy	clrbit(&p->adapter->open_device_map, p->port);
1470170869Skmacy
1472170869Skmacy	if (p->adapter->open_device_map == 0) {
1473170869Skmacy		cxgb_down_locked(p->adapter);
1474170869Skmacy	} else
1475170869Skmacy		ADAPTER_UNLOCK(p->adapter);
1476170869Skmacy
1477167514Skmacy}
1478167514Skmacy
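/*
 * Validate and apply a new MTU.  Values outside [ETHERMIN, ETHER_MAX_LEN_JUMBO]
 * are rejected; if the interface is running it is stopped and re-initialized
 * so the hardware picks up the new size.
 */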
1479167514Skmacystatic int
1480170654Skmacycxgb_set_mtu(struct port_info *p, int mtu)
1481170654Skmacy{
1482170654Skmacy	struct ifnet *ifp = p->ifp;
1483170654Skmacy	int error = 0;
1484170654Skmacy
1485170654Skmacy	if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO))
1486170654Skmacy		error = EINVAL;
1487170654Skmacy	else if (ifp->if_mtu != mtu) {
1488170654Skmacy		PORT_LOCK(p);
1489170654Skmacy		ifp->if_mtu = mtu;
1490170654Skmacy		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1491170654Skmacy			callout_stop(&p->adapter->cxgb_tick_ch);
1492170654Skmacy			cxgb_stop_locked(p);
1493170654Skmacy			cxgb_init_locked(p);
1494170654Skmacy		}
1495170654Skmacy		PORT_UNLOCK(p);
1496170654Skmacy	}
1497170654Skmacy	return (error);
1498170654Skmacy}
1499170654Skmacy
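/*
 * if_ioctl handler.  MTU, address, flag, media and capability changes are
 * handled here; everything else is passed through to ether_ioctl().  Note
 * that TSO4 can only be enabled while TX checksum offload is enabled.
 */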
1500170654Skmacystatic int
1501167514Skmacycxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1502167514Skmacy{
1503167514Skmacy	struct port_info *p = ifp->if_softc;
1504167514Skmacy	struct ifaddr *ifa = (struct ifaddr *)data;
1505167514Skmacy	struct ifreq *ifr = (struct ifreq *)data;
1506167514Skmacy	int flags, error = 0;
1507167514Skmacy	uint32_t mask;
1508167514Skmacy
1509168737Skmacy	/*
1510168737Skmacy	 * XXX need to check that we aren't in the middle of an unload
1511168737Skmacy	 */
1512167514Skmacy	switch (command) {
1513167514Skmacy	case SIOCSIFMTU:
1514170654Skmacy		error = cxgb_set_mtu(p, ifr->ifr_mtu);
1515167514Skmacy		break;
1516167514Skmacy	case SIOCSIFADDR:
1517167514Skmacy	case SIOCGIFADDR:
1518170654Skmacy		PORT_LOCK(p);
1519167514Skmacy		if (ifa->ifa_addr->sa_family == AF_INET) {
1520167514Skmacy			ifp->if_flags |= IFF_UP;
1521170654Skmacy			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1522170654Skmacy				cxgb_init_locked(p);
1523167514Skmacy			arp_ifinit(ifp, ifa);
1524167514Skmacy		} else
1525167514Skmacy			error = ether_ioctl(ifp, command, data);
1526170654Skmacy		PORT_UNLOCK(p);
1527167514Skmacy		break;
1528167514Skmacy	case SIOCSIFFLAGS:
1529170869Skmacy		callout_drain(&p->adapter->cxgb_tick_ch);
1530170869Skmacy		PORT_LOCK(p);
1531167514Skmacy		if (ifp->if_flags & IFF_UP) {
1532167514Skmacy			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1533167514Skmacy				flags = p->if_flags;
1534167514Skmacy				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
1535167514Skmacy				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
1536167514Skmacy					cxgb_set_rxmode(p);
1537167514Skmacy			} else
1538167514Skmacy				cxgb_init_locked(p);
1539167760Skmacy			p->if_flags = ifp->if_flags;
1540170869Skmacy		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1541170869Skmacy			cxgb_stop_locked(p);
1542170869Skmacy
1543170869Skmacy		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1544170869Skmacy			adapter_t *sc = p->adapter;
1545170869Skmacy			callout_reset(&sc->cxgb_tick_ch,
1546170869Skmacy			    sc->params.stats_update_period * hz,
1547170869Skmacy			    cxgb_tick, sc);
1548167514Skmacy		}
1549170654Skmacy		PORT_UNLOCK(p);
1550167514Skmacy		break;
1551167514Skmacy	case SIOCSIFMEDIA:
1552167514Skmacy	case SIOCGIFMEDIA:
1553167514Skmacy		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
1554167514Skmacy		break;
1555167514Skmacy	case SIOCSIFCAP:
1556167514Skmacy		PORT_LOCK(p);
1557167514Skmacy		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1558167514Skmacy		if (mask & IFCAP_TXCSUM) {
1559167514Skmacy			if (IFCAP_TXCSUM & ifp->if_capenable) {
1560167514Skmacy				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
1561167514Skmacy				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
1562167514Skmacy				    | CSUM_TSO);
1563167514Skmacy			} else {
1564167514Skmacy				ifp->if_capenable |= IFCAP_TXCSUM;
1565167514Skmacy				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1566167514Skmacy			}
1567167514Skmacy		} else if (mask & IFCAP_RXCSUM) {
1568167514Skmacy			if (IFCAP_RXCSUM & ifp->if_capenable) {
1569167514Skmacy				ifp->if_capenable &= ~IFCAP_RXCSUM;
1570167514Skmacy			} else {
1571167514Skmacy				ifp->if_capenable |= IFCAP_RXCSUM;
1572167514Skmacy			}
1573167514Skmacy		}
1574167514Skmacy		if (mask & IFCAP_TSO4) {
1575167514Skmacy			if (IFCAP_TSO4 & ifp->if_capenable) {
1576167514Skmacy				ifp->if_capenable &= ~IFCAP_TSO4;
1577167514Skmacy				ifp->if_hwassist &= ~CSUM_TSO;
1578167514Skmacy			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
1579167514Skmacy				ifp->if_capenable |= IFCAP_TSO4;
1580167514Skmacy				ifp->if_hwassist |= CSUM_TSO;
1581167514Skmacy			} else {
1582167514Skmacy				if (cxgb_debug)
1583167514Skmacy					printf("cxgb requires tx checksum offload"
1584167514Skmacy					    " be enabled to use TSO\n");
1585167514Skmacy				error = EINVAL;
1586167514Skmacy			}
1587167514Skmacy		}
1588167514Skmacy		PORT_UNLOCK(p);
1589167514Skmacy		break;
1590167514Skmacy	default:
1591167514Skmacy		error = ether_ioctl(ifp, command, data);
1592167514Skmacy		break;
1593167514Skmacy	}
1594167514Skmacy	return (error);
1595167514Skmacy}
1596167514Skmacy
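/*
 * Drain packets from the interface send queue onto the port's first TX
 * queue until the queue is empty, txmax descriptors have been consumed, or
 * the TX ring is nearly full.  Each chain is collapsed or defragmented as
 * needed before t3_encap() builds the descriptors, and every packet sent is
 * tapped for BPF.  Returns ENXIO when the link is down, ENOBUFS when the
 * send queue runs dry, ENOSPC (with OACTIVE set) when the ring fills, or
 * the t3_encap() error otherwise.
 */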
1597167514Skmacystatic int
1598167514Skmacycxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
1599167514Skmacy{
1600167514Skmacy	struct sge_qset *qs;
1601167514Skmacy	struct sge_txq *txq;
1602167514Skmacy	struct port_info *p = ifp->if_softc;
1603168737Skmacy	struct mbuf *m0, *m = NULL;
1604167514Skmacy	int err, in_use_init;
1605170654Skmacy
1606167514Skmacy	if (!p->link_config.link_ok)
1607167514Skmacy		return (ENXIO);
1608167514Skmacy
1609167514Skmacy	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1610167514Skmacy		return (ENOBUFS);
1611167514Skmacy
1612167514Skmacy	qs = &p->adapter->sge.qs[p->first_qset];
1613167514Skmacy	txq = &qs->txq[TXQ_ETH];
1614167514Skmacy	err = 0;
1615167514Skmacy
1616167514Skmacy	mtx_lock(&txq->lock);
1617167514Skmacy	in_use_init = txq->in_use;
1618167514Skmacy	while ((txq->in_use - in_use_init < txmax) &&
1619167514Skmacy	    (txq->size > txq->in_use + TX_MAX_DESC)) {
1620167514Skmacy		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1621167514Skmacy		if (m == NULL)
1622167514Skmacy			break;
1623168737Skmacy		/*
1624168737Skmacy		 * Convert chain to M_IOVEC
1625168737Skmacy		 */
1626168737Skmacy		KASSERT((m->m_flags & M_IOVEC) == 0, ("IOVEC set too early"));
1627168737Skmacy		m0 = m;
1628168737Skmacy#ifdef INVARIANTS
1629168737Skmacy		/*
1630168737Skmacy		 * Clean up after net stack sloppiness
1631168737Skmacy		 * before calling m_sanity
1632168737Skmacy		 */
1633168737Skmacy		m0 = m->m_next;
1634168737Skmacy		while (m0) {
1635168737Skmacy			m0->m_flags &= ~M_PKTHDR;
1636168737Skmacy			m0 = m0->m_next;
1637168737Skmacy		}
1638168737Skmacy		m_sanity(m, 0);
1639168737Skmacy		m0 = m;
1640168749Skmacy#endif
1641168749Skmacy		if (collapse_mbufs && m->m_pkthdr.len > MCLBYTES &&
1642168737Skmacy		    m_collapse(m, TX_MAX_SEGS, &m0) == EFBIG) {
1643168737Skmacy			if ((m0 = m_defrag(m, M_NOWAIT)) != NULL) {
1644168737Skmacy				m = m0;
1645168737Skmacy				m_collapse(m, TX_MAX_SEGS, &m0);
1646168737Skmacy			} else
1647168737Skmacy				break;
1648168737Skmacy		}
1649168737Skmacy		m = m0;
1650167514Skmacy		if ((err = t3_encap(p, &m)) != 0)
1651167514Skmacy			break;
1652169978Skmacy		BPF_MTAP(ifp, m);
1653167514Skmacy	}
1654167514Skmacy	mtx_unlock(&txq->lock);
1655167514Skmacy
1656167514Skmacy	if (__predict_false(err)) {
1657167514Skmacy		if (err == ENOMEM) {
1658170083Skmacy			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1659167514Skmacy			IFQ_LOCK(&ifp->if_snd);
1660167514Skmacy			IFQ_DRV_PREPEND(&ifp->if_snd, m);
1661167514Skmacy			IFQ_UNLOCK(&ifp->if_snd);
1662167514Skmacy		}
1663167514Skmacy	}
1664170654Skmacy	if (err == 0 && m == NULL)
1665170654Skmacy		err = ENOBUFS;
1666170654Skmacy	else if ((err == 0) &&  (txq->size <= txq->in_use + TX_MAX_DESC) &&
1667170007Skmacy	    (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
1668170007Skmacy		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1669170654Skmacy		err = ENOSPC;
1670170007Skmacy	}
1671167514Skmacy	return (err);
1672167514Skmacy}
1673167514Skmacy
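/*
 * Taskqueue continuation of cxgb_start(): keeps feeding the TX ring in
 * TX_START_MAX_DESC batches, queueing the descriptor reclaim task whenever
 * enough completed descriptors have built up.
 */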
1674167514Skmacystatic void
1675167514Skmacycxgb_start_proc(void *arg, int ncount)
1676167514Skmacy{
1677167514Skmacy	struct ifnet *ifp = arg;
1678167514Skmacy	struct port_info *pi = ifp->if_softc;
1679167514Skmacy	struct sge_qset *qs;
1680167514Skmacy	struct sge_txq *txq;
1681170654Skmacy	int error;
1682167514Skmacy
1683167514Skmacy	qs = &pi->adapter->sge.qs[pi->first_qset];
1684167514Skmacy	txq = &qs->txq[TXQ_ETH];
1685167514Skmacy
1686170654Skmacy	do {
1687167538Skmacy		if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC)
1688167538Skmacy			taskqueue_enqueue(pi->adapter->tq,
1689170654Skmacy			    &pi->timer_reclaim_task);
1690167525Skmacy
1691167538Skmacy		error = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1692170654Skmacy	} while (error == 0);
1693167514Skmacy}
1694167514Skmacy
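/*
 * if_start entry point: kick the reclaim task if needed, push one batch of
 * packets directly and, if that succeeded, hand further work to the
 * per-port start task.
 */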
1695167514Skmacystatic void
1696167514Skmacycxgb_start(struct ifnet *ifp)
1697167514Skmacy{
1698167514Skmacy	struct port_info *pi = ifp->if_softc;
1699167514Skmacy	struct sge_qset *qs;
1700167514Skmacy	struct sge_txq *txq;
1701167514Skmacy	int err;
1702167514Skmacy
1703167514Skmacy	qs = &pi->adapter->sge.qs[pi->first_qset];
1704167514Skmacy	txq = &qs->txq[TXQ_ETH];
1705167514Skmacy
1706167538Skmacy	if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC)
1707167538Skmacy		taskqueue_enqueue(pi->adapter->tq,
1708170654Skmacy		    &pi->timer_reclaim_task);
1709167538Skmacy
1710167514Skmacy	err = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1711167514Skmacy
1712167514Skmacy	if (err == 0)
1713167514Skmacy		taskqueue_enqueue(pi->tq, &pi->start_task);
1714167514Skmacy}
1715167514Skmacy
1716167514Skmacy
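/*
 * ifmedia handlers: media changes are not supported; status reporting just
 * translates the current link_config (speed and duplex) for ifconfig.
 */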
1717167514Skmacystatic int
1718167514Skmacycxgb_media_change(struct ifnet *ifp)
1719167514Skmacy{
1720167514Skmacy	if_printf(ifp, "media change not supported\n");
1721167514Skmacy	return (ENXIO);
1722167514Skmacy}
1723167514Skmacy
1724167514Skmacystatic void
1725167514Skmacycxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1726167514Skmacy{
1727167514Skmacy	struct port_info *p = ifp->if_softc;
1728167514Skmacy
1729167514Skmacy	ifmr->ifm_status = IFM_AVALID;
1730167514Skmacy	ifmr->ifm_active = IFM_ETHER;
1731167514Skmacy
1732167514Skmacy	if (!p->link_config.link_ok)
1733167514Skmacy		return;
1734167514Skmacy
1735167514Skmacy	ifmr->ifm_status |= IFM_ACTIVE;
1736167514Skmacy
1737170654Skmacy	switch (p->link_config.speed) {
1738170654Skmacy	case 10:
1739170654Skmacy		ifmr->ifm_active |= IFM_10_T;
1740170654Skmacy		break;
1741170654Skmacy	case 100:
1742170654Skmacy		ifmr->ifm_active |= IFM_100_TX;
1743170654Skmacy		break;
1744170654Skmacy	case 1000:
1745170654Skmacy		ifmr->ifm_active |= IFM_1000_T;
1746170654Skmacy		break;
1747170654Skmacy	}
1748170654Skmacy
1749167514Skmacy	if (p->link_config.duplex)
1750167514Skmacy		ifmr->ifm_active |= IFM_FDX;
1751167514Skmacy	else
1752167514Skmacy		ifmr->ifm_active |= IFM_HDX;
1753167514Skmacy}
1754167514Skmacy
1755167514Skmacystatic void
1756167514Skmacycxgb_async_intr(void *data)
1757167514Skmacy{
1758167760Skmacy	adapter_t *sc = data;
1759167760Skmacy
1760167514Skmacy	if (cxgb_debug)
1761167760Skmacy		device_printf(sc->dev, "cxgb_async_intr\n");
1762170869Skmacy	/*
1763170869Skmacy	 * May need to sleep - defer to taskqueue
1764170869Skmacy	 */
1765170869Skmacy	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
1766167514Skmacy}
1767167514Skmacy
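/*
 * Deferred handler for external (PHY) interrupts.  Once the PHY interrupt
 * has been serviced, the T3DBG bit is re-armed so further external
 * interrupts can be delivered.
 */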
1768167514Skmacystatic void
1769167514Skmacycxgb_ext_intr_handler(void *arg, int count)
1770167514Skmacy{
1771167514Skmacy	adapter_t *sc = (adapter_t *)arg;
1772167514Skmacy
1773167514Skmacy	if (cxgb_debug)
1774167514Skmacy		printf("cxgb_ext_intr_handler\n");
1775167514Skmacy
1776167514Skmacy	t3_phy_intr_handler(sc);
1777167514Skmacy
1778167514Skmacy	/* Now reenable external interrupts */
1779169978Skmacy	ADAPTER_LOCK(sc);
1780167514Skmacy	if (sc->slow_intr_mask) {
1781167514Skmacy		sc->slow_intr_mask |= F_T3DBG;
1782167514Skmacy		t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
1783167514Skmacy		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1784167514Skmacy	}
1785169978Skmacy	ADAPTER_UNLOCK(sc);
1786167514Skmacy}
1787167514Skmacy
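/*
 * Poll link state for ports whose PHYs cannot interrupt on link changes and
 * refresh if_baudrate from the negotiated speed.
 */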
1788167514Skmacystatic void
1789167746Skmacycheck_link_status(adapter_t *sc)
1790167514Skmacy{
1791167746Skmacy	int i;
1792167514Skmacy
1793167746Skmacy	for (i = 0; i < (sc)->params.nports; ++i) {
1794167746Skmacy		struct port_info *p = &sc->port[i];
1795167514Skmacy
1796170654Skmacy		if (!(p->port_type->caps & SUPPORTED_IRQ))
1797167746Skmacy			t3_link_changed(sc, i);
1798170654Skmacy		p->ifp->if_baudrate = p->link_config.speed * 1000000;
1799167746Skmacy	}
1800167514Skmacy}
1801167514Skmacy
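/*
 * T3B2 MAC watchdog: for each running port, ask the MAC watchdog whether the
 * MAC was toggled (status 1) or needs a full reset (status 2).  In the reset
 * case the MTU, MAC address, rx mode and link are reprogrammed and the port
 * interrupts re-enabled.
 */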
1802167514Skmacystatic void
1803167746Skmacycheck_t3b2_mac(struct adapter *adapter)
1804167514Skmacy{
1805167514Skmacy	int i;
1806167514Skmacy
1807167746Skmacy	for_each_port(adapter, i) {
1808167746Skmacy		struct port_info *p = &adapter->port[i];
1809167746Skmacy		struct ifnet *ifp = p->ifp;
1810167746Skmacy		int status;
1811167514Skmacy
1812167746Skmacy		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1813167746Skmacy			continue;
1814167746Skmacy
1815167746Skmacy		status = 0;
1816167746Skmacy		PORT_LOCK(p);
1817167746Skmacy		if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
1818167746Skmacy			status = t3b2_mac_watchdog_task(&p->mac);
1819167746Skmacy		if (status == 1)
1820167746Skmacy			p->mac.stats.num_toggled++;
1821167746Skmacy		else if (status == 2) {
1822167746Skmacy			struct cmac *mac = &p->mac;
1823167746Skmacy
1824170654Skmacy			t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
1825170654Skmacy			    + ETHER_VLAN_ENCAP_LEN);
1826167746Skmacy			t3_mac_set_address(mac, 0, p->hw_addr);
1827167746Skmacy			cxgb_set_rxmode(p);
1828167746Skmacy			t3_link_start(&p->phy, mac, &p->link_config);
1829167746Skmacy			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1830167746Skmacy			t3_port_intr_enable(adapter, p->port);
1831167746Skmacy			p->mac.stats.num_resets++;
1832167746Skmacy		}
1833167746Skmacy		PORT_UNLOCK(p);
1834167514Skmacy	}
1835167514Skmacy}
1836167514Skmacy
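/*
 * Periodic callout: defer the real work to the tick task and re-arm as long
 * as at least one port is still open.
 */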
1837167746Skmacystatic void
1838167746Skmacycxgb_tick(void *arg)
1839167746Skmacy{
1840167746Skmacy	adapter_t *sc = (adapter_t *)arg;
1841170869Skmacy
1842170869Skmacy	taskqueue_enqueue(sc->tq, &sc->tick_task);
1843170869Skmacy
1844170869Skmacy	if (sc->open_device_map != 0)
1845170869Skmacy		callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
1846170869Skmacy		    cxgb_tick, sc);
1847170869Skmacy}
1848170869Skmacy
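/*
 * Taskqueue half of the tick: poll the link state when link polling is
 * enabled and run the T3B2 MAC watchdog on rev B2 parts.
 */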
1849170869Skmacystatic void
1850170869Skmacycxgb_tick_handler(void *arg, int count)
1851170869Skmacy{
1852170869Skmacy	adapter_t *sc = (adapter_t *)arg;
1853167746Skmacy	const struct adapter_params *p = &sc->params;
1854167746Skmacy
1855170869Skmacy	ADAPTER_LOCK(sc);
1856167746Skmacy	if (p->linkpoll_period)
1857167746Skmacy		check_link_status(sc);
1858167746Skmacy
1859167746Skmacy	/*
1860167746Skmacy	 * adapter lock can currently only be acquired after the
1861167746Skmacy	 * port lock
1862167746Skmacy	 */
1863167746Skmacy	ADAPTER_UNLOCK(sc);
1864170654Skmacy
1865167746Skmacy	if (p->rev == T3_REV_B2)
1866167746Skmacy		check_t3b2_mac(sc);
1867167746Skmacy}
1868167746Skmacy
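/*
 * Range check used by the ioctls below; a negative value means "leave this
 * parameter unchanged" and always passes.
 */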
1869167514Skmacystatic int
1870167514Skmacyin_range(int val, int lo, int hi)
1871167514Skmacy{
1872167514Skmacy	return (val < 0 || (val <= hi && val >= lo));
1873167514Skmacy}
1874167514Skmacy
1875167514Skmacystatic int
1876170654Skmacycxgb_extension_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1877170654Skmacy{
1878170654Skmacy	return (0);
1879170654Skmacy}
1880170654Skmacy
1881170654Skmacystatic int
1882170654Skmacycxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1883170654Skmacy{
1884170654Skmacy	return (0);
1885170654Skmacy}
1886170654Skmacy
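/*
 * Handler for the driver's private character-device ioctls: MDIO access,
 * register peek/poke, SGE context/descriptor dumps, queue set tuning, MTU
 * table and packet scheduler setup, memory/TCAM reads, trace filters and
 * register dumps.  All of these require driver privilege.
 */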
1887170654Skmacystatic int
1888167514Skmacycxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
1889167514Skmacy    int fflag, struct thread *td)
1890167514Skmacy{
1891167514Skmacy	int mmd, error = 0;
1892167514Skmacy	struct port_info *pi = dev->si_drv1;
1893167514Skmacy	adapter_t *sc = pi->adapter;
1894167514Skmacy
1895167514Skmacy#ifdef PRIV_SUPPORTED
1896167514Skmacy	if (priv_check(td, PRIV_DRIVER)) {
1897167514Skmacy		if (cxgb_debug)
1898167514Skmacy			printf("user does not have access to privileged ioctls\n");
1899167514Skmacy		return (EPERM);
1900167514Skmacy	}
1901167514Skmacy#else
1902167514Skmacy	if (suser(td)) {
1903167514Skmacy		if (cxgb_debug)
1904167514Skmacy			printf("user does not have access to privileged ioctls\n");
1905167514Skmacy		return (EPERM);
1906167514Skmacy	}
1907167514Skmacy#endif
1908167514Skmacy
1909167514Skmacy	switch (cmd) {
1910167514Skmacy	case SIOCGMIIREG: {
1911167514Skmacy		uint32_t val;
1912167514Skmacy		struct cphy *phy = &pi->phy;
1913167514Skmacy		struct mii_data *mid = (struct mii_data *)data;
1914167514Skmacy
1915167514Skmacy		if (!phy->mdio_read)
1916167514Skmacy			return (EOPNOTSUPP);
1917167514Skmacy		if (is_10G(sc)) {
1918167514Skmacy			mmd = mid->phy_id >> 8;
1919167514Skmacy			if (!mmd)
1920167514Skmacy				mmd = MDIO_DEV_PCS;
1921167514Skmacy			else if (mmd > MDIO_DEV_XGXS)
1922167514Skmacy				return (EINVAL);
1923167514Skmacy
1924167514Skmacy			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
1925167514Skmacy					     mid->reg_num, &val);
1926167514Skmacy		} else
1927167514Skmacy		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
1928167514Skmacy					     mid->reg_num & 0x1f, &val);
1929167514Skmacy		if (error == 0)
1930167514Skmacy			mid->val_out = val;
1931167514Skmacy		break;
1932167514Skmacy	}
1933167514Skmacy	case SIOCSMIIREG: {
1934167514Skmacy		struct cphy *phy = &pi->phy;
1935167514Skmacy		struct mii_data *mid = (struct mii_data *)data;
1936167514Skmacy
1937167514Skmacy		if (!phy->mdio_write)
1938167514Skmacy			return (EOPNOTSUPP);
1939167514Skmacy		if (is_10G(sc)) {
1940167514Skmacy			mmd = mid->phy_id >> 8;
1941167514Skmacy			if (!mmd)
1942167514Skmacy				mmd = MDIO_DEV_PCS;
1943167514Skmacy			else if (mmd > MDIO_DEV_XGXS)
1944167514Skmacy				return (EINVAL);
1945167514Skmacy
1946167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
1947167514Skmacy					      mmd, mid->reg_num, mid->val_in);
1948167514Skmacy		} else
1949167514Skmacy			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
1950167514Skmacy					      mid->reg_num & 0x1f,
1951167514Skmacy					      mid->val_in);
1952167514Skmacy		break;
1953167514Skmacy	}
1954167514Skmacy	case CHELSIO_SETREG: {
1955167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
1956167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
1957167514Skmacy			return (EFAULT);
1958167514Skmacy		t3_write_reg(sc, edata->addr, edata->val);
1959167514Skmacy		break;
1960167514Skmacy	}
1961167514Skmacy	case CHELSIO_GETREG: {
1962167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
1963167514Skmacy		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
1964167514Skmacy			return (EFAULT);
1965167514Skmacy		edata->val = t3_read_reg(sc, edata->addr);
1966167514Skmacy		break;
1967167514Skmacy	}
1968167514Skmacy	case CHELSIO_GET_SGE_CONTEXT: {
1969167514Skmacy		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
1970167514Skmacy		mtx_lock(&sc->sge.reg_lock);
1971167514Skmacy		switch (ecntxt->cntxt_type) {
1972167514Skmacy		case CNTXT_TYPE_EGRESS:
1973167514Skmacy			error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
1974167514Skmacy			    ecntxt->data);
1975167514Skmacy			break;
1976167514Skmacy		case CNTXT_TYPE_FL:
1977167514Skmacy			error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
1978167514Skmacy			    ecntxt->data);
1979167514Skmacy			break;
1980167514Skmacy		case CNTXT_TYPE_RSP:
1981167514Skmacy			error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
1982167514Skmacy			    ecntxt->data);
1983167514Skmacy			break;
1984167514Skmacy		case CNTXT_TYPE_CQ:
1985167514Skmacy			error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
1986167514Skmacy			    ecntxt->data);
1987167514Skmacy			break;
1988167514Skmacy		default:
1989167514Skmacy			error = EINVAL;
1990167514Skmacy			break;
1991167514Skmacy		}
1992167514Skmacy		mtx_unlock(&sc->sge.reg_lock);
1993167514Skmacy		break;
1994167514Skmacy	}
1995167514Skmacy	case CHELSIO_GET_SGE_DESC: {
1996167514Skmacy		struct ch_desc *edesc = (struct ch_desc *)data;
1997167514Skmacy		int ret;
1998167514Skmacy		if (edesc->queue_num >= SGE_QSETS * 6)
1999167514Skmacy			return (EINVAL);
2000167514Skmacy		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2001167514Skmacy		    edesc->queue_num % 6, edesc->idx, edesc->data);
2002167514Skmacy		if (ret < 0)
2003167514Skmacy			return (EINVAL);
2004167514Skmacy		edesc->size = ret;
2005167514Skmacy		break;
2006167514Skmacy	}
2007167514Skmacy	case CHELSIO_SET_QSET_PARAMS: {
2008167514Skmacy		struct qset_params *q;
2009167514Skmacy		struct ch_qset_params *t = (struct ch_qset_params *)data;
2010167514Skmacy
2011167514Skmacy		if (t->qset_idx >= SGE_QSETS)
2012167514Skmacy			return (EINVAL);
2013167514Skmacy		if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
2014167514Skmacy		    !in_range(t->cong_thres, 0, 255) ||
2015167514Skmacy		    !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
2016167514Skmacy			      MAX_TXQ_ENTRIES) ||
2017167514Skmacy		    !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
2018167514Skmacy			      MAX_TXQ_ENTRIES) ||
2019167514Skmacy		    !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2020167514Skmacy			      MAX_CTRL_TXQ_ENTRIES) ||
2021167514Skmacy		    !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
2022167514Skmacy		    !in_range(t->fl_size[1], MIN_FL_ENTRIES,
2023167514Skmacy			      MAX_RX_JUMBO_BUFFERS) ||
2024167514Skmacy		    !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
2025167514Skmacy			return (EINVAL);
2026167514Skmacy		if ((sc->flags & FULL_INIT_DONE) &&
2027167514Skmacy		    (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
2028167514Skmacy		     t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
2029167514Skmacy		     t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
2030167514Skmacy		     t->polling >= 0 || t->cong_thres >= 0))
2031167514Skmacy			return (EBUSY);
2032167514Skmacy
2033167514Skmacy		q = &sc->params.sge.qset[t->qset_idx];
2034167514Skmacy
2035167514Skmacy		if (t->rspq_size >= 0)
2036167514Skmacy			q->rspq_size = t->rspq_size;
2037167514Skmacy		if (t->fl_size[0] >= 0)
2038167514Skmacy			q->fl_size = t->fl_size[0];
2039167514Skmacy		if (t->fl_size[1] >= 0)
2040167514Skmacy			q->jumbo_size = t->fl_size[1];
2041167514Skmacy		if (t->txq_size[0] >= 0)
2042167514Skmacy			q->txq_size[0] = t->txq_size[0];
2043167514Skmacy		if (t->txq_size[1] >= 0)
2044167514Skmacy			q->txq_size[1] = t->txq_size[1];
2045167514Skmacy		if (t->txq_size[2] >= 0)
2046167514Skmacy			q->txq_size[2] = t->txq_size[2];
2047167514Skmacy		if (t->cong_thres >= 0)
2048167514Skmacy			q->cong_thres = t->cong_thres;
2049167514Skmacy		if (t->intr_lat >= 0) {
2050167514Skmacy			struct sge_qset *qs = &sc->sge.qs[t->qset_idx];
2051167514Skmacy
2052167514Skmacy			q->coalesce_nsecs = t->intr_lat*1000;
2053167514Skmacy			t3_update_qset_coalesce(qs, q);
2054167514Skmacy		}
2055167514Skmacy		break;
2056167514Skmacy	}
2057167514Skmacy	case CHELSIO_GET_QSET_PARAMS: {
2058167514Skmacy		struct qset_params *q;
2059167514Skmacy		struct ch_qset_params *t = (struct ch_qset_params *)data;
2060167514Skmacy
2061167514Skmacy		if (t->qset_idx >= SGE_QSETS)
2062167514Skmacy			return (EINVAL);
2063167514Skmacy
2064167514Skmacy		q = &(sc)->params.sge.qset[t->qset_idx];
2065167514Skmacy		t->rspq_size   = q->rspq_size;
2066167514Skmacy		t->txq_size[0] = q->txq_size[0];
2067167514Skmacy		t->txq_size[1] = q->txq_size[1];
2068167514Skmacy		t->txq_size[2] = q->txq_size[2];
2069167514Skmacy		t->fl_size[0]  = q->fl_size;
2070167514Skmacy		t->fl_size[1]  = q->jumbo_size;
2071167514Skmacy		t->polling     = q->polling;
2072167514Skmacy		t->intr_lat    = q->coalesce_nsecs / 1000;
2073167514Skmacy		t->cong_thres  = q->cong_thres;
2074167514Skmacy		break;
2075167514Skmacy	}
2076167514Skmacy	case CHELSIO_SET_QSET_NUM: {
2077167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2078167514Skmacy		unsigned int port_idx = pi->port;
2079167514Skmacy
2080167514Skmacy		if (sc->flags & FULL_INIT_DONE)
2081167514Skmacy			return (EBUSY);
2082167514Skmacy		if (edata->val < 1 ||
2083167514Skmacy		    (edata->val > 1 && !(sc->flags & USING_MSIX)))
2084167514Skmacy			return (EINVAL);
2085167514Skmacy		if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
2086167514Skmacy			return (EINVAL);
2087167514Skmacy		sc->port[port_idx].nqsets = edata->val;
2088169978Skmacy		sc->port[0].first_qset = 0;
2089167514Skmacy		/*
2090169978Skmacy		 * XXX hardcode ourselves to 2 ports just like LEEENUX
2091167514Skmacy		 */
2092167514Skmacy		sc->port[1].first_qset = sc->port[0].nqsets;
2093167514Skmacy		break;
2094167514Skmacy	}
2095167514Skmacy	case CHELSIO_GET_QSET_NUM: {
2096167514Skmacy		struct ch_reg *edata = (struct ch_reg *)data;
2097167514Skmacy		edata->val = pi->nqsets;
2098167514Skmacy		break;
2099167514Skmacy	}
2100169978Skmacy#ifdef notyet
2101167514Skmacy	case CHELSIO_LOAD_FW:
2102167514Skmacy	case CHELSIO_GET_PM:
2103167514Skmacy	case CHELSIO_SET_PM:
2104167514Skmacy		return (EOPNOTSUPP);
2105167514Skmacy		break;
2106167514Skmacy#endif
2107169978Skmacy	case CHELSIO_SETMTUTAB: {
2108169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2109169978Skmacy		int i;
2110169978Skmacy
2111169978Skmacy		if (!is_offload(sc))
2112169978Skmacy			return (EOPNOTSUPP);
2113169978Skmacy		if (offload_running(sc))
2114169978Skmacy			return (EBUSY);
2115169978Skmacy		if (m->nmtus != NMTUS)
2116169978Skmacy			return (EINVAL);
2117169978Skmacy		if (m->mtus[0] < 81)         /* accommodate SACK */
2118169978Skmacy			return (EINVAL);
2119169978Skmacy
2120169978Skmacy		/*
2121169978Skmacy		 * MTUs must be in ascending order
2122169978Skmacy		 */
2123169978Skmacy		for (i = 1; i < NMTUS; ++i)
2124169978Skmacy			if (m->mtus[i] < m->mtus[i - 1])
2125169978Skmacy				return (EINVAL);
2126169978Skmacy
2127169978Skmacy		memcpy(sc->params.mtus, m->mtus,
2128169978Skmacy		       sizeof(sc->params.mtus));
2129169978Skmacy		break;
2130169978Skmacy	}
2131169978Skmacy	case CHELSIO_GETMTUTAB: {
2132169978Skmacy		struct ch_mtus *m = (struct ch_mtus *)data;
2133169978Skmacy
2134169978Skmacy		if (!is_offload(sc))
2135169978Skmacy			return (EOPNOTSUPP);
2136169978Skmacy
2137169978Skmacy		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2138169978Skmacy		m->nmtus = NMTUS;
2139169978Skmacy		break;
2140169978Skmacy	}
2141169978Skmacy	case CHELSIO_DEVUP:
2142169978Skmacy		if (!is_offload(sc))
2143169978Skmacy			return (EOPNOTSUPP);
2144169978Skmacy		return offload_open(pi);
2145169978Skmacy		break;
2146167514Skmacy	case CHELSIO_GET_MEM: {
2147167514Skmacy		struct ch_mem_range *t = (struct ch_mem_range *)data;
2148167514Skmacy		struct mc7 *mem;
2149167514Skmacy		uint8_t *useraddr;
2150167514Skmacy		u64 buf[32];
2151167514Skmacy
2152167514Skmacy		if (!is_offload(sc))
2153167514Skmacy			return (EOPNOTSUPP);
2154167514Skmacy		if (!(sc->flags & FULL_INIT_DONE))
2155167514Skmacy			return (EIO);         /* need the memory controllers */
2156167514Skmacy		if ((t->addr & 0x7) || (t->len & 0x7))
2157167514Skmacy			return (EINVAL);
2158167514Skmacy		if (t->mem_id == MEM_CM)
2159167514Skmacy			mem = &sc->cm;
2160167514Skmacy		else if (t->mem_id == MEM_PMRX)
2161167514Skmacy			mem = &sc->pmrx;
2162167514Skmacy		else if (t->mem_id == MEM_PMTX)
2163167514Skmacy			mem = &sc->pmtx;
2164167514Skmacy		else
2165167514Skmacy			return (EINVAL);
2166167514Skmacy
2167167514Skmacy		/*
2168167514Skmacy		 * Version scheme:
2169167514Skmacy		 * bits 0..9: chip version
2170167514Skmacy		 * bits 10..15: chip revision
2171167514Skmacy		 */
2172167514Skmacy		t->version = 3 | (sc->params.rev << 10);
2173167514Skmacy
2174167514Skmacy		/*
2175167514Skmacy		 * Read 256 bytes at a time as len can be large and we don't
2176167514Skmacy		 * want to use huge intermediate buffers.
2177167514Skmacy		 */
2178167514Skmacy		useraddr = (uint8_t *)(t + 1);   /* advance to start of buffer */
2179167514Skmacy		while (t->len) {
2180167514Skmacy			unsigned int chunk = min(t->len, sizeof(buf));
2181167514Skmacy
2182167514Skmacy			error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
2183167514Skmacy			if (error)
2184167514Skmacy				return (-error);
2185167514Skmacy			if (copyout(buf, useraddr, chunk))
2186167514Skmacy				return (EFAULT);
2187167514Skmacy			useraddr += chunk;
2188167514Skmacy			t->addr += chunk;
2189167514Skmacy			t->len -= chunk;
2190167514Skmacy		}
2191167514Skmacy		break;
2192167514Skmacy	}
2193169978Skmacy	case CHELSIO_READ_TCAM_WORD: {
2194169978Skmacy		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2195169978Skmacy
2196169978Skmacy		if (!is_offload(sc))
2197169978Skmacy			return (EOPNOTSUPP);
2198169978Skmacy		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2199169978Skmacy		break;
2200169978Skmacy	}
2201167514Skmacy	case CHELSIO_SET_TRACE_FILTER: {
2202167514Skmacy		struct ch_trace *t = (struct ch_trace *)data;
2203167514Skmacy		const struct trace_params *tp;
2204167514Skmacy
2205167514Skmacy		tp = (const struct trace_params *)&t->sip;
2206167514Skmacy		if (t->config_tx)
2207167514Skmacy			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2208167514Skmacy					       t->trace_tx);
2209167514Skmacy		if (t->config_rx)
2210167514Skmacy			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2211167514Skmacy					       t->trace_rx);
2212167514Skmacy		break;
2213167514Skmacy	}
2214167514Skmacy	case CHELSIO_SET_PKTSCHED: {
2215167514Skmacy		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2216167514Skmacy		if (sc->open_device_map == 0)
2217167514Skmacy			return (EAGAIN);
2218167514Skmacy		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2219167514Skmacy		    p->binding);
2220167514Skmacy		break;
2221167514Skmacy	}
2222167514Skmacy	case CHELSIO_IFCONF_GETREGS: {
2223167514Skmacy		struct ifconf_regs *regs = (struct ifconf_regs *)data;
2224167514Skmacy		int reglen = cxgb_get_regs_len();
2225167514Skmacy		uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);
2226167514Skmacy		if (buf == NULL)
2227167514Skmacy			return (ENOMEM);
2228167514Skmacy		if (regs->len > reglen)
2229167514Skmacy			regs->len = reglen;
2230167514Skmacy		else if (regs->len < reglen) {
2231167514Skmacy			error = E2BIG;
2232167514Skmacy			goto done;
2233167514Skmacy		}
2234167514Skmacy		cxgb_get_regs(sc, regs, buf);
2235167514Skmacy		error = copyout(buf, regs->data, reglen);
2236167514Skmacy
2237167514Skmacy		done:
2238167514Skmacy		free(buf, M_DEVBUF);
2239167514Skmacy
2240167514Skmacy		break;
2241167514Skmacy	}
2242169978Skmacy	case CHELSIO_SET_HW_SCHED: {
2243169978Skmacy		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2244169978Skmacy		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2245169978Skmacy
2246169978Skmacy		if ((sc->flags & FULL_INIT_DONE) == 0)
2247169978Skmacy			return (EAGAIN);       /* need TP to be initialized */
2248169978Skmacy		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2249169978Skmacy		    !in_range(t->channel, 0, 1) ||
2250169978Skmacy		    !in_range(t->kbps, 0, 10000000) ||
2251169978Skmacy		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2252169978Skmacy		    !in_range(t->flow_ipg, 0,
2253169978Skmacy			      dack_ticks_to_usec(sc, 0x7ff)))
2254169978Skmacy			return (EINVAL);
2255169978Skmacy
2256169978Skmacy		if (t->kbps >= 0) {
2257169978Skmacy			error = t3_config_sched(sc, t->kbps, t->sched);
2258169978Skmacy			if (error < 0)
2259169978Skmacy				return (-error);
2260169978Skmacy		}
2261169978Skmacy		if (t->class_ipg >= 0)
2262169978Skmacy			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2263169978Skmacy		if (t->flow_ipg >= 0) {
2264169978Skmacy			t->flow_ipg *= 1000;     /* us -> ns */
2265169978Skmacy			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2266169978Skmacy		}
2267169978Skmacy		if (t->mode >= 0) {
2268169978Skmacy			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2269169978Skmacy
2270169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2271169978Skmacy					 bit, t->mode ? bit : 0);
2272169978Skmacy		}
2273169978Skmacy		if (t->channel >= 0)
2274169978Skmacy			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2275169978Skmacy					 1 << t->sched, t->channel << t->sched);
2276169978Skmacy		break;
2277169978Skmacy	}
2278167514Skmacy	default:
2279167514Skmacy		return (EOPNOTSUPP);
2280167514Skmacy		break;
2281167514Skmacy	}
2282167514Skmacy
2283167514Skmacy	return (error);
2284167514Skmacy}
2285167514Skmacy
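/*
 * Copy the registers in [start, end] into the dump buffer at byte offset
 * start, one 32-bit read at a time.
 */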
2286167514Skmacystatic __inline void
2287167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
2288167514Skmacy    unsigned int end)
2289167514Skmacy{
2290167514Skmacy	uint32_t *p = (uint32_t *)(buf + start);
2291167514Skmacy
2292167514Skmacy	for ( ; start <= end; start += sizeof(uint32_t))
2293167514Skmacy		*p++ = t3_read_reg(ap, start);
2294167514Skmacy}
2295167514Skmacy
2296167514Skmacy#define T3_REGMAP_SIZE (3 * 1024)
2297167514Skmacystatic int
2298167514Skmacycxgb_get_regs_len(void)
2299167514Skmacy{
2300167514Skmacy	return T3_REGMAP_SIZE;
2301167514Skmacy}
2302167514Skmacy#undef T3_REGMAP_SIZE
2303167514Skmacy
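/*
 * Build the register dump returned by CHELSIO_IFCONF_GETREGS.  The version
 * word encodes the chip type in bits 0..9 and the revision in bits 10..15,
 * so a rev 2 part, for example, reports 3 | (2 << 10) = 0x803 (plus bit 31
 * set on PCIe cards).
 */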
2304167514Skmacystatic void
2305167514Skmacycxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
2306167514Skmacy{
2307167514Skmacy
2308167514Skmacy	/*
2309167514Skmacy	 * Version scheme:
2310167514Skmacy	 * bits 0..9: chip version
2311167514Skmacy	 * bits 10..15: chip revision
2312167514Skmacy	 * bit 31: set for PCIe cards
2313167514Skmacy	 */
2314167514Skmacy	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
2315167514Skmacy
2316167514Skmacy	/*
2317167514Skmacy	 * We skip the MAC statistics registers because they are clear-on-read.
2318167514Skmacy	 * Also reading multi-register stats would need to synchronize with the
2319167514Skmacy	 * periodic mac stats accumulation.  Hard to justify the complexity.
2320167514Skmacy	 */
2321167514Skmacy	memset(buf, 0, REGDUMP_SIZE);
2322167514Skmacy	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
2323167514Skmacy	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
2324167514Skmacy	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
2325167514Skmacy	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
2326167514Skmacy	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
2327167514Skmacy	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
2328167514Skmacy		       XGM_REG(A_XGM_SERDES_STAT3, 1));
2329167514Skmacy	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
2330167514Skmacy		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
2331167514Skmacy}
2332