cxgb_main.c revision 175389
/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 175389 2008-01-16 21:33:34Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif

#ifdef PRIV_SUPPORTED
#include <sys/priv.h>
#endif

#ifdef IFNET_MULTIQUEUE
#include <machine/intr_machdep.h>
#endif

static int cxgb_setup_msix(adapter_t *, int);
static void cxgb_teardown_msix(adapter_t *);
static void cxgb_init(void *);
static void cxgb_init_locked(struct port_info *);
static void cxgb_stop_locked(struct port_info *);
static void cxgb_set_rxmode(struct port_info *);
static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgb_media_change(struct ifnet *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_ext_intr_handler(void *, int);
static void cxgb_tick_handler(void *, int);
static void cxgb_down_locked(struct adapter *sc);
static void cxgb_tick(void *);
static void setup_rss(adapter_t *sc);

/* Attachment glue for the PCI controller end of the device.  Each port of
 * the device is attached separately, as defined later.
 */
static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end);
static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
static int cxgb_get_regs_len(void);
static int offload_open(struct port_info *pi);
static void touch_bars(device_t dev);
static int offload_close(struct t3cdev *tdev);

static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe,		cxgb_controller_probe),
	DEVMETHOD(device_attach,	cxgb_controller_attach),
	DEVMETHOD(device_detach,	cxgb_controller_detach),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t cxgb_controller_driver = {
	"cxgbc",
	cxgb_controller_methods,
	sizeof(struct adapter)
};

static devclass_t	cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);

/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);

static device_method_t cxgb_port_methods[] = {
	DEVMETHOD(device_probe,		cxgb_port_probe),
	DEVMETHOD(device_attach,	cxgb_port_attach),
	DEVMETHOD(device_detach,	cxgb_port_detach),
	{ 0, 0 }
};

static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

static struct cdevsw cxgb_cdevsw = {
       .d_version =    D_VERSION,
       .d_flags =      0,
       .d_open =       cxgb_extension_open,
       .d_close =      cxgb_extension_close,
       .d_ioctl =      cxgb_extension_ioctl,
       .d_name =       "cxgb",
};

static devclass_t	cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);

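/* One MSI-X vector per queue set, plus one for link/error (async) events. */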
#define SGE_MSIX_COUNT (SGE_QSETS + 1)

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
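 *
 * As with any FreeBSD loader tunable, the value can be set at boot time in
 * /boot/loader.conf, e.g. hw.cxgb.msi_allowed="1".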
 */
static int msi_allowed = 2;

TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;
TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
    "disable ULP offload");

/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use singleq = 1.
 */
static int singleq = 0;
TUNABLE_INT("hw.cxgb.singleq", &singleq);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
    "use a single queue-set per port");

int cxgb_use_16k_clusters = 0;
TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32,
	MIN_FL_JUMBO_ENTRIES = 32
};

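/* Specification of a single hardware packet filter. */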
struct filter_info {
	u32 sip;
	u32 sip_mask;
	u32 dip;
	u16 sport;
	u16 dport;
	u32 vlan:12;
	u32 vlan_prio:3;
	u32 mac_hit:1;
	u32 mac_idx:4;
	u32 mac_vld:1;
	u32 pkt_type:2;
	u32 report_filter_id:1;
	u32 pass:1;
	u32 rss:1;
	u32 qset:3;
	u32 locked:1;
	u32 valid:1;
};

enum { FILTER_NO_VLAN_PRI = 7 };

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Table for probing the cards.  The desc field isn't actually used */
struct cxgb_ident {
	uint16_t	vendor;
	uint16_t	device;
	int		index;
	char		*desc;
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{0, 0, 0, NULL}
};

static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);

static __inline char
t3rev2char(struct adapter *adapter)
{
	char rev = 'z';

	switch(adapter->params.rev) {
	case T3_REV_A:
		rev = 'a';
		break;
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static struct cxgb_ident *
cxgb_get_ident(device_t dev)
{
	struct cxgb_ident *id;

	for (id = cxgb_identifiers; id->desc != NULL; id++) {
		if ((id->vendor == pci_get_vendor(dev)) &&
		    (id->device == pci_get_device(dev))) {
			return (id);
		}
	}
	return (NULL);
}

static const struct adapter_info *
cxgb_get_adapter_info(device_t dev)
{
	struct cxgb_ident *id;
	const struct adapter_info *ai;

	id = cxgb_get_ident(dev);
	if (id == NULL)
		return (NULL);

	ai = t3_get_adapter_info(id->index);

	return (ai);
}

static int
cxgb_controller_probe(device_t dev)
{
	const struct adapter_info *ai;
	char *ports, buf[80];
	int nports;

	ai = cxgb_get_adapter_info(dev);
	if (ai == NULL)
		return (ENXIO);

	nports = ai->nports0 + ai->nports1;
	if (nports == 1)
		ports = "port";
	else
		ports = "ports";

	snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, nports, ports);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_DEFAULT);
}

#define FW_FNAME "t3fw%d%d%d"
#define TPEEPROM_NAME "t3%ctpe%d%d%d"
#define TPSRAM_NAME "t3%cps%d%d%d"
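
/*
 * The templates above expand into firmware(9) image names; FW_FNAME with
 * firmware version 5.0.0, for example, would yield "t3fw500" (the version
 * digits here are illustrative).
 */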

static int
upgrade_fw(adapter_t *sc)
{
	char buf[32];
#ifdef FIRMWARE_LATEST
	const struct firmware *fw;
#else
	struct firmware *fw;
#endif
	int status;

	snprintf(&buf[0], sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
	    FW_VERSION_MINOR, FW_VERSION_MICRO);

	fw = firmware_get(buf);

	if (fw == NULL) {
		device_printf(sc->dev, "Could not find firmware image %s\n", buf);
		return (ENOENT);
	} else
		device_printf(sc->dev, "updating firmware on card with %s\n", buf);
	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);

	device_printf(sc->dev, "firmware update returned %s %d\n",
	    (status == 0) ? "success" : "fail", status);

	firmware_put(fw, FIRMWARE_UNLOAD);

	return (status);
}

static int
cxgb_controller_attach(device_t dev)
{
	device_t child;
	const struct adapter_info *ai;
	struct adapter *sc;
	int i, error = 0;
	uint32_t vers;
	int port_qsets = 1;
#ifdef MSI_SUPPORTED
	int msi_needed, reg;
#endif
	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_count = 0;
	ai = cxgb_get_adapter_info(dev);

	/*
	 * XXX not really related but a recent addition
	 */
#ifdef MSI_SUPPORTED
	/* find the PCIe link width and set max read request to 4KB */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t lnk, pectl;
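
		/*
		 * Link Status lives at cap + 0x12 (negotiated width in bits
		 * 9:4); Device Control lives at cap + 0x8, where bits 14:12
		 * select the max read request size and 5 means 4096 bytes.
		 */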
		lnk = pci_read_config(dev, reg + 0x12, 2);
		sc->link_width = (lnk >> 4) & 0x3f;

		pectl = pci_read_config(dev, reg + 0x8, 2);
		pectl = (pectl & ~0x7000) | (5 << 12);
		pci_write_config(dev, reg + 0x8, pectl, 2);
	}

	if (sc->link_width != 0 && sc->link_width <= 4 &&
	    (ai->nports0 + ai->nports1) <= 2) {
		device_printf(sc->dev,
		    "PCIe x%d Link, expect reduced performance\n",
		    sc->link_width);
	}
#endif
	touch_bars(dev);
	pci_enable_busmaster(dev);
	/*
	 * Allocate the registers and make them available to the driver.
	 * The registers that we care about for NIC mode are in BAR 0
	 */
	sc->regs_rid = PCIR_BAR(0);
	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate BAR\n");
		return (ENXIO);
	}

	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
	    device_get_unit(dev));
	ADAPTER_LOCK_INIT(sc, sc->lockbuf);

	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
	    device_get_unit(dev));
	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
	    device_get_unit(dev));
	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
	    device_get_unit(dev));

	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);

	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	if (t3_prep_adapter(sc, ai, 1) < 0) {
		printf("prep adapter failed\n");
		error = ENODEV;
		goto out;
	}
	/* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
	 * enough messages for the queue sets.  If that fails, try falling
	 * back to MSI.  If that fails, then try falling back to the legacy
	 * interrupt pin model.
	 */
#ifdef MSI_SUPPORTED

	sc->msix_regs_rid = 0x20;
	if ((msi_allowed >= 2) &&
	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {

		msi_needed = sc->msi_count = SGE_MSIX_COUNT;

		if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
		    (sc->msi_count != msi_needed)) {
			device_printf(dev, "msix allocation failed - msi_count = %d"
			    " msi_needed=%d will try msi err=%d\n", sc->msi_count,
			    msi_needed, error);
			sc->msi_count = 0;
			pci_release_msi(dev);
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->msix_regs_rid, sc->msix_regs_res);
			sc->msix_regs_res = NULL;
		} else {
			sc->flags |= USING_MSIX;
			sc->cxgb_intr = t3_intr_msix;
		}
	}

	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
		sc->msi_count = 1;
		if (pci_alloc_msi(dev, &sc->msi_count)) {
			device_printf(dev, "alloc msi failed - will try INTx\n");
			sc->msi_count = 0;
			pci_release_msi(dev);
		} else {
			sc->flags |= USING_MSI;
			sc->irq_rid = 1;
			sc->cxgb_intr = t3_intr_msi;
		}
	}
#endif
	if (sc->msi_count == 0) {
		device_printf(dev, "using line interrupts\n");
		sc->irq_rid = 0;
		sc->cxgb_intr = t3b_intr;
	}

	/* Create a private taskqueue thread for handling driver events */
#ifdef TASKQUEUE_CURRENT
	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
#else
	sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
#endif
	if (sc->tq == NULL) {
		device_printf(dev, "failed to allocate controller task queue\n");
		error = ENOMEM;
		goto out;
	}

	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));
	TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);

	/* Create a periodic callout for checking adapter status */
	callout_init(&sc->cxgb_tick_ch, TRUE);

	if (t3_check_fw_version(sc) != 0) {
		/*
		 * Warn user that a firmware update will be attempted in init.
		 */
		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
		sc->flags &= ~FW_UPTODATE;
	} else {
		sc->flags |= FW_UPTODATE;
	}

	if (t3_check_tpsram_version(sc) != 0) {
		/*
		 * Warn user that an SRAM update will be attempted in init.
		 */
		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		sc->flags &= ~TPS_UPTODATE;
	} else {
		sc->flags |= TPS_UPTODATE;
	}

	if ((sc->flags & USING_MSIX) && !singleq)
		port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);

	/*
	 * Create a child device for each MAC.  The ethernet attachment
	 * will be done in these children.
	 */
	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi;

		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
			device_printf(dev, "failed to add child port\n");
			error = EINVAL;
			goto out;
		}
		pi = &sc->port[i];
		pi->adapter = sc;
		pi->nqsets = port_qsets;
		pi->first_qset = i*port_qsets;
		pi->port_id = i;
		pi->tx_chan = i >= ai->nports0;
		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
		sc->rxpkt_map[pi->txpkt_intf] = i;
		sc->portdev[i] = child;
		device_set_softc(child, pi);
	}
	if ((error = bus_generic_attach(dev)) != 0)
		goto out;

	/*
	 * XXX need to poll for link status
	 */
	sc->params.stats_update_period = 1;

	/* initialize sge private state */
	t3_sge_init_adapter(sc);

	t3_led_ready(sc);

	cxgb_offload_init();
	if (is_offload(sc)) {
		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_adapter_ofld(sc);
	}
	error = t3_get_fw_version(sc, &vers);
	if (error)
		goto out;

	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
	    G_FW_VERSION_MICRO(vers));

	t3_add_attach_sysctls(sc);
out:
	if (error)
		cxgb_free(sc);

	return (error);
}

static int
cxgb_controller_detach(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);

	cxgb_free(sc);

	return (0);
}

static void
cxgb_free(struct adapter *sc)
{
	int i;

	cxgb_pcpu_shutdown_threads(sc);
	ADAPTER_LOCK(sc);
	/*
	 * cxgb_down_locked() drops the adapter lock.
	 */
	cxgb_down_locked(sc);

#ifdef MSI_SUPPORTED
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}
#endif
	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}

	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->ext_intr_task);
		taskqueue_drain(sc->tq, &sc->tick_task);
	}
	t3_sge_deinit_sw(sc);
	/*
	 * Wait for the last callout to finish.
	 */
	DELAY(hz*100);

	for (i = 0; i < (sc)->params.nports; ++i) {
		if (sc->portdev[i] != NULL)
			device_delete_child(sc->dev, sc->portdev[i]);
	}

	bus_generic_detach(sc->dev);
	if (sc->tq != NULL)
		taskqueue_free(sc->tq);
	if (is_offload(sc)) {
		cxgb_adapter_unofld(sc);
		if (isset(&sc->open_device_map,	OFFLOAD_DEVMAP_BIT))
			offload_close(&sc->tdev);
		else
			printf("cxgb_free: DEVMAP_BIT not set\n");
	} else
		printf("cxgb_free: offload not enabled\n");
	free(sc->filters, M_DEVBUF);
	t3_sge_free(sc);

	cxgb_offload_exit();

	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	ADAPTER_LOCK_DEINIT(sc);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;

	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				t3_free_sge_resources(sc);
				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
				    err);
				return (err);
			}
		}
	}

	return (0);
}

static void
cxgb_teardown_msix(adapter_t *sc)
{
	int i, nqsets;

	for (nqsets = i = 0; i < (sc)->params.nports; i++)
		nqsets += sc->port[i].nqsets;

	for (i = 0; i < nqsets; i++) {
		if (sc->msix_intr_tag[i] != NULL) {
			bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
			    sc->msix_intr_tag[i]);
			sc->msix_intr_tag[i] = NULL;
		}
		if (sc->msix_irq_res[i] != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    sc->msix_irq_rid[i], sc->msix_irq_res[i]);
			sc->msix_irq_res[i] = NULL;
		}
	}
}

static int
cxgb_setup_msix(adapter_t *sc, int msix_count)
{
	int i, j, k, nqsets, rid;

	/* The first message indicates link changes and error conditions */
	sc->irq_rid = 1;
	if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	   &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->dev, "Cannot allocate msix interrupt\n");
		return (EINVAL);
	}

	if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
		NULL,
#endif
		cxgb_async_intr, sc, &sc->intr_tag)) {
		device_printf(sc->dev, "Cannot set up interrupt\n");
		return (EINVAL);
	}
	for (i = k = 0; i < (sc)->params.nports; i++) {
		nqsets = sc->port[i].nqsets;
		for (j = 0; j < nqsets; j++, k++) {
			struct sge_qset *qs = &sc->sge.qs[k];

			rid = k + 2;
			if (cxgb_debug)
				printf("rid=%d ", rid);
			if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
			    sc->dev, SYS_RES_IRQ, &rid,
			    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
				device_printf(sc->dev, "Cannot allocate "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
			sc->msix_irq_rid[k] = rid;
			printf("setting up interrupt for port=%d\n",
			    qs->port->port_id);
			if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
				INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
				NULL,
#endif
				t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
				device_printf(sc->dev, "Cannot set up "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
#ifdef IFNET_MULTIQUEUE
			if (singleq == 0) {
				int vector = rman_get_start(sc->msix_irq_res[k]);
				if (bootverbose)
					device_printf(sc->dev,
					    "binding vector=%d to cpu=%d\n",
					    vector, k % mp_ncpus);
				intr_bind(vector, k % mp_ncpus);
			}
#endif
		}
	}

	return (0);
}

static int
cxgb_port_probe(device_t dev)
{
	struct port_info *p;
	char buf[80];

	p = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, p->port_type->desc);
	device_set_desc_copy(dev, buf);
	return (0);
}

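/*
 * Create the /dev/cxgbN character device through which the privileged
 * configuration ioctls (cxgb_extension_ioctl) are issued.
 */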
static int
cxgb_makedev(struct port_info *pi)
{

	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
	    UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));

	if (pi->port_cdev == NULL)
		return (ENOMEM);

	pi->port_cdev->si_drv1 = (void *)pi;

	return (0);
}

#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
#else
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
#define IFCAP_TSO4 0x0
#define IFCAP_TSO6 0x0
#define CSUM_TSO   0x0
#endif

static int
cxgb_port_attach(device_t dev)
{
	struct port_info *p;
	struct ifnet *ifp;
	int err, media_flags;

	p = device_get_softc(dev);

	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
	    device_get_unit(device_get_parent(dev)), p->port_id);
	PORT_LOCK_INIT(p, p->lockbuf);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}

	/*
	 * Note that there is currently no watchdog timer.
	 */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = cxgb_init;
	ifp->if_softc = p;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cxgb_ioctl;
	ifp->if_start = cxgb_start;

#ifdef IFNET_MULTIQUEUE
	ifp->if_flags |= IFF_MULTIQ;
	ifp->if_mq_start = cxgb_pcpu_start;
#endif

	ifp->if_timer = 0;	/* Disable ifnet watchdog */
	ifp->if_watchdog = NULL;

	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
	ifp->if_capabilities |= CXGB_CAP;
	ifp->if_capenable |= CXGB_CAP_ENABLE;
	ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
	/*
	 * disable TSO on 4-port - it isn't supported by the firmware yet
	 */
	if (p->adapter->params.nports > 2) {
		ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
		ifp->if_hwassist &= ~CSUM_TSO;
	}

	ether_ifattach(ifp, p->hw_addr);
	/*
	 * Only default to jumbo frames on 10GigE
	 */
	if (p->adapter->params.nports <= 2)
		ifp->if_mtu = 9000;
	if ((err = cxgb_makedev(p)) != 0) {
		printf("makedev failed %d\n", err);
		return (err);
	}
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);

	if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
		media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-SR")) {
		media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-XR")) {
		media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) {
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		media_flags = 0;
	} else {
		printf("unsupported media type %s\n", p->port_type->desc);
		return (ENXIO);
	}
	if (media_flags) {
		ifmedia_add(&p->media, media_flags, 0, NULL);
		ifmedia_set(&p->media, media_flags);
	} else {
		ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
	}

	snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port_id);
#ifdef TASKQUEUE_CURRENT
	/* Create a taskqueue for handling TX without starvation */
	p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
	    taskqueue_thread_enqueue, &p->tq);
#else
	/* Create a taskqueue for handling TX without starvation */
	p->tq = taskqueue_create_fast(p->taskqbuf, M_NOWAIT,
	    taskqueue_thread_enqueue, &p->tq);
#endif
	t3_sge_init_port(p);

	return (0);
}

static int
cxgb_port_detach(device_t dev)
{
	struct port_info *p;

	p = device_get_softc(dev);

	PORT_LOCK(p);
	if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
		cxgb_stop_locked(p);
	PORT_UNLOCK(p);

	if (p->tq != NULL) {
		taskqueue_drain(p->tq, &p->start_task);
		taskqueue_free(p->tq);
		p->tq = NULL;
	}

	ether_ifdetach(p->ifp);
	printf("waiting for callout to stop ...");
	DELAY(1000000);
	printf("done\n");
	/*
	 * the lock may be acquired in ifdetach
	 */
	PORT_LOCK_DEINIT(p);
	if_free(p->ifp);

	if (p->port_cdev != NULL)
		destroy_dev(p->port_cdev);

	return (0);
}

void
t3_fatal_err(struct adapter *sc)
{
	u_int fw_status[4];

	if (sc->flags & FULL_INIT_DONE) {
		t3_sge_stop(sc);
		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(sc);
	}
	device_printf(sc->dev, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
		device_printf(sc->dev, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}

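/*
 * Walk the standard PCI capability list and return the config-space offset
 * of capability 'cap', or 0 if the device does not advertise it.
 */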
int
t3_os_find_pci_capability(adapter_t *sc, int cap)
{
	device_t dev;
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	uint32_t status;
	uint8_t ptr;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);
	cfg = &dinfo->cfg;

	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & PCIM_STATUS_CAPPRESENT))
		return (0);

	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptr = PCIR_CAP_PTR_2;
		break;
	default:
		return (0);
	}
	ptr = pci_read_config(dev, ptr, 1);

	while (ptr != 0) {
		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
			return (ptr);
		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
	}

	return (0);
}

int
t3_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t3_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_status: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
     int duplex, int fc)
{
	struct port_info *pi = &adapter->port[port_id];
	struct cmac *mac = &adapter->port[port_id].mac;

	if ((pi->ifp->if_flags & IFF_UP) == 0)
		return;

	if (link_status) {
		t3_mac_enable(mac, MAC_DIRECTION_RX);
		if_link_state_change(pi->ifp, LINK_STATE_UP);
	} else {
		if_link_state_change(pi->ifp, LINK_STATE_DOWN);
		pi->phy.ops->power_down(&pi->phy, 1);
		t3_mac_disable(mac, MAC_DIRECTION_RX);
		t3_link_start(&pi->phy, mac, &pi->link_config);
	}
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void
t3_os_ext_intr_handler(adapter_t *sc)
{
	if (cxgb_debug)
		printf("t3_os_ext_intr_handler\n");
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
		taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
	}
	ADAPTER_UNLOCK(sc);
}

void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called, as this
	 * is called early on in attach by t3_prep_adapter, so save the
	 * address off in the port structure.
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}

/**
 *	cxgb_link_start - enable a port
 *	@p: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void
cxgb_link_start(struct port_info *p)
{
	struct ifnet *ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	ifp = p->ifp;

	t3_init_rx_mode(&rm, p);
	if (!mac->multiport)
		t3_mac_reset(mac);
	t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
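 *
 *	For example, with two ports of two queue sets each, nq[0] = nq[1] = 2:
 *	the first half of the table cycles through response queues 0 and 1
 *	(port 0) and the second half through queues 2 and 3 (port 1).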
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq[2];
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;

	nq[0] = nq[1] = 0;
	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		nq[pi->tx_chan] += pi->nqsets;
	}
	nq[0] = max(nq[0], 1U);
	nq[1] = max(nq[1], 1U);
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq[0];
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq[1]) + nq[0];
	}
	/* Calculate the reverse RSS map table */
	for (i = 0; i < RSS_TABLE_SIZE; ++i)
		if (adap->rrss_map[rspq_map[i]] == 0xff)
			adap->rrss_map[rspq_map[i]] = i;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * Sends an mbuf to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int
offload_tx(struct t3cdev *tdev, struct mbuf *m)
{
	int ret;

	ret = t3_offload_tx(tdev, m);
	return (ret);
}

static int
write_smt_entry(struct adapter *adapter, int idx)
{
	struct port_info *pi = &adapter->port[idx];
	struct cpl_smt_write_req *req;
	struct mbuf *m;

	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
		return (ENOMEM);

	req = mtod(m, struct cpl_smt_write_req *);
	m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);

	m_set_priority(m, 1);

	offload_tx(&adapter->tdev, m);

	return (0);
}

static int
init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void
init_port_mtus(adapter_t *adapter)
{
	unsigned int mtus = adapter->port[0].ifp->if_mtu;

	if (adapter->port[1].ifp)
		mtus |= adapter->port[1].ifp->if_mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct mbuf *m;
	struct mngt_pktsched_wr *req;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m) {
		req = mtod(m, struct mngt_pktsched_wr *);
		req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
		req->sched = sched;
		req->idx = qidx;
		req->min = lo;
		req->max = hi;
		req->binding = port;
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		t3_mgmt_tx(adap, m);
	}
}

static void
bind_qsets(adapter_t *sc)
{
	int i, j;

	cxgb_pcpu_startup_threads(sc);
	for (i = 0; i < (sc)->params.nports; ++i) {
		const struct port_info *pi = adap2pinfo(sc, i);

		for (j = 0; j < pi->nqsets; ++j) {
			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
					  -1, pi->tx_chan);
		}
	}
}

static void
update_tpeeprom(struct adapter *adap)
{
#ifdef FIRMWARE_LATEST
	const struct firmware *tpeeprom;
#else
	struct firmware *tpeeprom;
#endif

	char buf[64];
	uint32_t version;
	unsigned int major, minor;
	int ret, len;
	char rev;

	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);

	major = G_TP_VERSION_MAJOR(version);
	minor = G_TP_VERSION_MINOR(version);
	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return;

	rev = t3rev2char(adap);

	snprintf(buf, sizeof(buf), TPEEPROM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	tpeeprom = firmware_get(buf);
	if (tpeeprom == NULL) {
		device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n",
			buf);
		return;
	}

	len = tpeeprom->datasize - 4;

	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
	if (ret)
		goto release_tpeeprom;

	if (len != TP_SRAM_LEN) {
		device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n",
		    buf, len, TP_SRAM_LEN);
		goto release_tpeeprom;
	}

	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
	    TP_SRAM_OFFSET);

	if (!ret) {
		device_printf(adap->dev,
			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	} else
		device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n");

release_tpeeprom:
	firmware_put(tpeeprom, FIRMWARE_UNLOAD);

	return;
}

static int
update_tpsram(struct adapter *adap)
{
#ifdef FIRMWARE_LATEST
	const struct firmware *tpsram;
#else
	struct firmware *tpsram;
#endif
	char buf[64];
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	update_tpeeprom(adap);

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	tpsram = firmware_get(buf);
	if (tpsram == NULL) {
		device_printf(adap->dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return (EINVAL);
	} else
		device_printf(adap->dev, "updating TP SRAM with %s\n", buf);

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret)
		device_printf(adap->dev, "loading protocol SRAM failed\n");

release_tpsram:
	firmware_put(tpsram, FIRMWARE_UNLOAD);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;

	if ((sc->flags & FULL_INIT_DONE) == 0) {

		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;
		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;
		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		setup_rss(sc);
		t3_add_configured_sysctls(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);

	/* If it's MSI or INTx, allocate a single interrupt for everything */
	if ((sc->flags & USING_MSIX) == 0) {
		if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		   &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n",
			    sc->irq_rid);
			err = EINVAL;
			goto out;
		}
		device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);

		if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
			NULL,
#endif
			sc->cxgb_intr, sc, &sc->intr_tag)) {
			device_printf(sc->dev, "Cannot set up interrupt\n");
			err = EINVAL;
			goto irq_err;
		}
	} else {
		cxgb_setup_msix(sc, sc->msi_count);
	}

	t3_sge_start(sc);
	t3_intr_enable(sc);

	if (!(sc->flags & QUEUES_BOUND)) {
		printf("bind qsets\n");
		bind_qsets(sc);
		sc->flags |= QUEUES_BOUND;
	}
out:
	return (err);
irq_err:
	CH_ERR(sc, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void
cxgb_down_locked(struct adapter *sc)
{
	int i;

	t3_sge_stop(sc);
	t3_intr_disable(sc);

	if (sc->intr_tag != NULL) {
		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		sc->intr_tag = NULL;
	}
	if (sc->irq_res != NULL) {
		device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
		    sc->irq_rid, sc->irq_res);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);
		sc->irq_res = NULL;
	}

	if (sc->flags & USING_MSIX)
		cxgb_teardown_msix(sc);
	ADAPTER_UNLOCK(sc);

	callout_stop(&sc->cxgb_tick_ch);
	callout_stop(&sc->sge_timer_ch);
	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->slow_intr_task);
		for (i = 0; i < sc->params.nports; i++)
			taskqueue_drain(sc->tq, &sc->port[i].timer_reclaim_task);
	}
}

static int
offload_open(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = &adapter->tdev;
#ifdef notyet
	    T3CDEV(pi->ifp);
#endif
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	printf("device_map=0x%x\n", adapter->open_device_map);
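	/*
	 * Atomically set OFFLOAD_DEVMAP_BIT; if the cmpset fails the bit was
	 * already set and another caller has opened the offload device.
	 */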
	if (atomic_cmpset_int(&adapter->open_device_map,
		(adapter->open_device_map & ~(1<<OFFLOAD_DEVMAP_BIT)),
		(adapter->open_device_map | (1<<OFFLOAD_DEVMAP_BIT))) == 0)
		return (0);

	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
		printf("offload_open: DEVMAP_BIT did not get set 0x%x\n",
		    adapter->open_device_map);
	ADAPTER_LOCK(pi->adapter);
	if (!adap_up)
		err = cxgb_up(adapter);
	ADAPTER_UNLOCK(pi->adapter);
	if (err)
		return (err);

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = pi->ifp;
	err = cxgb_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		       adapter->port[0].ifp->if_mtu : 0xffff);
	init_smt(adapter);

	/* Call back all registered clients */
	cxgb_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_set_dummy_ops(tdev);
	}
	return (err);
}

static int
offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT)) {
		printf("offload_close: DEVMAP_BIT not set\n");
		return (0);
	}

	/* Call back all registered clients */
	cxgb_remove_clients(tdev);
	tdev->lldev = NULL;
	cxgb_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);

	ADAPTER_LOCK(adapter);
	if (!adapter->open_device_map)
		cxgb_down_locked(adapter);
	else
		ADAPTER_UNLOCK(adapter);
	cxgb_offload_deactivate(adapter);
	return (0);
}

static void
cxgb_init(void *arg)
{
	struct port_info *p = arg;

	PORT_LOCK(p);
	cxgb_init_locked(p);
	PORT_UNLOCK(p);
}

static void
cxgb_init_locked(struct port_info *p)
{
	struct ifnet *ifp;
	adapter_t *sc = p->adapter;
	int err;

	PORT_LOCK_ASSERT_OWNED(p);
	ifp = p->ifp;

	ADAPTER_LOCK(p->adapter);
	if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
		ADAPTER_UNLOCK(p->adapter);
		cxgb_stop_locked(p);
		return;
	}
	if (p->adapter->open_device_map == 0) {
		t3_intr_clear(sc);
	}
	setbit(&p->adapter->open_device_map, p->port_id);
	ADAPTER_UNLOCK(p->adapter);

	if (is_offload(sc) && !ofld_disable) {
		err = offload_open(p);
		if (err)
			log(LOG_WARNING,
			    "Could not initialize offload capabilities\n");
		else
			printf("offload opened\n");
	}
	cxgb_link_start(p);
	t3_link_changed(sc, p->port_id);
	ifp->if_baudrate = p->link_config.speed * 1000000;

	device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
	t3_port_intr_enable(sc, p->port_id);

	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
	t3_sge_reset_adapter(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
cxgb_set_rxmode(struct port_info *p)
{
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	PORT_LOCK_ASSERT_OWNED(p);

	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}

static void
cxgb_stop_locked(struct port_info *p)
{
	struct ifnet *ifp;

	PORT_LOCK_ASSERT_OWNED(p);
	ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);

	ifp = p->ifp;
	t3_port_intr_disable(p->adapter, p->port_id);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	p->phy.ops->power_down(&p->phy, 1);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	ADAPTER_LOCK(p->adapter);
	clrbit(&p->adapter->open_device_map, p->port_id);

	if (p->adapter->open_device_map == 0) {
		cxgb_down_locked(p->adapter);
	} else
		ADAPTER_UNLOCK(p->adapter);
}

static int
cxgb_set_mtu(struct port_info *p, int mtu)
{
	struct ifnet *ifp = p->ifp;
	int error = 0;

	if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO))
		error = EINVAL;
	else if (ifp->if_mtu != mtu) {
		PORT_LOCK(p);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			callout_stop(&p->adapter->cxgb_tick_ch);
			cxgb_stop_locked(p);
			cxgb_init_locked(p);
		}
		PORT_UNLOCK(p);
	}
	return (error);
}

static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct port_info *p = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0;
	uint32_t mask;

	/*
	 * XXX need to check that we aren't in the middle of an unload
	 */
	switch (command) {
	case SIOCSIFMTU:
		error = cxgb_set_mtu(p, ifr->ifr_mtu);
		break;
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		if (ifa->ifa_addr->sa_family == AF_INET) {
			PORT_LOCK(p);
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				cxgb_init_locked(p);
			arp_ifinit(ifp, ifa);
			PORT_UNLOCK(p);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		callout_drain(&p->adapter->cxgb_tick_ch);
		PORT_LOCK(p);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = p->if_flags;
				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
					cxgb_set_rxmode(p);
			} else
				cxgb_init_locked(p);
			p->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			cxgb_stop_locked(p);

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			adapter_t *sc = p->adapter;
			callout_reset(&sc->cxgb_tick_ch, hz,
			    cxgb_tick, sc);
		}
		PORT_UNLOCK(p);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
		break;
	case SIOCSIFCAP:
		PORT_LOCK(p);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
			}
		} else if (mask & IFCAP_RXCSUM) {
			if (IFCAP_RXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_RXCSUM;
			} else {
				ifp->if_capenable |= IFCAP_RXCSUM;
			}
		}
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				if (cxgb_debug)
					printf("cxgb requires tx checksum offload"
					    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		PORT_UNLOCK(p);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static int
cxgb_media_change(struct ifnet *ifp)
{
	if_printf(ifp, "media change not supported\n");
	return (ENXIO);
}

static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (p->link_config.speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case 100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	}

	if (p->link_config.duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

static void
cxgb_async_intr(void *data)
{
	adapter_t *sc = data;

	if (cxgb_debug)
		device_printf(sc->dev, "cxgb_async_intr\n");
	/*
	 * May need to sleep - defer to taskqueue
	 */
	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}

static void
cxgb_ext_intr_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;

	if (cxgb_debug)
		printf("cxgb_ext_intr_handler\n");

	t3_phy_intr_handler(sc);

	/* Now reenable external interrupts */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask |= F_T3DBG;
		t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
	}
	ADAPTER_UNLOCK(sc);
}

static void
check_link_status(adapter_t *sc)
{
	int i;

	for (i = 0; i < (sc)->params.nports; ++i) {
		struct port_info *p = &sc->port[i];

		if (!(p->port_type->caps & SUPPORTED_IRQ))
			t3_link_changed(sc, i);
		p->ifp->if_baudrate = p->link_config.speed * 1000000;
	}
}

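/*
 * Periodic watchdog for T3 rev B2 MACs: t3b2_mac_watchdog_task() returns 1
 * when it had to toggle the MAC and 2 when a full reset was needed, in
 * which case the MAC is reprogrammed and the link restarted.
 */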
1938static void
1939check_t3b2_mac(struct adapter *adapter)
1940{
1941	int i;
1942
1943	for_each_port(adapter, i) {
1944		struct port_info *p = &adapter->port[i];
1945		struct ifnet *ifp = p->ifp;
1946		int status;
1947
1948		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1949			continue;
1950
1951		status = 0;
1952		PORT_LOCK(p);
1953		if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
1954			status = t3b2_mac_watchdog_task(&p->mac);
1955		if (status == 1)
1956			p->mac.stats.num_toggled++;
1957		else if (status == 2) {
1958			struct cmac *mac = &p->mac;
1959
1960			t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
1961			    + ETHER_VLAN_ENCAP_LEN);
1962			t3_mac_set_address(mac, 0, p->hw_addr);
1963			cxgb_set_rxmode(p);
1964			t3_link_start(&p->phy, mac, &p->link_config);
1965			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1966			t3_port_intr_enable(adapter, p->port_id);
1967			p->mac.stats.num_resets++;
1968		}
1969		PORT_UNLOCK(p);
1970	}
1971}
1972
1973static void
1974cxgb_tick(void *arg)
1975{
1976	adapter_t *sc = (adapter_t *)arg;
1977	int i, running = 0;
1978
1979	for_each_port(sc, i) {
1980
1981		struct port_info *p = &sc->port[i];
1982		struct ifnet *ifp = p->ifp;
1983		PORT_LOCK(p);
1984
1985		if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
1986			running = 1;
1987		PORT_UNLOCK(p);
1988	}
1989
1990	if (running == 0)
1991		return;
1992
1993	taskqueue_enqueue(sc->tq, &sc->tick_task);
1994
1995	if (sc->open_device_map != 0)
1996		callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
1997}

static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;

	ADAPTER_LOCK(sc);
	if (p->linkpoll_period)
		check_link_status(sc);

	/*
	 * The adapter lock can currently only be acquired after the
	 * port lock, so drop it before check_t3b2_mac() takes the
	 * port locks.
	 */
	ADAPTER_UNLOCK(sc);

	if (p->rev == T3_REV_B2 && p->nports < 4)
		check_t3b2_mac(sc);
}

static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet.
	 *
	 * This mirrors the Linux driver's rewrite of the upper halves of
	 * the 64-bit BARs; expressed here with FreeBSD's pci_read_config()
	 * and pci_write_config() so it would compile if ever enabled.
	 */
#if !defined(__LP64__) && 0
	uint32_t v;

	v = pci_read_config(dev, PCIR_BAR(1), 4);
	pci_write_config(dev, PCIR_BAR(1), v, 4);
	v = pci_read_config(dev, PCIR_BAR(3), 4);
	pci_write_config(dev, PCIR_BAR(3), v, 4);
	v = pci_read_config(dev, PCIR_BAR(5), 4);
	pci_write_config(dev, PCIR_BAR(5), v, 4);
#endif
}

static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
{
	uint8_t *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = pi->adapter;

	/*
	 * The EEPROM is accessed in 4-byte words, so round the write
	 * window out to word boundaries.  For example, offset 5 and len 6
	 * yield aligned_offset 4 and aligned_len 8.
	 */
	aligned_offset = offset & ~3;
	aligned_len = (len + (offset & 3) + 3) & ~3;

	if (aligned_offset != offset || aligned_len != len) {
		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
		if (!buf)
			return (ENOMEM);
		/* Read the first and last words, then splice in the data. */
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (offset & 3), data, len);
	} else
		buf = (uint8_t *)(uintptr_t)data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		free(buf, M_DEVBUF);
	return (err);
}

/*
 * A negative value means the parameter was left unspecified; treat that
 * as "in range" so callers can validate only the fields that were set.
 */
static int
in_range(int val, int lo, int hi)
{
	return (val < 0 || (val <= hi && val >= lo));
}

static int
cxgb_extension_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{
	return (0);
}

static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{
	return (0);
}

static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
	case SIOCGMIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct mii_data *mid = (struct mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return (EINVAL);

			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
					       mid->reg_num, &val);
		} else
			error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
					       mid->reg_num & 0x1f, &val);
		if (error == 0)
			mid->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = &pi->phy;
		struct mii_data *mid = (struct mii_data *)data;

		if (!phy->mdio_write)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return (EINVAL);

			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
						mmd, mid->reg_num, mid->val_in);
		} else
			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
						mid->reg_num & 0x1f,
						mid->val_in);
		break;
	}
	case CHELSIO_SETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		t3_write_reg(sc, edata->addr, edata->val);
		break;
	}
	case CHELSIO_GETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		edata->val = t3_read_reg(sc, edata->addr);
		break;
	}
	case CHELSIO_GET_SGE_CONTEXT: {
		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;

		mtx_lock(&sc->sge.reg_lock);
		switch (ecntxt->cntxt_type) {
		case CNTXT_TYPE_EGRESS:
			error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_FL:
			error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_RSP:
			error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_CQ:
			error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		default:
			error = EINVAL;
			break;
		}
		mtx_unlock(&sc->sge.reg_lock);
		break;
	}
	case CHELSIO_GET_SGE_DESC: {
		struct ch_desc *edesc = (struct ch_desc *)data;
		int ret;

		/*
		 * Each qset owns six rings: three Tx queues, two free
		 * lists, and a response queue.
		 */
		if (edesc->queue_num >= SGE_QSETS * 6)
			return (EINVAL);
		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
		    edesc->queue_num % 6, edesc->idx, edesc->data);
		if (ret < 0)
			return (EINVAL);
		edesc->size = ret;
		break;
	}
	case CHELSIO_SET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;

		if (t->qset_idx >= SGE_QSETS)
			return (EINVAL);
		if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t->cong_thres, 0, 255) ||
		    !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
		    !in_range(t->fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
			return (EINVAL);
		/* Queue sizes cannot be changed once the adapter is up. */
		if ((sc->flags & FULL_INIT_DONE) &&
		    (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
		     t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
		     t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
		     t->polling >= 0 || t->cong_thres >= 0))
			return (EBUSY);

		q = &sc->params.sge.qset[t->qset_idx];

		if (t->rspq_size >= 0)
			q->rspq_size = t->rspq_size;
		if (t->fl_size[0] >= 0)
			q->fl_size = t->fl_size[0];
		if (t->fl_size[1] >= 0)
			q->jumbo_size = t->fl_size[1];
		if (t->txq_size[0] >= 0)
			q->txq_size[0] = t->txq_size[0];
		if (t->txq_size[1] >= 0)
			q->txq_size[1] = t->txq_size[1];
		if (t->txq_size[2] >= 0)
			q->txq_size[2] = t->txq_size[2];
		if (t->cong_thres >= 0)
			q->cong_thres = t->cong_thres;
		if (t->intr_lat >= 0) {
			struct sge_qset *qs = &sc->sge.qs[t->qset_idx];

			q->coalesce_nsecs = t->intr_lat * 1000;
			t3_update_qset_coalesce(qs, q);
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;

		if (t->qset_idx >= SGE_QSETS)
			return (EINVAL);

		q = &sc->params.sge.qset[t->qset_idx];
		t->rspq_size   = q->rspq_size;
		t->txq_size[0] = q->txq_size[0];
		t->txq_size[1] = q->txq_size[1];
		t->txq_size[2] = q->txq_size[2];
		t->fl_size[0]  = q->fl_size;
		t->fl_size[1]  = q->jumbo_size;
		t->polling     = q->polling;
		t->intr_lat    = q->coalesce_nsecs / 1000;
		t->cong_thres  = q->cong_thres;
		break;
	}
	case CHELSIO_SET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;
		unsigned int port_idx = pi->port_id;

		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);
		if (edata->val < 1 ||
		    (edata->val > 1 && !(sc->flags & USING_MSIX)))
			return (EINVAL);
		if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
			return (EINVAL);
		sc->port[port_idx].nqsets = edata->val;
		sc->port[0].first_qset = 0;
		/*
		 * XXX hardcode ourselves to 2 ports just like the Linux
		 * driver does.
		 */
		sc->port[1].first_qset = sc->port[0].nqsets;
		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;

		edata->val = pi->nqsets;
		break;
	}
#ifdef notyet
	case CHELSIO_LOAD_FW:
	case CHELSIO_GET_PM:
	case CHELSIO_SET_PM:
		return (EOPNOTSUPP);
#endif
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;
		int i;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (offload_running(sc))
			return (EBUSY);
		if (m->nmtus != NMTUS)
			return (EINVAL);
		if (m->mtus[0] < 81)         /* accommodate SACK */
			return (EINVAL);

		/*
		 * MTUs must be in ascending order
		 */
		for (i = 1; i < NMTUS; ++i)
			if (m->mtus[i] < m->mtus[i - 1])
				return (EINVAL);

		memcpy(sc->params.mtus, m->mtus,
		       sizeof(sc->params.mtus));
		break;
	}
	case CHELSIO_GETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
		m->nmtus = NMTUS;
		break;
	}
	case CHELSIO_DEVUP:
		if (!is_offload(sc))
			return (EOPNOTSUPP);
		return (offload_open(pi));
	case CHELSIO_GET_MEM: {
		struct ch_mem_range *t = (struct ch_mem_range *)data;
		struct mc7 *mem;
		uint8_t *useraddr;
		u64 buf[32];

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);         /* need the memory controllers */
		if ((t->addr & 0x7) || (t->len & 0x7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &sc->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &sc->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &sc->pmtx;
		else
			return (EINVAL);

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t->version = 3 | (sc->params.rev << 10);

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr = (uint8_t *)t->buf;
		while (t->len) {
			unsigned int chunk = min(t->len, sizeof(buf));

			error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
			if (error)
				return (-error);
			if (copyout(buf, useraddr, chunk))
				return (EFAULT);
			useraddr += chunk;
			t->addr += chunk;
			t->len -= chunk;
		}
		break;
	}
	case CHELSIO_READ_TCAM_WORD: {
		struct ch_tcam_word *t = (struct ch_tcam_word *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);         /* need MC5 */
		return (-t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf));
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace *t = (struct ch_trace *)data;
		const struct trace_params *tp;

		tp = (const struct trace_params *)&t->sip;
		if (t->config_tx)
			t3_config_trace_filter(sc, tp, 0, t->invert_match,
					       t->trace_tx);
		if (t->config_rx)
			t3_config_trace_filter(sc, tp, 1, t->invert_match,
					       t->trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED: {
		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;

		if (sc->open_device_map == 0)
			return (EAGAIN);
		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
		    p->binding);
		break;
	}
	case CHELSIO_IFCONF_GETREGS: {
		struct ifconf_regs *regs = (struct ifconf_regs *)data;
		int reglen = cxgb_get_regs_len();
		uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);

		if (buf == NULL)
			return (ENOMEM);
		if (regs->len > reglen)
			regs->len = reglen;
		else if (regs->len < reglen) {
			error = E2BIG;
			goto done;
		}
		cxgb_get_regs(sc, regs, buf);
		error = copyout(buf, regs->data, reglen);
done:
		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_SET_HW_SCHED: {
		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
		unsigned int ticks_per_usec = core_ticks_per_usec(sc);

		if ((sc->flags & FULL_INIT_DONE) == 0)
			return (EAGAIN);       /* need TP to be initialized */
		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
		    !in_range(t->channel, 0, 1) ||
		    !in_range(t->kbps, 0, 10000000) ||
		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
		    !in_range(t->flow_ipg, 0,
			      dack_ticks_to_usec(sc, 0x7ff)))
			return (EINVAL);

		if (t->kbps >= 0) {
			error = t3_config_sched(sc, t->kbps, t->sched);
			if (error < 0)
				return (-error);
		}
		if (t->class_ipg >= 0)
			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
		if (t->flow_ipg >= 0) {
			t->flow_ipg *= 1000;     /* us -> ns */
			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
		}
		if (t->mode >= 0) {
			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);

			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
					 bit, t->mode ? bit : 0);
		}
		if (t->channel >= 0)
			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
					 1 << t->sched, t->channel << t->sched);
		break;
	}
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}
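
/*
 * These private ioctls are issued against the driver's character device
 * node.  A minimal userland sketch for CHELSIO_GETREG (device path
 * illustrative, error handling elided):
 *
 *	struct ch_reg reg;
 *	int fd = open("/dev/cxgb0", O_RDWR);
 *
 *	reg.addr = 0x4;		(any 4-byte-aligned register offset)
 *	if (ioctl(fd, CHELSIO_GETREG, &reg) == 0)
 *		printf("reg 0x%x = 0x%x\n", reg.addr, reg.val);
 */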

static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	/*
	 * start and end are byte offsets into both the register file and
	 * the dump buffer, so the cast must bind before the addition;
	 * (uint32_t *)buf + start would scale the offset by four.
	 */
	uint32_t *p = (uint32_t *)(buf + start);

	for ( ; start <= end; start += sizeof(uint32_t))
		*p++ = t3_read_reg(ap, start);
}

#define T3_REGMAP_SIZE (3 * 1024)
static int
cxgb_get_regs_len(void)
{
	return (T3_REGMAP_SIZE);
}
#undef T3_REGMAP_SIZE

static void
cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
{

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, REGDUMP_SIZE);
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
2547