1/**************************************************************************
2
3Copyright (c) 2007-2009, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13    contributors may be used to endorse or promote products derived from
14    this software without specific prior written permission.
15
16THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28***************************************************************************/
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 197791 2009-10-05 20:21:41Z np $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/module.h>
38#include <sys/pciio.h>
39#include <sys/conf.h>
40#include <machine/bus.h>
41#include <machine/resource.h>
42#include <sys/bus_dma.h>
43#include <sys/ktr.h>
44#include <sys/rman.h>
45#include <sys/ioccom.h>
46#include <sys/mbuf.h>
47#include <sys/linker.h>
48#include <sys/firmware.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/smp.h>
52#include <sys/sysctl.h>
53#include <sys/syslog.h>
54#include <sys/queue.h>
55#include <sys/taskqueue.h>
56#include <sys/proc.h>
57
58#include <net/bpf.h>
59#include <net/ethernet.h>
60#include <net/if.h>
61#include <net/if_arp.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_types.h>
65#include <net/if_vlan_var.h>
66
67#include <netinet/in_systm.h>
68#include <netinet/in.h>
69#include <netinet/if_ether.h>
70#include <netinet/ip.h>
72#include <netinet/tcp.h>
73#include <netinet/udp.h>
74
75#include <dev/pci/pcireg.h>
76#include <dev/pci/pcivar.h>
77#include <dev/pci/pci_private.h>
78
79#include <cxgb_include.h>
80
81#ifdef PRIV_SUPPORTED
82#include <sys/priv.h>
83#endif
84
85static int cxgb_setup_interrupts(adapter_t *);
86static void cxgb_teardown_interrupts(adapter_t *);
87static int cxgb_begin_op(struct port_info *, const char *);
88static int cxgb_begin_detach(struct port_info *);
89static int cxgb_end_op(struct port_info *);
90static void cxgb_init(void *);
91static int cxgb_init_synchronized(struct port_info *);
92static int cxgb_uninit_synchronized(struct port_info *);
93static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
94static int cxgb_media_change(struct ifnet *);
95static int cxgb_ifm_type(int);
96static void cxgb_build_medialist(struct port_info *);
97static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
98static int setup_sge_qsets(adapter_t *);
99static void cxgb_async_intr(void *);
100static void cxgb_ext_intr_handler(void *, int);
101static void cxgb_tick_handler(void *, int);
102static void cxgb_tick(void *);
103static void setup_rss(adapter_t *sc);
104
105/* Attachment glue for the PCI controller end of the device.  Each port of
106 * the device is attached separately, as defined later.
107 */
108static int cxgb_controller_probe(device_t);
109static int cxgb_controller_attach(device_t);
110static int cxgb_controller_detach(device_t);
111static void cxgb_free(struct adapter *);
112static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
113    unsigned int end);
114static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
115static int cxgb_get_regs_len(void);
116static int offload_open(struct port_info *pi);
117static void touch_bars(device_t dev);
118static int offload_close(struct t3cdev *tdev);
119static void cxgb_update_mac_settings(struct port_info *p);
120
121static device_method_t cxgb_controller_methods[] = {
122	DEVMETHOD(device_probe,		cxgb_controller_probe),
123	DEVMETHOD(device_attach,	cxgb_controller_attach),
124	DEVMETHOD(device_detach,	cxgb_controller_detach),
125
126	/* bus interface */
127	DEVMETHOD(bus_print_child,	bus_generic_print_child),
128	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
129
130	{ 0, 0 }
131};
132
133static driver_t cxgb_controller_driver = {
134	"cxgbc",
135	cxgb_controller_methods,
136	sizeof(struct adapter)
137};
138
139static devclass_t	cxgb_controller_devclass;
140DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
141
142/*
143 * Attachment glue for the ports.  Attachment is done directly to the
144 * controller device.
145 */
146static int cxgb_port_probe(device_t);
147static int cxgb_port_attach(device_t);
148static int cxgb_port_detach(device_t);
149
150static device_method_t cxgb_port_methods[] = {
151	DEVMETHOD(device_probe,		cxgb_port_probe),
152	DEVMETHOD(device_attach,	cxgb_port_attach),
153	DEVMETHOD(device_detach,	cxgb_port_detach),
154	{ 0, 0 }
155};
156
157static driver_t cxgb_port_driver = {
158	"cxgb",
159	cxgb_port_methods,
160	0
161};
162
163static d_ioctl_t cxgb_extension_ioctl;
164static d_open_t cxgb_extension_open;
165static d_close_t cxgb_extension_close;
166
167static struct cdevsw cxgb_cdevsw = {
168       .d_version =    D_VERSION,
169       .d_flags =      0,
170       .d_open =       cxgb_extension_open,
171       .d_close =      cxgb_extension_close,
172       .d_ioctl =      cxgb_extension_ioctl,
173       .d_name =       "cxgb",
174};
175
176static devclass_t	cxgb_port_devclass;
177DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
178
179/*
180 * The driver uses the best interrupt scheme available on a platform in the
181 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
182 * of these schemes the driver may consider as follows:
183 *
184 * msi = 2: choose from among all three options
185 * msi = 1: only consider MSI and pin interrupts
186 * msi = 0: force pin interrupts
187 */
188static int msi_allowed = 2;
189
190TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
191SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
192SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
193    "MSI-X, MSI, INTx selector");
194
195/*
196 * The driver enables offload by default.
197 * To disable it, use ofld_disable = 1.
198 */
199static int ofld_disable = 0;
200TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
201SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
202    "disable ULP offload");
203
204/*
205 * The driver uses an auto-queue algorithm by default.
206 * To disable it and force a single queue-set per port, use multiq = 0.
207 */
208static int multiq = 1;
209TUNABLE_INT("hw.cxgb.multiq", &multiq);
210SYSCTL_UINT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
211    "use min(ncpus/ports, 8) queue-sets per port");
212
213/*
214 * By default the driver will not update the firmware unless
215 * it was compiled against a newer version.
217 */
218static int force_fw_update = 0;
219TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
220SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
221    "update firmware even if up to date");
222
223int cxgb_use_16k_clusters = 1;
224TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
225SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
226    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");
227
228/*
229 * Tune the size of the output queue.
230 */
231int cxgb_snd_queue_len = IFQ_MAXLEN;
232TUNABLE_INT("hw.cxgb.snd_queue_len", &cxgb_snd_queue_len);
233SYSCTL_UINT(_hw_cxgb, OID_AUTO, snd_queue_len, CTLFLAG_RDTUN,
234    &cxgb_snd_queue_len, 0, "send queue size");
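
/*
 * All of the knobs above are boot-time tunables (CTLFLAG_RDTUN); they can be
 * set from loader.conf, e.g. hw.cxgb.msi_allowed=1 or hw.cxgb.multiq=0.
 */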
235
236
237enum {
238	MAX_TXQ_ENTRIES      = 16384,
239	MAX_CTRL_TXQ_ENTRIES = 1024,
240	MAX_RSPQ_ENTRIES     = 16384,
241	MAX_RX_BUFFERS       = 16384,
242	MAX_RX_JUMBO_BUFFERS = 16384,
243	MIN_TXQ_ENTRIES      = 4,
244	MIN_CTRL_TXQ_ENTRIES = 4,
245	MIN_RSPQ_ENTRIES     = 32,
246	MIN_FL_ENTRIES       = 32,
247	MIN_FL_JUMBO_ENTRIES = 32
248};
249
250struct filter_info {
251	u32 sip;
252	u32 sip_mask;
253	u32 dip;
254	u16 sport;
255	u16 dport;
256	u32 vlan:12;
257	u32 vlan_prio:3;
258	u32 mac_hit:1;
259	u32 mac_idx:4;
260	u32 mac_vld:1;
261	u32 pkt_type:2;
262	u32 report_filter_id:1;
263	u32 pass:1;
264	u32 rss:1;
265	u32 qset:3;
266	u32 locked:1;
267	u32 valid:1;
268};
269
270enum { FILTER_NO_VLAN_PRI = 7 };
271
272#define EEPROM_MAGIC 0x38E2F10C
273
274#define PORT_MASK ((1 << MAX_NPORTS) - 1)
275
276/* Table for probing the cards.  The desc field isn't actually used */
277struct cxgb_ident {
278	uint16_t	vendor;
279	uint16_t	device;
280	int		index;
281	char		*desc;
282} cxgb_identifiers[] = {
283	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
284	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
285	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
286	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
287	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
288	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
289	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
290	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
291	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
292	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
293	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
294	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
295	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
296	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
297	{0, 0, 0, NULL}
298};
299
300static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
301
302
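/*
 * Map the adapter's hardware revision to the character used in the firmware
 * and protocol-SRAM image names (e.g. "cxgb_t3b_protocol_sram").
 */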
303static __inline char
304t3rev2char(struct adapter *adapter)
305{
306	char rev = 'z';
307
308	switch(adapter->params.rev) {
309	case T3_REV_A:
310		rev = 'a';
311		break;
312	case T3_REV_B:
313	case T3_REV_B2:
314		rev = 'b';
315		break;
316	case T3_REV_C:
317		rev = 'c';
318		break;
319	}
320	return rev;
321}
322
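/*
 * Find the cxgb_identifiers[] entry that matches this PCI device's vendor and
 * device IDs, or return NULL if the card is not recognized.
 */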
323static struct cxgb_ident *
324cxgb_get_ident(device_t dev)
325{
326	struct cxgb_ident *id;
327
328	for (id = cxgb_identifiers; id->desc != NULL; id++) {
329		if ((id->vendor == pci_get_vendor(dev)) &&
330		    (id->device == pci_get_device(dev))) {
331			return (id);
332		}
333	}
334	return (NULL);
335}
336
337static const struct adapter_info *
338cxgb_get_adapter_info(device_t dev)
339{
340	struct cxgb_ident *id;
341	const struct adapter_info *ai;
342
343	id = cxgb_get_ident(dev);
344	if (id == NULL)
345		return (NULL);
346
347	ai = t3_get_adapter_info(id->index);
348
349	return (ai);
350}
351
352static int
353cxgb_controller_probe(device_t dev)
354{
355	const struct adapter_info *ai;
356	char *ports, buf[80];
357	int nports;
358	struct adapter *sc = device_get_softc(dev);
359
360	ai = cxgb_get_adapter_info(dev);
361	if (ai == NULL)
362		return (ENXIO);
363
364	nports = ai->nports0 + ai->nports1;
365	if (nports == 1)
366		ports = "port";
367	else
368		ports = "ports";
369
370	snprintf(buf, sizeof(buf), "%s %sNIC, rev: %d nports: %d %s",
371	    ai->desc, is_offload(sc) ? "R" : "",
372	    sc->params.rev, nports, ports);
373	device_set_desc_copy(dev, buf);
374	return (BUS_PROBE_DEFAULT);
375}
376
377#define FW_FNAME "cxgb_t3fw"
378#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
379#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
380
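/*
 * Load the firmware image bundled with the cxgb module (FW_FNAME) through
 * firmware(9) and write it to the card.  Returns 0 on success.
 */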
381static int
382upgrade_fw(adapter_t *sc)
383{
384#ifdef FIRMWARE_LATEST
385	const struct firmware *fw;
386#else
387	struct firmware *fw;
388#endif
389	int status;
390
391	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
392		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
393		return (ENOENT);
394	} else
395		device_printf(sc->dev, "updating firmware on card\n");
396	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
397
398	device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
399
400	firmware_put(fw, FIRMWARE_UNLOAD);
401
402	return (status);
403}
404
405/*
406 * The cxgb_controller_attach function is responsible for the initial
407 * bringup of the device.  Its responsibilities include:
408 *
409 *  1. Determine if the device supports MSI or MSI-X.
410 *  2. Allocate bus resources so that we can access the Base Address Register
411 *  3. Create and initialize mutexes for the controller and its control
412 *     logic such as SGE and MDIO.
413 *  4. Call hardware specific setup routine for the adapter as a whole.
414 *  5. Allocate the BAR for doing MSI-X.
415 *  6. Setup the line interrupt iff MSI-X is not supported.
416 *  7. Create the driver's taskq.
417 *  8. Start one task queue service thread.
418 *  9. Check if the firmware and SRAM are up-to-date.  They will be
419 *     auto-updated later (before FULL_INIT_DONE), if required.
420 * 10. Create a child device for each MAC (port)
421 * 11. Initialize T3 private state.
422 * 12. Trigger the LED
423 * 13. Setup offload iff supported.
424 * 14. Reset/restart the tick callout.
425 * 15. Attach sysctls
426 *
427 * NOTE: Any modification or deviation from this list MUST be reflected in
428 * the above comment.  Failure to do so will result in problems on various
429 * error conditions including link flapping.
430 */
431static int
432cxgb_controller_attach(device_t dev)
433{
434	device_t child;
435	const struct adapter_info *ai;
436	struct adapter *sc;
437	int i, error = 0;
438	uint32_t vers;
439	int port_qsets = 1;
440#ifdef MSI_SUPPORTED
441	int msi_needed, reg;
442#endif
443	char buf[80];
444
445	sc = device_get_softc(dev);
446	sc->dev = dev;
447	sc->msi_count = 0;
448	ai = cxgb_get_adapter_info(dev);
449
450	/*
451	 * XXX not really related but a recent addition
452	 */
453#ifdef MSI_SUPPORTED
454	/* find the PCIe link width and set max read request to 4KB */
455	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
456		uint16_t lnk, pectl;
457		lnk = pci_read_config(dev, reg + 0x12, 2);
458		sc->link_width = (lnk >> 4) & 0x3f;
459
460		pectl = pci_read_config(dev, reg + 0x8, 2);
461		pectl = (pectl & ~0x7000) | (5 << 12);
462		pci_write_config(dev, reg + 0x8, pectl, 2);
463	}
464
465	if (sc->link_width != 0 && sc->link_width <= 4 &&
466	    (ai->nports0 + ai->nports1) <= 2) {
467		device_printf(sc->dev,
468		    "PCIe x%d Link, expect reduced performance\n",
469		    sc->link_width);
470	}
471#endif
472	touch_bars(dev);
473	pci_enable_busmaster(dev);
474	/*
475	 * Allocate the registers and make them available to the driver.
476	 * The registers that we care about for NIC mode are in BAR 0
477	 */
478	sc->regs_rid = PCIR_BAR(0);
479	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
480	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
481		device_printf(dev, "Cannot allocate BAR region 0\n");
482		return (ENXIO);
483	}
484	sc->udbs_rid = PCIR_BAR(2);
485	sc->udbs_res = NULL;
486	if (is_offload(sc) &&
487	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
488		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
489		device_printf(dev, "Cannot allocate BAR region 1\n");
490		error = ENXIO;
491		goto out;
492	}
493
494	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
495	    device_get_unit(dev));
496	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
497
498	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
499	    device_get_unit(dev));
500	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
501	    device_get_unit(dev));
502	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
503	    device_get_unit(dev));
504
505	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
506	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
507	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
508
509	sc->bt = rman_get_bustag(sc->regs_res);
510	sc->bh = rman_get_bushandle(sc->regs_res);
511	sc->mmio_len = rman_get_size(sc->regs_res);
512
513	for (i = 0; i < MAX_NPORTS; i++)
514		sc->port[i].adapter = sc;
515
516	if (t3_prep_adapter(sc, ai, 1) < 0) {
517		printf("prep adapter failed\n");
518		error = ENODEV;
519		goto out;
520	}
521        /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
522	 * enough messages for the queue sets.  If that fails, try falling
523	 * back to MSI.  If that fails, then try falling back to the legacy
524	 * interrupt pin model.
525	 */
526#ifdef MSI_SUPPORTED
527
528	sc->msix_regs_rid = 0x20;
529	if ((msi_allowed >= 2) &&
530	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
531	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
532
533		if (multiq)
534			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
535		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
536
537		if (pci_msix_count(dev) == 0 ||
538		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
539		    sc->msi_count != msi_needed) {
540			device_printf(dev, "alloc msix failed - "
541				      "msi_count=%d, msi_needed=%d, err=%d; "
542				      "will try MSI\n", sc->msi_count,
543				      msi_needed, error);
544			sc->msi_count = 0;
545			port_qsets = 1;
546			pci_release_msi(dev);
547			bus_release_resource(dev, SYS_RES_MEMORY,
548			    sc->msix_regs_rid, sc->msix_regs_res);
549			sc->msix_regs_res = NULL;
550		} else {
551			sc->flags |= USING_MSIX;
552			sc->cxgb_intr = cxgb_async_intr;
553			device_printf(dev,
554				      "using MSI-X interrupts (%u vectors)\n",
555				      sc->msi_count);
556		}
557	}
558
559	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
560		sc->msi_count = 1;
561		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
562			device_printf(dev, "alloc msi failed - "
563				      "err=%d; will try INTx\n", error);
564			sc->msi_count = 0;
565			port_qsets = 1;
566			pci_release_msi(dev);
567		} else {
568			sc->flags |= USING_MSI;
569			sc->cxgb_intr = t3_intr_msi;
570			device_printf(dev, "using MSI interrupts\n");
571		}
572	}
573#endif
574	if (sc->msi_count == 0) {
575		device_printf(dev, "using line interrupts\n");
576		sc->cxgb_intr = t3b_intr;
577	}
578
579	/* Create a private taskqueue thread for handling driver events */
580#ifdef TASKQUEUE_CURRENT
581	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
582	    taskqueue_thread_enqueue, &sc->tq);
583#else
584	sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
585	    taskqueue_thread_enqueue, &sc->tq);
586#endif
587	if (sc->tq == NULL) {
588		device_printf(dev, "failed to allocate controller task queue\n");
589		goto out;
590	}
591
592	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
593	    device_get_nameunit(dev));
594	TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
595	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
596
597
598	/* Create a periodic callout for checking adapter status */
599	callout_init(&sc->cxgb_tick_ch, TRUE);
600
601	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
602		/*
603		 * Warn user that a firmware update will be attempted in init.
604		 */
605		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
606		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
607		sc->flags &= ~FW_UPTODATE;
608	} else {
609		sc->flags |= FW_UPTODATE;
610	}
611
612	if (t3_check_tpsram_version(sc) < 0) {
613		/*
614		 * Warn user that a firmware update will be attempted in init.
615		 */
616		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
617		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
618		sc->flags &= ~TPS_UPTODATE;
619	} else {
620		sc->flags |= TPS_UPTODATE;
621	}
622
623	/*
624	 * Create a child device for each MAC.  The ethernet attachment
625	 * will be done in these children.
626	 */
627	for (i = 0; i < (sc)->params.nports; i++) {
628		struct port_info *pi;
629
630		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
631			device_printf(dev, "failed to add child port\n");
632			error = EINVAL;
633			goto out;
634		}
635		pi = &sc->port[i];
636		pi->adapter = sc;
637		pi->nqsets = port_qsets;
638		pi->first_qset = i*port_qsets;
639		pi->port_id = i;
640		pi->tx_chan = i >= ai->nports0;
641		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
642		sc->rxpkt_map[pi->txpkt_intf] = i;
643		sc->port[i].tx_chan = i >= ai->nports0;
644		sc->portdev[i] = child;
645		device_set_softc(child, pi);
646	}
647	if ((error = bus_generic_attach(dev)) != 0)
648		goto out;
649
650	/* initialize sge private state */
651	t3_sge_init_adapter(sc);
652
653	t3_led_ready(sc);
654
655	cxgb_offload_init();
656	if (is_offload(sc)) {
657		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
658		cxgb_adapter_ofld(sc);
659        }
660	error = t3_get_fw_version(sc, &vers);
661	if (error)
662		goto out;
663
664	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
665	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
666	    G_FW_VERSION_MICRO(vers));
667
668	snprintf(buf, sizeof(buf), "%s\t E/C: %s S/N: %s",
669		 ai->desc,
670		 sc->params.vpd.ec, sc->params.vpd.sn);
671	device_set_desc_copy(dev, buf);
672
673	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
674		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
675		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
676
677	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
678	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
679	t3_add_attach_sysctls(sc);
680out:
681	if (error)
682		cxgb_free(sc);
683
684	return (error);
685}
686
687/*
688 * The cxgb_controller_detach routine is called when the device is
689 * unloaded from the system.
690 */
691
692static int
693cxgb_controller_detach(device_t dev)
694{
695	struct adapter *sc;
696
697	sc = device_get_softc(dev);
698
699	cxgb_free(sc);
700
701	return (0);
702}
703
704/*
705 * cxgb_free() is called by the cxgb_controller_detach() routine
706 * to tear down the structures that were built up in
707 * cxgb_controller_attach(), and should be the final piece of work
708 * done when fully unloading the driver.
709 *
710 * Its work includes:
711 *  1. Shutting down the threads started by the cxgb_controller_attach()
712 *     routine.
713 *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
714 *  3. Detaching all of the port devices created during the
715 *     cxgb_controller_attach() routine.
716 *  4. Removing the device children created via cxgb_controller_attach().
717 *  5. Releasing PCI resources associated with the device.
718 *  6. Turning off the offload support, iff it was turned on.
719 *  7. Destroying the mutexes created in cxgb_controller_attach().
720 *
721 */
722static void
723cxgb_free(struct adapter *sc)
724{
725	int i;
726
727	ADAPTER_LOCK(sc);
728	sc->flags |= CXGB_SHUTDOWN;
729	ADAPTER_UNLOCK(sc);
730
731	/*
732	 * Make sure all child devices are gone.
733	 */
734	bus_generic_detach(sc->dev);
735	for (i = 0; i < (sc)->params.nports; i++) {
736		if (sc->portdev[i] &&
737		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
738			device_printf(sc->dev, "failed to delete child port\n");
739	}
740
741	/*
742	 * At this point, it is as if cxgb_port_detach has run on all ports, and
743	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
744 * and all open devices have been closed.
745	 */
746	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
747					   __func__, sc->open_device_map));
748	for (i = 0; i < sc->params.nports; i++) {
749		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
750						  __func__, i));
751	}
752
753	/*
754	 * Finish off the adapter's callouts.
755	 */
756	callout_drain(&sc->cxgb_tick_ch);
757	callout_drain(&sc->sge_timer_ch);
758
759	/*
760	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
761	 * sysctls are cleaned up by the kernel linker.
762	 */
763	if (sc->flags & FULL_INIT_DONE) {
764 		t3_free_sge_resources(sc);
765 		sc->flags &= ~FULL_INIT_DONE;
766 	}
767
768	/*
769	 * Release all interrupt resources.
770	 */
771	cxgb_teardown_interrupts(sc);
772#ifdef MSI_SUPPORTED
773	if (sc->flags & (USING_MSI | USING_MSIX)) {
774		device_printf(sc->dev, "releasing msi message(s)\n");
775		pci_release_msi(sc->dev);
776	} else {
777		device_printf(sc->dev, "no msi message to release\n");
778	}
779
780	if (sc->msix_regs_res != NULL) {
781		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
782		    sc->msix_regs_res);
783	}
784#endif
785
786	/*
787	 * Free the adapter's taskqueue.
788	 */
789	if (sc->tq != NULL) {
790		taskqueue_free(sc->tq);
791		sc->tq = NULL;
792	}
793
794	if (is_offload(sc)) {
795		clrbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
796		cxgb_adapter_unofld(sc);
797	}
798
799#ifdef notyet
800	if (sc->flags & CXGB_OFLD_INIT)
801		cxgb_offload_deactivate(sc);
802#endif
803	free(sc->filters, M_DEVBUF);
804	t3_sge_free(sc);
805
806	cxgb_offload_exit();
807
808	if (sc->udbs_res != NULL)
809		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
810		    sc->udbs_res);
811
812	if (sc->regs_res != NULL)
813		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
814		    sc->regs_res);
815
816	MTX_DESTROY(&sc->mdio_lock);
817	MTX_DESTROY(&sc->sge.reg_lock);
818	MTX_DESTROY(&sc->elmer_lock);
819	ADAPTER_LOCK_DEINIT(sc);
820}
821
822/**
823 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
824 *	@sc: the controller softc
825 *
826 *	Determines how many sets of SGE queues to use and initializes them.
827 *	We support multiple queue sets per port if we have MSI-X, otherwise
828 *	just one queue set per port.
829 */
830static int
831setup_sge_qsets(adapter_t *sc)
832{
833	int i, j, err, irq_idx = 0, qset_idx = 0;
834	u_int ntxq = SGE_TXQ_PER_SET;
835
836	if ((err = t3_sge_alloc(sc)) != 0) {
837		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
838		return (err);
839	}
840
841	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
842		irq_idx = -1;
843
844	for (i = 0; i < (sc)->params.nports; i++) {
845		struct port_info *pi = &sc->port[i];
846
847		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
848			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
849			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
850			    &sc->params.sge.qset[qset_idx], ntxq, pi);
851			if (err) {
852				t3_free_sge_resources(sc);
853				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
854				    err);
855				return (err);
856			}
857		}
858	}
859
860	return (0);
861}
862
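/*
 * Undo cxgb_setup_interrupts: release any per-queue-set MSI-X vectors as well
 * as the main interrupt (INTx, MSI, or the first MSI-X vector).
 */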
863static void
864cxgb_teardown_interrupts(adapter_t *sc)
865{
866	int i;
867
868	for (i = 0; i < SGE_QSETS; i++) {
869		if (sc->msix_intr_tag[i] == NULL) {
870
871			/* Should have been setup fully or not at all */
872			KASSERT(sc->msix_irq_res[i] == NULL &&
873				sc->msix_irq_rid[i] == 0,
874				("%s: half-done interrupt (%d).", __func__, i));
875
876			continue;
877		}
878
879		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
880				  sc->msix_intr_tag[i]);
881		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
882				     sc->msix_irq_res[i]);
883
884		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
885		sc->msix_irq_rid[i] = 0;
886	}
887
888	if (sc->intr_tag) {
889		KASSERT(sc->irq_res != NULL,
890			("%s: half-done interrupt.", __func__));
891
892		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
893		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
894				     sc->irq_res);
895
896		sc->irq_res = sc->intr_tag = NULL;
897		sc->irq_rid = 0;
898	}
899}
900
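/*
 * Hook up interrupt handlers.  The main interrupt (INTx, MSI, or the first
 * MSI-X vector) is routed to sc->cxgb_intr; with MSI-X, each remaining vector
 * is bound to one SGE queue set via t3_intr_msix.
 */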
901static int
902cxgb_setup_interrupts(adapter_t *sc)
903{
904	struct resource *res;
905	void *tag;
906	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
907
908	sc->irq_rid = intr_flag ? 1 : 0;
909	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
910					     RF_SHAREABLE | RF_ACTIVE);
911	if (sc->irq_res == NULL) {
912		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
913			      intr_flag, sc->irq_rid);
914		err = EINVAL;
915		sc->irq_rid = 0;
916	} else {
917		err = bus_setup_intr(sc->dev, sc->irq_res,
918				     INTR_MPSAFE | INTR_TYPE_NET,
919#ifdef INTR_FILTERS
920				     NULL,
921#endif
922				     sc->cxgb_intr, sc, &sc->intr_tag);
923
924		if (err) {
925			device_printf(sc->dev,
926				      "Cannot set up interrupt (%x, %u, %d)\n",
927				      intr_flag, sc->irq_rid, err);
928			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
929					     sc->irq_res);
930			sc->irq_res = sc->intr_tag = NULL;
931			sc->irq_rid = 0;
932		}
933	}
934
935	/* That's all for INTx or MSI */
936	if (!(intr_flag & USING_MSIX) || err)
937		return (err);
938
939	for (i = 0; i < sc->msi_count - 1; i++) {
940		rid = i + 2;
941		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
942					     RF_SHAREABLE | RF_ACTIVE);
943		if (res == NULL) {
944			device_printf(sc->dev, "Cannot allocate interrupt "
945				      "for message %d\n", rid);
946			err = EINVAL;
947			break;
948		}
949
950		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
951#ifdef INTR_FILTERS
952				     NULL,
953#endif
954				     t3_intr_msix, &sc->sge.qs[i], &tag);
955		if (err) {
956			device_printf(sc->dev, "Cannot set up interrupt "
957				      "for message %d (%d)\n", rid, err);
958			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
959			break;
960		}
961
962		sc->msix_irq_rid[i] = rid;
963		sc->msix_irq_res[i] = res;
964		sc->msix_intr_tag[i] = tag;
965	}
966
967	if (err)
968		cxgb_teardown_interrupts(sc);
969
970	return (err);
971}
972
973
974static int
975cxgb_port_probe(device_t dev)
976{
977	struct port_info *p;
978	char buf[80];
979	const char *desc;
980
981	p = device_get_softc(dev);
982	desc = p->phy.desc;
983	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
984	device_set_desc_copy(dev, buf);
985	return (0);
986}
987
988
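/*
 * Create a character device for this port (named after the ifnet) so that
 * userland can reach the driver's extension ioctls (cxgb_extension_ioctl).
 */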
989static int
990cxgb_makedev(struct port_info *pi)
991{
992
993	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
994	    UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
995
996	if (pi->port_cdev == NULL)
997		return (ENOMEM);
998
999	pi->port_cdev->si_drv1 = (void *)pi;
1000
1001	return (0);
1002}
1003
1004#ifndef LRO_SUPPORTED
1005#ifdef IFCAP_LRO
1006#undef IFCAP_LRO
1007#endif
1008#define IFCAP_LRO 0x0
1009#endif
1010
1011#ifdef TSO_SUPPORTED
1012#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO)
1013/* Don't enable TSO6 yet */
1014#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO)
1015#else
1016#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
1017/* Don't enable TSO6 yet */
1018#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM |  IFCAP_JUMBO_MTU)
1019#define IFCAP_TSO4 0x0
1020#define IFCAP_TSO6 0x0
1021#define CSUM_TSO   0x0
1022#endif
1023
1024
1025static int
1026cxgb_port_attach(device_t dev)
1027{
1028	struct port_info *p;
1029	struct ifnet *ifp;
1030	int err;
1031	struct adapter *sc;
1032
1033
1034	p = device_get_softc(dev);
1035	sc = p->adapter;
1036	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1037	    device_get_unit(device_get_parent(dev)), p->port_id);
1038	PORT_LOCK_INIT(p, p->lockbuf);
1039
1040	/* Allocate an ifnet object and set it up */
1041	ifp = p->ifp = if_alloc(IFT_ETHER);
1042	if (ifp == NULL) {
1043		device_printf(dev, "Cannot allocate ifnet\n");
1044		return (ENOMEM);
1045	}
1046
1047	/*
1048	 * Note that there is currently no watchdog timer.
1049	 */
1050	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1051	ifp->if_init = cxgb_init;
1052	ifp->if_softc = p;
1053	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1054	ifp->if_ioctl = cxgb_ioctl;
1055	ifp->if_start = cxgb_start;
1056
1057
1058	ifp->if_timer = 0;	/* Disable ifnet watchdog */
1059	ifp->if_watchdog = NULL;
1060
1061	ifp->if_snd.ifq_drv_maxlen = cxgb_snd_queue_len;
1062	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1063	IFQ_SET_READY(&ifp->if_snd);
1064
1065	ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
1066	ifp->if_capabilities |= CXGB_CAP;
1067	ifp->if_capenable |= CXGB_CAP_ENABLE;
1068	ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
1069	/*
1070	 * disable TSO on 4-port - it isn't supported by the firmware yet
1071	 */
1072	if (p->adapter->params.nports > 2) {
1073		ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
1074		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
1075		ifp->if_hwassist &= ~CSUM_TSO;
1076	}
1077
1078	ether_ifattach(ifp, p->hw_addr);
1079	ifp->if_transmit = cxgb_transmit;
1080	ifp->if_qflush = cxgb_qflush;
1081
1082	/*
1083	 * Only default to jumbo frames on 10GigE
1084	 */
1085	if (p->adapter->params.nports <= 2)
1086		ifp->if_mtu = ETHERMTU_JUMBO;
1087	if ((err = cxgb_makedev(p)) != 0) {
1088		printf("makedev failed %d\n", err);
1089		return (err);
1090	}
1091
1092	/* Create a list of media supported by this port */
1093	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1094	    cxgb_media_status);
1095	cxgb_build_medialist(p);
1096
1097	t3_sge_init_port(p);
1098
1099	return (err);
1100}
1101
1102/*
1103 * cxgb_port_detach() is called via the device_detach method when
1104 * cxgb_free() calls bus_generic_detach().  It is responsible for
1105 * removing the device from the view of the kernel, i.e. from all
1106 * interface lists, etc.  This routine is only called when the driver is
1107 * being unloaded, not when the link goes down.
1108 */
1109static int
1110cxgb_port_detach(device_t dev)
1111{
1112	struct port_info *p;
1113	struct adapter *sc;
1114	int i;
1115
1116	p = device_get_softc(dev);
1117	sc = p->adapter;
1118
1119	cxgb_begin_detach(p);
1120
1121	if (p->port_cdev != NULL)
1122		destroy_dev(p->port_cdev);
1123
1124	cxgb_uninit_synchronized(p);
1125	ether_ifdetach(p->ifp);
1126
1127	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1128		struct sge_qset *qs = &sc->sge.qs[i];
1129		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1130
1131		callout_drain(&txq->txq_watchdog);
1132		callout_drain(&txq->txq_timer);
1133	}
1134
1135	PORT_LOCK_DEINIT(p);
1136	if_free(p->ifp);
1137	p->ifp = NULL;
1138
1139	cxgb_end_op(p);
1140	return (0);
1141}
1142
1143void
1144t3_fatal_err(struct adapter *sc)
1145{
1146	u_int fw_status[4];
1147
1148	if (sc->flags & FULL_INIT_DONE) {
1149		t3_sge_stop(sc);
1150		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1151		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1152		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1153		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1154		t3_intr_disable(sc);
1155	}
1156	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1157	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1158		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1159		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1160}
1161
1162int
1163t3_os_find_pci_capability(adapter_t *sc, int cap)
1164{
1165	device_t dev;
1166	struct pci_devinfo *dinfo;
1167	pcicfgregs *cfg;
1168	uint32_t status;
1169	uint8_t ptr;
1170
1171	dev = sc->dev;
1172	dinfo = device_get_ivars(dev);
1173	cfg = &dinfo->cfg;
1174
1175	status = pci_read_config(dev, PCIR_STATUS, 2);
1176	if (!(status & PCIM_STATUS_CAPPRESENT))
1177		return (0);
1178
1179	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1180	case 0:
1181	case 1:
1182		ptr = PCIR_CAP_PTR;
1183		break;
1184	case 2:
1185		ptr = PCIR_CAP_PTR_2;
1186		break;
1187	default:
1188		return (0);
1189		break;
1190	}
1191	ptr = pci_read_config(dev, ptr, 1);
1192
1193	while (ptr != 0) {
1194		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1195			return (ptr);
1196		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1197	}
1198
1199	return (0);
1200}
1201
1202int
1203t3_os_pci_save_state(struct adapter *sc)
1204{
1205	device_t dev;
1206	struct pci_devinfo *dinfo;
1207
1208	dev = sc->dev;
1209	dinfo = device_get_ivars(dev);
1210
1211	pci_cfg_save(dev, dinfo, 0);
1212	return (0);
1213}
1214
1215int
1216t3_os_pci_restore_state(struct adapter *sc)
1217{
1218	device_t dev;
1219	struct pci_devinfo *dinfo;
1220
1221	dev = sc->dev;
1222	dinfo = device_get_ivars(dev);
1223
1224	pci_cfg_restore(dev, dinfo);
1225	return (0);
1226}
1227
1228/**
1229 *	t3_os_link_changed - handle link status changes
1230 *	@sc: the adapter associated with the link change
1231 *	@port_id: the port index whose link status has changed
1232 *	@link_status: the new status of the link
1233 *	@speed: the new speed setting
1234 *	@duplex: the new duplex setting
1235 *	@fc: the new flow-control setting
1236 *
1237 *	This is the OS-dependent handler for link status changes.  The OS
1238 *	neutral handler takes care of most of the processing for these events,
1239 *	then calls this handler for any OS-specific processing.
1240 */
1241void
1242t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1243     int duplex, int fc, int mac_was_reset)
1244{
1245	struct port_info *pi = &adapter->port[port_id];
1246	struct ifnet *ifp = pi->ifp;
1247
1248	/* no race with detach, so ifp should always be good */
1249	KASSERT(ifp, ("%s: if detached.", __func__));
1250
1251	/* Reapply mac settings if they were lost due to a reset */
1252	if (mac_was_reset) {
1253		PORT_LOCK(pi);
1254		cxgb_update_mac_settings(pi);
1255		PORT_UNLOCK(pi);
1256	}
1257
1258	if (link_status) {
1259		ifp->if_baudrate = IF_Mbps(speed);
1260		if_link_state_change(ifp, LINK_STATE_UP);
1261	} else
1262		if_link_state_change(ifp, LINK_STATE_DOWN);
1263}
1264
1265/**
1266 *	t3_os_phymod_changed - handle PHY module changes
1267 *	@phy: the PHY reporting the module change
1268 *	@mod_type: new module type
1269 *
1270 *	This is the OS-dependent handler for PHY module changes.  It is
1271 *	invoked when a PHY module is removed or inserted for any OS-specific
1272 *	processing.
1273 */
1274void t3_os_phymod_changed(struct adapter *adap, int port_id)
1275{
1276	static const char *mod_str[] = {
1277		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
1278	};
1279	struct port_info *pi = &adap->port[port_id];
1280	int mod = pi->phy.modtype;
1281
1282	if (mod != pi->media.ifm_cur->ifm_data)
1283		cxgb_build_medialist(pi);
1284
1285	if (mod == phy_modtype_none)
1286		if_printf(pi->ifp, "PHY module unplugged\n");
1287	else {
1288		KASSERT(mod < ARRAY_SIZE(mod_str),
1289			("invalid PHY module type %d", mod));
1290		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1291	}
1292}
1293
1294/*
1295 * Interrupt-context handler for external (PHY) interrupts.
1296 */
1297void
1298t3_os_ext_intr_handler(adapter_t *sc)
1299{
1300	if (cxgb_debug)
1301		printf("t3_os_ext_intr_handler\n");
1302	/*
1303	 * Schedule a task to handle external interrupts as they may be slow
1304	 * and we use a mutex to protect MDIO registers.  We disable PHY
1305	 * interrupts in the meantime and let the task reenable them when
1306	 * it's done.
1307	 */
1308	if (sc->slow_intr_mask) {
1309		ADAPTER_LOCK(sc);
1310		sc->slow_intr_mask &= ~F_T3DBG;
1311		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1312		taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
1313		ADAPTER_UNLOCK(sc);
1314	}
1315}
1316
1317void
1318t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1319{
1320
1321	/*
1322	 * The ifnet might not be allocated before this gets called, as this
1323	 * is called early on in attach by t3_prep_adapter, so save the
1324	 * address off in the port structure.
1325	 */
1326	if (cxgb_debug)
1327		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1328	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1329}
1330
1331/*
1332 * Programs the XGMAC based on the settings in the ifnet.  These settings
1333 * include MTU, MAC address, mcast addresses, etc.
1334 */
1335static void
1336cxgb_update_mac_settings(struct port_info *p)
1337{
1338	struct ifnet *ifp = p->ifp;
1339	struct t3_rx_mode rm;
1340	struct cmac *mac = &p->mac;
1341	int mtu, hwtagging;
1342
1343	PORT_LOCK_ASSERT_OWNED(p);
1344
1345	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
1346
1347	mtu = ifp->if_mtu;
1348	if (ifp->if_capenable & IFCAP_VLAN_MTU)
1349		mtu += ETHER_VLAN_ENCAP_LEN;
1350
1351	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
1352
1353	t3_mac_set_mtu(mac, mtu);
1354	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1355	t3_mac_set_address(mac, 0, p->hw_addr);
1356	t3_init_rx_mode(&rm, p);
1357	t3_mac_set_rx_mode(mac, &rm);
1358}
1359
1360
1361static int
1362await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1363			      unsigned long n)
1364{
1365	int attempts = 5;
1366
1367	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1368		if (!--attempts)
1369			return (ETIMEDOUT);
1370		t3_os_sleep(10);
1371	}
1372	return 0;
1373}
1374
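/*
 * Initialize TP memory parity by writing every SMT, L2T, and routing table
 * entry once through management CPL messages, then waiting for the replies on
 * queue set 0.
 */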
1375static int
1376init_tp_parity(struct adapter *adap)
1377{
1378	int i;
1379	struct mbuf *m;
1380	struct cpl_set_tcb_field *greq;
1381	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1382
1383	t3_tp_set_offload_mode(adap, 1);
1384
1385	for (i = 0; i < 16; i++) {
1386		struct cpl_smt_write_req *req;
1387
1388		m = m_gethdr(M_WAITOK, MT_DATA);
1389		req = mtod(m, struct cpl_smt_write_req *);
1390		m->m_len = m->m_pkthdr.len = sizeof(*req);
1391		memset(req, 0, sizeof(*req));
1392		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1393		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1394		req->iff = i;
1395		t3_mgmt_tx(adap, m);
1396	}
1397
1398	for (i = 0; i < 2048; i++) {
1399		struct cpl_l2t_write_req *req;
1400
1401		m = m_gethdr(M_WAITOK, MT_DATA);
1402		req = mtod(m, struct cpl_l2t_write_req *);
1403		m->m_len = m->m_pkthdr.len = sizeof(*req);
1404		memset(req, 0, sizeof(*req));
1405		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1406		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1407		req->params = htonl(V_L2T_W_IDX(i));
1408		t3_mgmt_tx(adap, m);
1409	}
1410
1411	for (i = 0; i < 2048; i++) {
1412		struct cpl_rte_write_req *req;
1413
1414		m = m_gethdr(M_WAITOK, MT_DATA);
1415		req = mtod(m, struct cpl_rte_write_req *);
1416		m->m_len = m->m_pkthdr.len = sizeof(*req);
1417		memset(req, 0, sizeof(*req));
1418		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1419		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1420		req->l2t_idx = htonl(V_L2T_W_IDX(i));
1421		t3_mgmt_tx(adap, m);
1422	}
1423
1424	m = m_gethdr(M_WAITOK, MT_DATA);
1425	greq = mtod(m, struct cpl_set_tcb_field *);
1426	m->m_len = m->m_pkthdr.len = sizeof(*greq);
1427	memset(greq, 0, sizeof(*greq));
1428	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1429	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1430	greq->mask = htobe64(1);
1431	t3_mgmt_tx(adap, m);
1432
1433	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1434	t3_tp_set_offload_mode(adap, 0);
1435	return (i);
1436}
1437
1438/**
1439 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
1440 *	@adap: the adapter
1441 *
1442 *	Sets up RSS to distribute packets to multiple receive queues.  We
1443 *	configure the RSS CPU lookup table to distribute to the number of HW
1444 *	receive queues, and the response queue lookup table to narrow that
1445 *	down to the response queues actually configured for each port.
1446 *	We always configure the RSS mapping for two ports since the mapping
1447 *	table has plenty of entries.
1448 */
1449static void
1450setup_rss(adapter_t *adap)
1451{
1452	int i;
1453	u_int nq[2];
1454	uint8_t cpus[SGE_QSETS + 1];
1455	uint16_t rspq_map[RSS_TABLE_SIZE];
1456
1457	for (i = 0; i < SGE_QSETS; ++i)
1458		cpus[i] = i;
1459	cpus[SGE_QSETS] = 0xff;
1460
1461	nq[0] = nq[1] = 0;
1462	for_each_port(adap, i) {
1463		const struct port_info *pi = adap2pinfo(adap, i);
1464
1465		nq[pi->tx_chan] += pi->nqsets;
1466	}
1467	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1468		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1469		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1470	}
1471
1472	/* Calculate the reverse RSS map table */
1473	for (i = 0; i < SGE_QSETS; ++i)
1474		adap->rrss_map[i] = 0xff;
1475	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1476		if (adap->rrss_map[rspq_map[i]] == 0xff)
1477			adap->rrss_map[rspq_map[i]] = i;
1478
1479	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1480		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1481	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1482	              cpus, rspq_map);
1483
1484}
1485
1486/*
1487 * Sends an mbuf to an offload queue driver
1488 * after dealing with any active network taps.
1489 */
1490static inline int
1491offload_tx(struct t3cdev *tdev, struct mbuf *m)
1492{
1493	int ret;
1494
1495	ret = t3_offload_tx(tdev, m);
1496	return (ret);
1497}
1498
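/*
 * Program one source MAC table entry with the port's MAC address using a
 * CPL_SMT_WRITE_REQ sent down the offload transmit path.
 */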
1499static int
1500write_smt_entry(struct adapter *adapter, int idx)
1501{
1502	struct port_info *pi = &adapter->port[idx];
1503	struct cpl_smt_write_req *req;
1504	struct mbuf *m;
1505
1506	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1507		return (ENOMEM);
1508
1509	req = mtod(m, struct cpl_smt_write_req *);
1510	m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
1511
1512	req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1513	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1514	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
1515	req->iff = idx;
1516	memset(req->src_mac1, 0, sizeof(req->src_mac1));
1517	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1518
1519	m_set_priority(m, 1);
1520
1521	offload_tx(&adapter->tdev, m);
1522
1523	return (0);
1524}
1525
1526static int
1527init_smt(struct adapter *adapter)
1528{
1529	int i;
1530
1531	for_each_port(adapter, i)
1532		write_smt_entry(adapter, i);
1533	return 0;
1534}
1535
1536static void
1537init_port_mtus(adapter_t *adapter)
1538{
1539	unsigned int mtus = ETHERMTU | (ETHERMTU << 16);
1540
1541	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1542}
1543
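/*
 * Send a management work request that binds a queue set to a port and sets
 * its packet-scheduler parameters.
 */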
1544static void
1545send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1546			      int hi, int port)
1547{
1548	struct mbuf *m;
1549	struct mngt_pktsched_wr *req;
1550
1551	m = m_gethdr(M_DONTWAIT, MT_DATA);
1552	if (m) {
1553		req = mtod(m, struct mngt_pktsched_wr *);
1554		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1555		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1556		req->sched = sched;
1557		req->idx = qidx;
1558		req->min = lo;
1559		req->max = hi;
1560		req->binding = port;
1561		m->m_len = m->m_pkthdr.len = sizeof(*req);
1562		t3_mgmt_tx(adap, m);
1563	}
1564}
1565
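/*
 * Bind each port's queue sets to that port's TX channel through the firmware
 * packet scheduler.
 */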
1566static void
1567bind_qsets(adapter_t *sc)
1568{
1569	int i, j;
1570
1571	for (i = 0; i < (sc)->params.nports; ++i) {
1572		const struct port_info *pi = adap2pinfo(sc, i);
1573
1574		for (j = 0; j < pi->nqsets; ++j) {
1575			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1576					  -1, pi->tx_chan);
1577
1578		}
1579	}
1580}
1581
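/*
 * If the protocol SRAM image stored in the EEPROM is older than the version
 * the driver was built against, fetch the matching TPEEPROM_NAME image via
 * firmware(9) and rewrite it in the EEPROM.
 */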
1582static void
1583update_tpeeprom(struct adapter *adap)
1584{
1585#ifdef FIRMWARE_LATEST
1586	const struct firmware *tpeeprom;
1587#else
1588	struct firmware *tpeeprom;
1589#endif
1590
1591	uint32_t version;
1592	unsigned int major, minor;
1593	int ret, len;
1594	char rev, name[32];
1595
1596	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1597
1598	major = G_TP_VERSION_MAJOR(version);
1599	minor = G_TP_VERSION_MINOR(version);
1600	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1601		return;
1602
1603	rev = t3rev2char(adap);
1604	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1605
1606	tpeeprom = firmware_get(name);
1607	if (tpeeprom == NULL) {
1608		device_printf(adap->dev,
1609			      "could not load TP EEPROM: unable to load %s\n",
1610			      name);
1611		return;
1612	}
1613
1614	len = tpeeprom->datasize - 4;
1615
1616	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1617	if (ret)
1618		goto release_tpeeprom;
1619
1620	if (len != TP_SRAM_LEN) {
1621		device_printf(adap->dev,
1622			      "%s length is wrong len=%d expected=%d\n", name,
1623			      len, TP_SRAM_LEN);
1624		goto release_tpeeprom;	/* don't leak the firmware reference */
1625	}
1626
1627	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1628	    TP_SRAM_OFFSET);
1629
1630	if (!ret) {
1631		device_printf(adap->dev,
1632			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1633			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1634	} else
1635		device_printf(adap->dev,
1636			      "Protocol SRAM image update in EEPROM failed\n");
1637
1638release_tpeeprom:
1639	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1640
1641	return;
1642}
1643
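/*
 * Refresh the protocol SRAM: update the copy kept in the EEPROM if needed,
 * then load the TPSRAM_NAME image that matches the chip revision and write it
 * into the TP's SRAM.
 */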
1644static int
1645update_tpsram(struct adapter *adap)
1646{
1647#ifdef FIRMWARE_LATEST
1648	const struct firmware *tpsram;
1649#else
1650	struct firmware *tpsram;
1651#endif
1652	int ret;
1653	char rev, name[32];
1654
1655	rev = t3rev2char(adap);
1656	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1657
1658	update_tpeeprom(adap);
1659
1660	tpsram = firmware_get(name);
1661	if (tpsram == NULL){
1662		device_printf(adap->dev, "could not load TP SRAM\n");
1663		return (EINVAL);
1664	} else
1665		device_printf(adap->dev, "updating TP SRAM\n");
1666
1667	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1668	if (ret)
1669		goto release_tpsram;
1670
1671	ret = t3_set_proto_sram(adap, tpsram->data);
1672	if (ret)
1673		device_printf(adap->dev, "loading protocol SRAM failed\n");
1674
1675release_tpsram:
1676	firmware_put(tpsram, FIRMWARE_UNLOAD);
1677
1678	return ret;
1679}
1680
1681/**
1682 *	cxgb_up - enable the adapter
1683 *	@adap: adapter being enabled
1684 *
1685 *	Called when the first port is enabled, this function performs the
1686 *	actions necessary to make an adapter operational, such as completing
1687 *	the initialization of HW modules, and enabling interrupts.
1688 */
1689static int
1690cxgb_up(struct adapter *sc)
1691{
1692	int err = 0;
1693
1694	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1695	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
1696					   __func__, sc->open_device_map));
1697
1698	if ((sc->flags & FULL_INIT_DONE) == 0) {
1699
1700		if ((sc->flags & FW_UPTODATE) == 0)
1701			if ((err = upgrade_fw(sc)))
1702				goto out;
1703
1704		if ((sc->flags & TPS_UPTODATE) == 0)
1705			if ((err = update_tpsram(sc)))
1706				goto out;
1707
1708		err = t3_init_hw(sc, 0);
1709		if (err)
1710			goto out;
1711
1712		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1713		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1714
1715		err = setup_sge_qsets(sc);
1716		if (err)
1717			goto out;
1718
1719		setup_rss(sc);
1720
1721		t3_intr_clear(sc);
1722		err = cxgb_setup_interrupts(sc);
1723		if (err)
1724			goto out;
1725
1726		t3_add_configured_sysctls(sc);
1727		sc->flags |= FULL_INIT_DONE;
1728	}
1729
1730	t3_intr_clear(sc);
1731	t3_sge_start(sc);
1732	t3_intr_enable(sc);
1733
1734	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1735	    is_offload(sc) && init_tp_parity(sc) == 0)
1736		sc->flags |= TP_PARITY_INIT;
1737
1738	if (sc->flags & TP_PARITY_INIT) {
1739		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
1740		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1741	}
1742
1743	if (!(sc->flags & QUEUES_BOUND)) {
1744		bind_qsets(sc);
1745		sc->flags |= QUEUES_BOUND;
1746	}
1747
1748	t3_sge_reset_adapter(sc);
1749out:
1750	return (err);
1751}
1752
1753/*
1754 * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
1755 * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
1756 * during controller_detach, not here.
1757 */
1758static void
1759cxgb_down(struct adapter *sc)
1760{
1761	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1762
1763	t3_sge_stop(sc);
1764	t3_intr_disable(sc);
1765}
1766
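/*
 * Bring up the offload side of the adapter: mark the offload device open,
 * switch the TP into offload mode, program the MTU table and SMT entries, and
 * notify all registered ULP clients.
 */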
1767static int
1768offload_open(struct port_info *pi)
1769{
1770	struct adapter *sc = pi->adapter;
1771	struct t3cdev *tdev = &sc->tdev;
1772
1773	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1774
1775	setbit(&sc->open_device_map, OFFLOAD_DEVMAP_BIT);
1776
1777	t3_tp_set_offload_mode(sc, 1);
1778	tdev->lldev = pi->ifp;
1779	init_port_mtus(sc);
1780	t3_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd,
1781		     sc->params.rev == 0 ?  sc->port[0].ifp->if_mtu : 0xffff);
1782	init_smt(sc);
1783	cxgb_add_clients(tdev);
1784
1785	return (0);
1786}
1787
1788static int
1789offload_close(struct t3cdev *tdev)
1790{
1791	struct adapter *adapter = tdev2adap(tdev);
1792
1793	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
1794		return (0);
1795
1796	/* Call back all registered clients */
1797	cxgb_remove_clients(tdev);
1798
1799	tdev->lldev = NULL;
1800	cxgb_set_dummy_ops(tdev);
1801	t3_tp_set_offload_mode(adapter, 0);
1802
1803	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1804
1805	return (0);
1806}
1807
1808/*
1809 * Begin a synchronized operation.  If this call succeeds, it is guaranteed that
1810 * no one will remove the port or its ifp from underneath the caller.  Caller is
1811 * also granted exclusive access to open_device_map.
1812 *
1813 * operation here means init, uninit, detach, and ioctl service.
1814 *
1815 * May fail.
1816 * EINTR (ctrl-c pressed during ifconfig for example).
1817 * ENXIO (port is about to detach - due to kldunload for example).
1818 */
1819int
1820cxgb_begin_op(struct port_info *p, const char *wmsg)
1821{
1822	int rc = 0;
1823	struct adapter *sc = p->adapter;
1824
1825	ADAPTER_LOCK(sc);
1826
1827	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1828		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, wmsg, 0)) {
1829			rc = EINTR;
1830			goto done;
1831		}
1832	}
1833
1834	if (IS_DOOMED(p))
1835		rc = ENXIO;
1836	else if (!IS_BUSY(sc))
1837		SET_BUSY(sc);
1838	else {
1839		KASSERT(0, ("%s: port %d, p->flags = %x , sc->flags = %x",
1840			    __func__, p->port_id, p->flags, sc->flags));
1841		rc = EDOOFUS;
1842	}
1843
1844done:
1845	ADAPTER_UNLOCK(sc);
1846	return (rc);
1847}
1848
1849/*
1850 * End a synchronized operation.  Read comment block above cxgb_begin_op.
1851 */
1852int
1853cxgb_end_op(struct port_info *p)
1854{
1855	struct adapter *sc = p->adapter;
1856
1857	ADAPTER_LOCK(sc);
1858	KASSERT(IS_BUSY(sc), ("%s: not busy.", __func__));
1859	CLR_BUSY(sc);
1860	wakeup_one(&sc->flags);
1861	ADAPTER_UNLOCK(sc);
1862
1863	return (0);
1864}
1865
1866/*
1867 * Prepare for port detachment.  Detach is a special kind of synchronized
1868 * operation.  Also read comment before cxgb_begin_op.
1869 */
1870static int
1871cxgb_begin_detach(struct port_info *p)
1872{
1873	struct adapter *sc = p->adapter;
1874
1875	/*
1876	 * Inform those waiting for this port that it is going to be destroyed
1877	 * and they should not continue further.  (They'll return with ENXIO).
1878	 */
1879	ADAPTER_LOCK(sc);
1880	SET_DOOMED(p);
1881	wakeup(&sc->flags);
1882	ADAPTER_UNLOCK(sc);
1883
1884	/*
1885	 * Wait for in-progress operations.
1886	 */
1887	ADAPTER_LOCK(sc);
1888	while (IS_BUSY(sc)) {
1889		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
1890	}
1891	SET_BUSY(sc);
1892	ADAPTER_UNLOCK(sc);
1893
1894	return (0);
1895}
1896
1897/*
1898 * if_init for cxgb ports.
1899 */
1900static void
1901cxgb_init(void *arg)
1902{
1903	struct port_info *p = arg;
1904
1905	if (cxgb_begin_op(p, "cxgbinit"))
1906		return;
1907
1908	cxgb_init_synchronized(p);
1909	cxgb_end_op(p);
1910}
1911
1912static int
1913cxgb_init_synchronized(struct port_info *p)
1914{
1915	struct adapter *sc = p->adapter;
1916	struct ifnet *ifp = p->ifp;
1917	struct cmac *mac = &p->mac;
1918	int i, rc;
1919
1920	if (sc->open_device_map == 0) {
1921		if ((rc = cxgb_up(sc)) != 0)
1922			return (rc);
1923
1924		if (is_offload(sc) && !ofld_disable && offload_open(p))
1925			log(LOG_WARNING,
1926			    "Could not initialize offload capabilities\n");
1927	}
1928
1929	PORT_LOCK(p);
1930	t3_port_intr_enable(sc, p->port_id);
1931	if (!mac->multiport)
1932		t3_mac_init(mac);
1933	cxgb_update_mac_settings(p);
1934	t3_link_start(&p->phy, mac, &p->link_config);
1935	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1936	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1937	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1938	PORT_UNLOCK(p);
1939
1940	t3_link_changed(sc, p->port_id);
1941
1942	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1943		struct sge_qset *qs = &sc->sge.qs[i];
1944		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1945
1946		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
1947				 txq->txq_watchdog.c_cpu);
1948	}
1949
1950	/* all ok */
1951	setbit(&sc->open_device_map, p->port_id);
1952
1953	return (0);
1954}
1955
1956/*
1957 * Called on "ifconfig down", and from port_detach
1958 */
1959static int
1960cxgb_uninit_synchronized(struct port_info *pi)
1961{
1962	struct adapter *sc = pi->adapter;
1963	struct ifnet *ifp = pi->ifp;
1964
1965	/*
1966	 * Clear this port's bit from the open device map, and then drain all
1967	 * the tasks that can access/manipulate this port's port_info or ifp.
1968	 * We disable this port's interrupts here, so the slow/ext interrupt
1969	 * tasks won't be enqueued.  The tick task will continue to be enqueued
1970	 * every second, but runs after this drain will not see this port in
1971	 * the open device map.
1972	 *
1973	 * A well-behaved task must take open_device_map into account and ignore
1974	 * ports that are not open.
1975	 */
1976	clrbit(&sc->open_device_map, pi->port_id);
1977	t3_port_intr_disable(sc, pi->port_id);
1978	taskqueue_drain(sc->tq, &sc->slow_intr_task);
1979	taskqueue_drain(sc->tq, &sc->ext_intr_task);
1980	taskqueue_drain(sc->tq, &sc->tick_task);
1981
1982	PORT_LOCK(pi);
1983	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1984
1985	/* disable pause frames */
1986	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
1987
1988	/* Reset RX FIFO HWM */
1989	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
1990			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1991
1992	DELAY(100);
1993
1994	/* Wait for TXFIFO empty */
1995	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
1996			F_TXFIFO_EMPTY, 1, 20, 5);
1997
1998	DELAY(100);
1999	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
2000
2001
2002	pi->phy.ops->power_down(&pi->phy, 1);
2003
2004	PORT_UNLOCK(pi);
2005
2006	pi->link_config.link_ok = 0;
2007	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
2008
2009	if ((sc->open_device_map & PORT_MASK) == 0)
2010		offload_close(&sc->tdev);
2011
2012	if (sc->open_device_map == 0)
2013		cxgb_down(pi->adapter);
2014
2015	return (0);
2016}
2017
2018#ifdef LRO_SUPPORTED
2019/*
2020 * Mark LRO enabled or disabled in all qsets for this port.
2021 */
2022static int
2023cxgb_set_lro(struct port_info *p, int enabled)
2024{
2025	int i;
2026	struct adapter *adp = p->adapter;
2027	struct sge_qset *q;
2028
2029	PORT_LOCK_ASSERT_OWNED(p);
2030	for (i = 0; i < p->nqsets; i++) {
2031		q = &adp->sge.qs[p->first_qset + i];
2032		q->lro.enabled = (enabled != 0);
2033	}
2034	return (0);
2035}
2036#endif
2037
2038static int
2039cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
2040{
2041	struct port_info *p = ifp->if_softc;
2042	struct ifreq *ifr = (struct ifreq *)data;
2043	int flags, error = 0, mtu, handle_unsynchronized = 0;
2044	uint32_t mask;
2045
2046	if ((error = cxgb_begin_op(p, "cxgbioct")) != 0)
2047		return (error);
2048
2049	/*
2050	 * Only commands that should be handled within begin-op/end-op are
2051	 * serviced in this switch statement.  See handle_unsynchronized.
2052	 */
2053	switch (command) {
2054	case SIOCSIFMTU:
2055		mtu = ifr->ifr_mtu;
2056		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
2057			error = EINVAL;
2058		} else {
2059			ifp->if_mtu = mtu;
2060			PORT_LOCK(p);
2061			cxgb_update_mac_settings(p);
2062			PORT_UNLOCK(p);
2063		}
2064
2065		break;
2066	case SIOCSIFFLAGS:
2067		if (ifp->if_flags & IFF_UP) {
2068			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2069				flags = p->if_flags;
2070				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
2071				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
2072					PORT_LOCK(p);
2073					cxgb_update_mac_settings(p);
2074					PORT_UNLOCK(p);
2075				}
2076			} else
2077				error = cxgb_init_synchronized(p);
2078			p->if_flags = ifp->if_flags;
2079		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2080			error = cxgb_uninit_synchronized(p);
2081
2082		break;
2083	case SIOCADDMULTI:
2084	case SIOCDELMULTI:
2085		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2086			PORT_LOCK(p);
2087			cxgb_update_mac_settings(p);
2088			PORT_UNLOCK(p);
2089		}
2090
2091		break;
2092	case SIOCSIFCAP:
2093		mask = ifr->ifr_reqcap ^ ifp->if_capenable;	/* caps to toggle */
2094		if (mask & IFCAP_TXCSUM) {
2095			if (IFCAP_TXCSUM & ifp->if_capenable) {
2096				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
2097				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
2098				    | CSUM_IP | CSUM_TSO);
2099			} else {
2100				ifp->if_capenable |= IFCAP_TXCSUM;
2101				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
2102				    | CSUM_IP);
2103			}
2104		}
2105		if (mask & IFCAP_RXCSUM) {
2106			ifp->if_capenable ^= IFCAP_RXCSUM;
2107		}
2108		if (mask & IFCAP_TSO4) {
2109			if (IFCAP_TSO4 & ifp->if_capenable) {
2110				ifp->if_capenable &= ~IFCAP_TSO4;
2111				ifp->if_hwassist &= ~CSUM_TSO;
2112			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
2113				ifp->if_capenable |= IFCAP_TSO4;
2114				ifp->if_hwassist |= CSUM_TSO;
2115			} else
2116				error = EINVAL;
2117		}
2118#ifdef LRO_SUPPORTED
2119		if (mask & IFCAP_LRO) {
2120			ifp->if_capenable ^= IFCAP_LRO;
2121
2122			/* Safe to do this even if cxgb_up not called yet */
2123			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
2124		}
2125#endif
2126		if (mask & IFCAP_VLAN_HWTAGGING) {
2127			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2128			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2129				PORT_LOCK(p);
2130				cxgb_update_mac_settings(p);
2131				PORT_UNLOCK(p);
2132			}
2133		}
2134		if (mask & IFCAP_VLAN_MTU) {
2135			ifp->if_capenable ^= IFCAP_VLAN_MTU;
2136			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2137				PORT_LOCK(p);
2138				cxgb_update_mac_settings(p);
2139				PORT_UNLOCK(p);
2140			}
2141		}
2142		if (mask & IFCAP_VLAN_HWCSUM) {
2143			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2144		}
2145
2146#ifdef VLAN_CAPABILITIES
2147		VLAN_CAPABILITIES(ifp);
2148#endif
2149		break;
2150	default:
2151		handle_unsynchronized = 1;
2152		break;
2153	}
2154
2155	/*
2156	 * We don't want to call anything outside the driver while inside a
2157	 * begin-op/end-op block.  If it calls us back (e.g., ether_ioctl may
2158	 * call cxgb_init) we may deadlock if the state is already marked busy.
2159	 *
2160	 * XXX: this probably opens a small race window with kldunload...
2161	 */
2162	cxgb_end_op(p);
2163
2164	/* The IS_DOOMED check is racy, we're clutching at straws here */
2165	if (handle_unsynchronized && !IS_DOOMED(p)) {
2166		if (command == SIOCSIFMEDIA || command == SIOCGIFMEDIA)
2167			error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2168		else
2169			error = ether_ioctl(ifp, command, data);
2170	}
2171
2172	return (error);
2173}
2174
2175static int
2176cxgb_media_change(struct ifnet *ifp)
2177{
2178	return (EOPNOTSUPP);
2179}
2180
2181/*
2182 * Translates phy->modtype to the correct Ethernet media subtype.
2183 */
2184static int
2185cxgb_ifm_type(int mod)
2186{
2187	switch (mod) {
2188	case phy_modtype_sr:
2189		return (IFM_10G_SR);
2190	case phy_modtype_lr:
2191		return (IFM_10G_LR);
2192	case phy_modtype_lrm:
2193		return (IFM_10G_LRM);
2194	case phy_modtype_twinax:
2195		return (IFM_10G_TWINAX);
2196	case phy_modtype_twinax_long:
2197		return (IFM_10G_TWINAX_LONG);
2198	case phy_modtype_none:
2199		return (IFM_NONE);
2200	case phy_modtype_unknown:
2201		return (IFM_UNKNOWN);
2202	}
2203
2204	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2205	return (IFM_UNKNOWN);
2206}
2207
2208/*
2209 * Rebuilds the ifmedia list for this port, and sets the current media.
2210 */
2211static void
2212cxgb_build_medialist(struct port_info *p)
2213{
2214	struct cphy *phy = &p->phy;
2215	struct ifmedia *media = &p->media;
2216	int mod = phy->modtype;
2217	int m = IFM_ETHER | IFM_FDX;
2218
2219	PORT_LOCK(p);
2220
2221	ifmedia_removeall(media);
2222	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
2223		/* Copper (RJ45) */
2224
2225		if (phy->caps & SUPPORTED_10000baseT_Full)
2226			ifmedia_add(media, m | IFM_10G_T, mod, NULL);
2227
2228		if (phy->caps & SUPPORTED_1000baseT_Full)
2229			ifmedia_add(media, m | IFM_1000_T, mod, NULL);
2230
2231		if (phy->caps & SUPPORTED_100baseT_Full)
2232			ifmedia_add(media, m | IFM_100_TX, mod, NULL);
2233
2234		if (phy->caps & SUPPORTED_10baseT_Full)
2235			ifmedia_add(media, m | IFM_10_T, mod, NULL);
2236
2237		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
2238		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2239
2240	} else if (phy->caps & SUPPORTED_TP) {
2241		/* Copper (CX4) */
2242
2243		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
2244			("%s: unexpected cap 0x%x", __func__, phy->caps));
2245
2246		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
2247		ifmedia_set(media, m | IFM_10G_CX4);
2248
2249	} else if (phy->caps & SUPPORTED_FIBRE &&
2250		   phy->caps & SUPPORTED_10000baseT_Full) {
2251		/* 10G optical (but includes SFP+ twinax) */
2252
2253		m |= cxgb_ifm_type(mod);
2254		if (IFM_SUBTYPE(m) == IFM_NONE)
2255			m &= ~IFM_FDX;
2256
2257		ifmedia_add(media, m, mod, NULL);
2258		ifmedia_set(media, m);
2259
2260	} else if (phy->caps & SUPPORTED_FIBRE &&
2261		   phy->caps & SUPPORTED_1000baseT_Full) {
2262		/* 1G optical */
2263
2264		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
2265		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
2266		ifmedia_set(media, m | IFM_1000_SX);
2267
2268	} else {
2269		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
2270			    phy->caps));
2271	}
2272
2273	PORT_UNLOCK(p);
2274}
2275
2276static void
2277cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2278{
2279	struct port_info *p = ifp->if_softc;
2280	struct ifmedia_entry *cur = p->media.ifm_cur;
2281	int speed = p->link_config.speed;
2282
2283	if (cur->ifm_data != p->phy.modtype) {
2284		cxgb_build_medialist(p);
2285		cur = p->media.ifm_cur;
2286	}
2287
2288	ifmr->ifm_status = IFM_AVALID;
2289	if (!p->link_config.link_ok)
2290		return;
2291
2292	ifmr->ifm_status |= IFM_ACTIVE;
2293
2294	/*
2295	 * active and current will differ iff current media is autoselect.  That
2296	 * can happen only for copper RJ45.
2297	 */
2298	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2299		return;
2300	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
2301		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
2302
2303	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2304	if (speed == SPEED_10000)
2305		ifmr->ifm_active |= IFM_10G_T;
2306	else if (speed == SPEED_1000)
2307		ifmr->ifm_active |= IFM_1000_T;
2308	else if (speed == SPEED_100)
2309		ifmr->ifm_active |= IFM_100_TX;
2310	else if (speed == SPEED_10)
2311		ifmr->ifm_active |= IFM_10_T;
2312	else
2313		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
2314			    speed));
2315}
2316
2317static void
2318cxgb_async_intr(void *data)
2319{
2320	adapter_t *sc = data;
2321
2322	if (cxgb_debug)
2323		device_printf(sc->dev, "cxgb_async_intr\n");
2324	/*
2325	 * May need to sleep - defer to taskqueue
2326	 */
2327	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2328}
2329
2330static void
2331cxgb_ext_intr_handler(void *arg, int count)
2332{
2333	adapter_t *sc = (adapter_t *)arg;
2334
2335	if (cxgb_debug)
2336		printf("cxgb_ext_intr_handler\n");
2337
2338	t3_phy_intr_handler(sc);
2339
2340	/* Now reenable external interrupts */
2341	ADAPTER_LOCK(sc);
2342	if (sc->slow_intr_mask) {
2343		sc->slow_intr_mask |= F_T3DBG;
2344		t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
2345		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
2346	}
2347	ADAPTER_UNLOCK(sc);
2348}
2349
2350static inline int
2351link_poll_needed(struct port_info *p)
2352{
2353	struct cphy *phy = &p->phy;
2354
2355	if (phy->caps & POLL_LINK_1ST_TIME) {
2356		p->phy.caps &= ~POLL_LINK_1ST_TIME;
2357		return (1);
2358	}
2359
2360	return (p->link_fault || !(phy->caps & SUPPORTED_LINK_IRQ));
2361}
2362
2363static void
2364check_link_status(adapter_t *sc)
2365{
2366	int i;
2367
2368	for (i = 0; i < (sc)->params.nports; ++i) {
2369		struct port_info *p = &sc->port[i];
2370
2371		if (!isset(&sc->open_device_map, p->port_id))
2372			continue;
2373
2374		if (link_poll_needed(p))
2375			t3_link_changed(sc, i);
2376	}
2377}
2378
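/*
 * Periodic MAC health check for T3 rev B2 adapters.  (Descriptive comment
 * added here; the behaviour is inferred from the code below.)  For every open
 * port with a healthy link, run the B2 MAC watchdog: a return value of 1 is
 * counted in num_toggled, while 2 means the watchdog reset the MAC, so the
 * MAC and link are reprogrammed, port interrupts are re-enabled, and
 * num_resets is bumped.
 */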
2379static void
2380check_t3b2_mac(struct adapter *sc)
2381{
2382	int i;
2383
2384	if (sc->flags & CXGB_SHUTDOWN)
2385		return;
2386
2387	for_each_port(sc, i) {
2388		struct port_info *p = &sc->port[i];
2389		int status;
2390#ifdef INVARIANTS
2391		struct ifnet *ifp = p->ifp;
2392#endif
2393
2394		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2395		    !p->link_config.link_ok)
2396			continue;
2397
2398		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2399			("%s: state mismatch (drv_flags %x, device_map %x)",
2400			 __func__, ifp->if_drv_flags, sc->open_device_map));
2401
2402		PORT_LOCK(p);
2403		status = t3b2_mac_watchdog_task(&p->mac);
2404		if (status == 1)
2405			p->mac.stats.num_toggled++;
2406		else if (status == 2) {
2407			struct cmac *mac = &p->mac;
2408
2409			cxgb_update_mac_settings(p);
2410			t3_link_start(&p->phy, mac, &p->link_config);
2411			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2412			t3_port_intr_enable(sc, p->port_id);
2413			p->mac.stats.num_resets++;
2414		}
2415		PORT_UNLOCK(p);
2416	}
2417}
2418
2419static void
2420cxgb_tick(void *arg)
2421{
2422	adapter_t *sc = (adapter_t *)arg;
2423
2424	if (sc->flags & CXGB_SHUTDOWN)
2425		return;
2426
2427	taskqueue_enqueue(sc->tq, &sc->tick_task);
2428	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
2429}
2430
2431static void
2432cxgb_tick_handler(void *arg, int count)
2433{
2434	adapter_t *sc = (adapter_t *)arg;
2435	const struct adapter_params *p = &sc->params;
2436	int i;
2437	uint32_t cause, reset;
2438
2439	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
2440		return;
2441
2442	check_link_status(sc);
2443
2444	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2445		check_t3b2_mac(sc);
2446
2447	cause = t3_read_reg(sc, A_SG_INT_CAUSE);
2448	reset = 0;
2449	if (cause & F_FLEMPTY) {
2450		struct sge_qset *qs = &sc->sge.qs[0];
2451
2452		i = 0;
2453		reset |= F_FLEMPTY;
2454
2455		cause = (t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) >>
2456			 S_FL0EMPTY) & 0xffff;
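		/*
		 * Descriptive comment: the status bits cover the free lists of
		 * consecutive qsets, two bits per qset (fl[0] then fl[1]).
		 * The loop below walks the bits with i toggling between the
		 * two free lists and qs advancing after every second bit.
		 */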
2457		while (cause) {
2458			qs->fl[i].empty += (cause & 1);
2459			if (i)
2460				qs++;
2461			i ^= 1;
2462			cause >>= 1;
2463		}
2464	}
2465	t3_write_reg(sc, A_SG_INT_CAUSE, reset);
2466
2467	for (i = 0; i < sc->params.nports; i++) {
2468		struct port_info *pi = &sc->port[i];
2469		struct ifnet *ifp = pi->ifp;
2470		struct cmac *mac = &pi->mac;
2471		struct mac_stats *mstats = &mac->stats;
2472
2473		if (!isset(&sc->open_device_map, pi->port_id))
2474			continue;
2475
2476		PORT_LOCK(pi);
2477		t3_mac_update_stats(mac);
2478		PORT_UNLOCK(pi);
2479
2480		ifp->if_opackets =
2481		    mstats->tx_frames_64 +
2482		    mstats->tx_frames_65_127 +
2483		    mstats->tx_frames_128_255 +
2484		    mstats->tx_frames_256_511 +
2485		    mstats->tx_frames_512_1023 +
2486		    mstats->tx_frames_1024_1518 +
2487		    mstats->tx_frames_1519_max;
2488
2489		ifp->if_ipackets =
2490		    mstats->rx_frames_64 +
2491		    mstats->rx_frames_65_127 +
2492		    mstats->rx_frames_128_255 +
2493		    mstats->rx_frames_256_511 +
2494		    mstats->rx_frames_512_1023 +
2495		    mstats->rx_frames_1024_1518 +
2496		    mstats->rx_frames_1519_max;
2497
2498		ifp->if_obytes = mstats->tx_octets;
2499		ifp->if_ibytes = mstats->rx_octets;
2500		ifp->if_omcasts = mstats->tx_mcast_frames;
2501		ifp->if_imcasts = mstats->rx_mcast_frames;
2502
2503		ifp->if_collisions =
2504		    mstats->tx_total_collisions;
2505
2506		ifp->if_iqdrops = mstats->rx_cong_drops;
2507
2508		ifp->if_oerrors =
2509		    mstats->tx_excess_collisions +
2510		    mstats->tx_underrun +
2511		    mstats->tx_len_errs +
2512		    mstats->tx_mac_internal_errs +
2513		    mstats->tx_excess_deferral +
2514		    mstats->tx_fcs_errs;
2515		ifp->if_ierrors =
2516		    mstats->rx_jabber +
2517		    mstats->rx_data_errs +
2518		    mstats->rx_sequence_errs +
2519		    mstats->rx_runt +
2520		    mstats->rx_too_long +
2521		    mstats->rx_mac_internal_errs +
2522		    mstats->rx_short +
2523		    mstats->rx_fcs_errs;
2524
2525		if (mac->multiport)
2526			continue;
2527
2528		/* Count rx fifo overflows, once per second */
2529		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
2530		reset = 0;
2531		if (cause & F_RXFIFO_OVERFLOW) {
2532			mac->stats.rx_fifo_ovfl++;
2533			reset |= F_RXFIFO_OVERFLOW;
2534		}
2535		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
2536	}
2537}
2538
2539static void
2540touch_bars(device_t dev)
2541{
2542	/*
2543	 * Don't enable yet
2544	 */
2545#if !defined(__LP64__) && 0
2546	u32 v;
2547
2548	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2549	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2550	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2551	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2552	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2553	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2554#endif
2555}
2556
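/*
 * Summary of the code below: write `len' bytes at `offset' into the serial
 * EEPROM.  The EEPROM is accessed in 32-bit words, so an update that is not
 * 4-byte aligned at both ends is done as a read-modify-write: the first and
 * last words are read into a scratch buffer, the new bytes are merged in, and
 * whole words are written back.  Write protection is dropped for the update
 * and restored afterwards.
 */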
2557static int
2558set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2559{
2560	uint8_t *buf;
2561	int err = 0;
2562	u32 aligned_offset, aligned_len, *p;
2563	struct adapter *adapter = pi->adapter;
2564
2565
2566	aligned_offset = offset & ~3;
2567	aligned_len = (len + (offset & 3) + 3) & ~3;
2568
2569	if (aligned_offset != offset || aligned_len != len) {
2570		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2571		if (!buf)
2572			return (ENOMEM);
2573		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2574		if (!err && aligned_len > 4)
2575			err = t3_seeprom_read(adapter,
2576					      aligned_offset + aligned_len - 4,
2577					      (u32 *)&buf[aligned_len - 4]);
2578		if (err)
2579			goto out;
2580		memcpy(buf + (offset & 3), data, len);
2581	} else
2582		buf = (uint8_t *)(uintptr_t)data;
2583
2584	err = t3_seeprom_wp(adapter, 0);
2585	if (err)
2586		goto out;
2587
2588	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2589		err = t3_seeprom_write(adapter, aligned_offset, *p);
2590		aligned_offset += 4;
2591	}
2592
2593	if (!err)
2594		err = t3_seeprom_wp(adapter, 1);
2595out:
2596	if (buf != data)
2597		free(buf, M_DEVBUF);
2598	return (err);
2599}
2600
2601
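/*
 * Range-check helper for the ioctls below: a negative value means "parameter
 * not specified" and always passes, otherwise the value must lie in [lo, hi].
 */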
2602static int
2603in_range(int val, int lo, int hi)
2604{
2605	return (val < 0 || (val <= hi && val >= lo));
2606}
2607
2608static int
2609cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
2610{
2611	return (0);
2612}
2613
2614static int
2615cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2616{
2617	return (0);
2618}
2619
2620static int
2621cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2622    int fflag, struct thread *td)
2623{
2624	int mmd, error = 0;
2625	struct port_info *pi = dev->si_drv1;
2626	adapter_t *sc = pi->adapter;
2627
2628#ifdef PRIV_SUPPORTED
2629	if (priv_check(td, PRIV_DRIVER)) {
2630		if (cxgb_debug)
2631			printf("user does not have access to privileged ioctls\n");
2632		return (EPERM);
2633	}
2634#else
2635	if (suser(td)) {
2636		if (cxgb_debug)
2637			printf("user does not have access to privileged ioctls\n");
2638		return (EPERM);
2639	}
2640#endif
2641
2642	switch (cmd) {
2643	case CHELSIO_GET_MIIREG: {
2644		uint32_t val;
2645		struct cphy *phy = &pi->phy;
2646		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2647
2648		if (!phy->mdio_read)
2649			return (EOPNOTSUPP);
2650		if (is_10G(sc)) {
2651			mmd = mid->phy_id >> 8;
2652			if (!mmd)
2653				mmd = MDIO_DEV_PCS;
2654			else if (mmd > MDIO_DEV_VEND2)
2655				return (EINVAL);
2656
2657			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2658					     mid->reg_num, &val);
2659		} else
2660		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2661					     mid->reg_num & 0x1f, &val);
2662		if (error == 0)
2663			mid->val_out = val;
2664		break;
2665	}
2666	case CHELSIO_SET_MIIREG: {
2667		struct cphy *phy = &pi->phy;
2668		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2669
2670		if (!phy->mdio_write)
2671			return (EOPNOTSUPP);
2672		if (is_10G(sc)) {
2673			mmd = mid->phy_id >> 8;
2674			if (!mmd)
2675				mmd = MDIO_DEV_PCS;
2676			else if (mmd > MDIO_DEV_VEND2)
2677				return (EINVAL);
2678
2679			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2680					      mmd, mid->reg_num, mid->val_in);
2681		} else
2682			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2683					      mid->reg_num & 0x1f,
2684					      mid->val_in);
2685		break;
2686	}
2687	case CHELSIO_SETREG: {
2688		struct ch_reg *edata = (struct ch_reg *)data;
2689		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2690			return (EFAULT);
2691		t3_write_reg(sc, edata->addr, edata->val);
2692		break;
2693	}
2694	case CHELSIO_GETREG: {
2695		struct ch_reg *edata = (struct ch_reg *)data;
2696		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2697			return (EFAULT);
2698		edata->val = t3_read_reg(sc, edata->addr);
2699		break;
2700	}
2701	case CHELSIO_GET_SGE_CONTEXT: {
2702		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2703		mtx_lock_spin(&sc->sge.reg_lock);
2704		switch (ecntxt->cntxt_type) {
2705		case CNTXT_TYPE_EGRESS:
2706			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2707			    ecntxt->data);
2708			break;
2709		case CNTXT_TYPE_FL:
2710			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2711			    ecntxt->data);
2712			break;
2713		case CNTXT_TYPE_RSP:
2714			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2715			    ecntxt->data);
2716			break;
2717		case CNTXT_TYPE_CQ:
2718			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2719			    ecntxt->data);
2720			break;
2721		default:
2722			error = EINVAL;
2723			break;
2724		}
2725		mtx_unlock_spin(&sc->sge.reg_lock);
2726		break;
2727	}
2728	case CHELSIO_GET_SGE_DESC: {
2729		struct ch_desc *edesc = (struct ch_desc *)data;
2730		int ret;
2731		if (edesc->queue_num >= SGE_QSETS * 6)
2732			return (EINVAL);
2733		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2734		    edesc->queue_num % 6, edesc->idx, edesc->data);
2735		if (ret < 0)
2736			return (EINVAL);
2737		edesc->size = ret;
2738		break;
2739	}
2740	case CHELSIO_GET_QSET_PARAMS: {
2741		struct qset_params *q;
2742		struct ch_qset_params *t = (struct ch_qset_params *)data;
2743		int q1 = pi->first_qset;
2744		int nqsets = pi->nqsets;
2745		int i;
2746
2747		if (t->qset_idx >= nqsets)
2748			return (EINVAL);
2749
2750		i = q1 + t->qset_idx;
2751		q = &sc->params.sge.qset[i];
2752		t->rspq_size   = q->rspq_size;
2753		t->txq_size[0] = q->txq_size[0];
2754		t->txq_size[1] = q->txq_size[1];
2755		t->txq_size[2] = q->txq_size[2];
2756		t->fl_size[0]  = q->fl_size;
2757		t->fl_size[1]  = q->jumbo_size;
2758		t->polling     = q->polling;
2759		t->lro         = q->lro;
2760		t->intr_lat    = q->coalesce_usecs;
2761		t->cong_thres  = q->cong_thres;
2762		t->qnum        = i;
2763
2764		if (sc->flags & USING_MSIX)
2765			t->vector = rman_get_start(sc->msix_irq_res[i]);
2766		else
2767			t->vector = rman_get_start(sc->irq_res);
2768
2769		break;
2770	}
2771	case CHELSIO_GET_QSET_NUM: {
2772		struct ch_reg *edata = (struct ch_reg *)data;
2773		edata->val = pi->nqsets;
2774		break;
2775	}
2776	case CHELSIO_LOAD_FW: {
2777		uint8_t *fw_data;
2778		uint32_t vers;
2779		struct ch_mem_range *t = (struct ch_mem_range *)data;
2780
2781		/*
2782		 * You're allowed to load a firmware image only before FULL_INIT_DONE.
2783		 *
2784		 * FW_UPTODATE is also set so the rest of the initialization
2785		 * will not overwrite what was loaded here.  This gives you the
2786		 * flexibility to load any firmware (and maybe shoot yourself in
2787		 * the foot).
2788		 */
2789
2790		ADAPTER_LOCK(sc);
2791		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2792			ADAPTER_UNLOCK(sc);
2793			return (EBUSY);
2794		}
2795
2796		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2797		if (!fw_data)
2798			error = ENOMEM;
2799		else
2800			error = copyin(t->buf, fw_data, t->len);
2801
2802		if (!error)
2803			error = -t3_load_fw(sc, fw_data, t->len);
2804
2805		if (t3_get_fw_version(sc, &vers) == 0) {
2806			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2807			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2808			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2809		}
2810
2811		if (!error)
2812			sc->flags |= FW_UPTODATE;
2813
2814		free(fw_data, M_DEVBUF);
2815		ADAPTER_UNLOCK(sc);
2816		break;
2817	}
2818	case CHELSIO_LOAD_BOOT: {
2819		uint8_t *boot_data;
2820		struct ch_mem_range *t = (struct ch_mem_range *)data;
2821
2822		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2823		if (!boot_data)
2824			return (ENOMEM);
2825
2826		error = copyin(t->buf, boot_data, t->len);
2827		if (!error)
2828			error = -t3_load_boot(sc, boot_data, t->len);
2829
2830		free(boot_data, M_DEVBUF);
2831		break;
2832	}
2833	case CHELSIO_GET_PM: {
2834		struct ch_pm *m = (struct ch_pm *)data;
2835		struct tp_params *p = &sc->params.tp;
2836
2837		if (!is_offload(sc))
2838			return (EOPNOTSUPP);
2839
2840		m->tx_pg_sz = p->tx_pg_size;
2841		m->tx_num_pg = p->tx_num_pgs;
2842		m->rx_pg_sz  = p->rx_pg_size;
2843		m->rx_num_pg = p->rx_num_pgs;
2844		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
2845
2846		break;
2847	}
2848	case CHELSIO_SET_PM: {
2849		struct ch_pm *m = (struct ch_pm *)data;
2850		struct tp_params *p = &sc->params.tp;
2851
2852		if (!is_offload(sc))
2853			return (EOPNOTSUPP);
2854		if (sc->flags & FULL_INIT_DONE)
2855			return (EBUSY);
2856
2857		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2858		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2859			return (EINVAL);	/* not power of 2 */
2860		if (!(m->rx_pg_sz & 0x14000))
2861			return (EINVAL);	/* not 16KB or 64KB */
2862		if (!(m->tx_pg_sz & 0x1554000))
2863			return (EINVAL);	/* not 16KB * 4^n, n = 0..5 */
2864		if (m->tx_num_pg == -1)
2865			m->tx_num_pg = p->tx_num_pgs;
2866		if (m->rx_num_pg == -1)
2867			m->rx_num_pg = p->rx_num_pgs;
2868		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2869			return (EINVAL);
2870		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2871		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2872			return (EINVAL);
2873
2874		p->rx_pg_size = m->rx_pg_sz;
2875		p->tx_pg_size = m->tx_pg_sz;
2876		p->rx_num_pgs = m->rx_num_pg;
2877		p->tx_num_pgs = m->tx_num_pg;
2878		break;
2879	}
2880	case CHELSIO_SETMTUTAB: {
2881		struct ch_mtus *m = (struct ch_mtus *)data;
2882		int i;
2883
2884		if (!is_offload(sc))
2885			return (EOPNOTSUPP);
2886		if (offload_running(sc))
2887			return (EBUSY);
2888		if (m->nmtus != NMTUS)
2889			return (EINVAL);
2890		if (m->mtus[0] < 81)         /* accommodate SACK */
2891			return (EINVAL);
2892
2893		/*
2894		 * MTUs must be in ascending order
2895		 */
2896		for (i = 1; i < NMTUS; ++i)
2897			if (m->mtus[i] < m->mtus[i - 1])
2898				return (EINVAL);
2899
2900		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2901		break;
2902	}
2903	case CHELSIO_GETMTUTAB: {
2904		struct ch_mtus *m = (struct ch_mtus *)data;
2905
2906		if (!is_offload(sc))
2907			return (EOPNOTSUPP);
2908
2909		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2910		m->nmtus = NMTUS;
2911		break;
2912	}
2913	case CHELSIO_GET_MEM: {
2914		struct ch_mem_range *t = (struct ch_mem_range *)data;
2915		struct mc7 *mem;
2916		uint8_t *useraddr;
2917		u64 buf[32];
2918
2919		/*
2920		 * Use these to avoid modifying len/addr in the return
2921		 * struct.
2922		 */
2923		uint32_t len = t->len, addr = t->addr;
2924
2925		if (!is_offload(sc))
2926			return (EOPNOTSUPP);
2927		if (!(sc->flags & FULL_INIT_DONE))
2928			return (EIO);         /* need the memory controllers */
2929		if ((addr & 0x7) || (len & 0x7))
2930			return (EINVAL);
2931		if (t->mem_id == MEM_CM)
2932			mem = &sc->cm;
2933		else if (t->mem_id == MEM_PMRX)
2934			mem = &sc->pmrx;
2935		else if (t->mem_id == MEM_PMTX)
2936			mem = &sc->pmtx;
2937		else
2938			return (EINVAL);
2939
2940		/*
2941		 * Version scheme:
2942		 * bits 0..9: chip version
2943		 * bits 10..15: chip revision
2944		 */
2945		t->version = 3 | (sc->params.rev << 10);
2946
2947		/*
2948		 * Read 256 bytes at a time as len can be large and we don't
2949		 * want to use huge intermediate buffers.
2950		 */
2951		useraddr = (uint8_t *)t->buf;
2952		while (len) {
2953			unsigned int chunk = min(len, sizeof(buf));
2954
2955			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2956			if (error)
2957				return (-error);
2958			if (copyout(buf, useraddr, chunk))
2959				return (EFAULT);
2960			useraddr += chunk;
2961			addr += chunk;
2962			len -= chunk;
2963		}
2964		break;
2965	}
2966	case CHELSIO_READ_TCAM_WORD: {
2967		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2968
2969		if (!is_offload(sc))
2970			return (EOPNOTSUPP);
2971		if (!(sc->flags & FULL_INIT_DONE))
2972			return (EIO);         /* need MC5 */
2973		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2974		break;
2975	}
2976	case CHELSIO_SET_TRACE_FILTER: {
2977		struct ch_trace *t = (struct ch_trace *)data;
2978		const struct trace_params *tp;
2979
2980		tp = (const struct trace_params *)&t->sip;
2981		if (t->config_tx)
2982			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2983					       t->trace_tx);
2984		if (t->config_rx)
2985			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2986					       t->trace_rx);
2987		break;
2988	}
2989	case CHELSIO_SET_PKTSCHED: {
2990		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2991		if (sc->open_device_map == 0)
2992			return (EAGAIN);
2993		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2994		    p->binding);
2995		break;
2996	}
2997	case CHELSIO_IFCONF_GETREGS: {
2998		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2999		int reglen = cxgb_get_regs_len();
3000		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
3001		if (buf == NULL) {
3002			return (ENOMEM);
3003		}
3004		if (regs->len > reglen)
3005			regs->len = reglen;
3006		else if (regs->len < reglen)
3007			error = ENOBUFS;
3008
3009		if (!error) {
3010			cxgb_get_regs(sc, regs, buf);
3011			error = copyout(buf, regs->data, reglen);
3012		}
3013		free(buf, M_DEVBUF);
3014
3015		break;
3016	}
3017	case CHELSIO_SET_HW_SCHED: {
3018		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
3019		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
3020
3021		if ((sc->flags & FULL_INIT_DONE) == 0)
3022			return (EAGAIN);       /* need TP to be initialized */
3023		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
3024		    !in_range(t->channel, 0, 1) ||
3025		    !in_range(t->kbps, 0, 10000000) ||
3026		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
3027		    !in_range(t->flow_ipg, 0,
3028			      dack_ticks_to_usec(sc, 0x7ff)))
3029			return (EINVAL);
3030
3031		if (t->kbps >= 0) {
3032			error = t3_config_sched(sc, t->kbps, t->sched);
3033			if (error < 0)
3034				return (-error);
3035		}
3036		if (t->class_ipg >= 0)
3037			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
3038		if (t->flow_ipg >= 0) {
3039			t->flow_ipg *= 1000;     /* us -> ns */
3040			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
3041		}
3042		if (t->mode >= 0) {
3043			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
3044
3045			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
3046					 bit, t->mode ? bit : 0);
3047		}
3048		if (t->channel >= 0)
3049			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
3050					 1 << t->sched, t->channel << t->sched);
3051		break;
3052	}
3053	case CHELSIO_GET_EEPROM: {
3054		int i;
3055		struct ch_eeprom *e = (struct ch_eeprom *)data;
3056		uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
3057
3058		if (buf == NULL) {
3059			return (ENOMEM);
3060		}
3061		e->magic = EEPROM_MAGIC;
3062		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
3063			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
3064
3065		if (!error)
3066			error = copyout(buf + e->offset, e->data, e->len);
3067
3068		free(buf, M_DEVBUF);
3069		break;
3070	}
3071	case CHELSIO_CLEAR_STATS: {
3072		if (!(sc->flags & FULL_INIT_DONE))
3073			return (EAGAIN);
3074
3075		PORT_LOCK(pi);
3076		t3_mac_update_stats(&pi->mac);
3077		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
3078		PORT_UNLOCK(pi);
3079		break;
3080	}
3081	case CHELSIO_GET_UP_LA: {
3082		struct ch_up_la *la = (struct ch_up_la *)data;
3083		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
3084		if (buf == NULL) {
3085			return (ENOMEM);
3086		}
3087		if (la->bufsize < LA_BUFSIZE)
3088			error = ENOBUFS;
3089
3090		if (!error)
3091			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
3092					      &la->bufsize, buf);
3093		if (!error)
3094			error = copyout(buf, la->data, la->bufsize);
3095
3096		free(buf, M_DEVBUF);
3097		break;
3098	}
3099	case CHELSIO_GET_UP_IOQS: {
3100		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
3101		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
3102		uint32_t *v;
3103
3104		if (buf == NULL) {
3105			return (ENOMEM);
3106		}
3107		if (ioqs->bufsize < IOQS_BUFSIZE)
3108			error = ENOBUFS;
3109
3110		if (!error)
3111			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3112
3113		if (!error) {
3114			v = (uint32_t *)buf;
3115
3116			ioqs->bufsize -= 4 * sizeof(uint32_t);
3117			ioqs->ioq_rx_enable = *v++;
3118			ioqs->ioq_tx_enable = *v++;
3119			ioqs->ioq_rx_status = *v++;
3120			ioqs->ioq_tx_status = *v++;
3121
3122			error = copyout(v, ioqs->data, ioqs->bufsize);
3123		}
3124
3125		free(buf, M_DEVBUF);
3126		break;
3127	}
3128	default:
3129		return (EOPNOTSUPP);
3130		break;
3131	}
3132
3133	return (error);
3134}
3135
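/*
 * Copy the inclusive register range [start, end] into the dump buffer.  Each
 * register lands at its own offset within `buf', so the buffer ends up as a
 * sparse image of the register map (see cxgb_get_regs below).
 */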
3136static __inline void
3137reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3138    unsigned int end)
3139{
3140	uint32_t *p = (uint32_t *)(buf + start);
3141
3142	for ( ; start <= end; start += sizeof(uint32_t))
3143		*p++ = t3_read_reg(ap, start);
3144}
3145
3146#define T3_REGMAP_SIZE (3 * 1024)
3147static int
3148cxgb_get_regs_len(void)
3149{
3150	return T3_REGMAP_SIZE;
3151}
3152
3153static void
3154cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
3155{
3156
3157	/*
3158	 * Version scheme:
3159	 * bits 0..9: chip version
3160	 * bits 10..15: chip revision
3161	 * bit 31: set for PCIe cards
3162	 */
3163	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
3164
3165	/*
3166	 * We skip the MAC statistics registers because they are clear-on-read.
3167	 * Also reading multi-register stats would need to synchronize with the
3168	 * periodic mac stats accumulation.  Hard to justify the complexity.
3169	 */
3170	memset(buf, 0, cxgb_get_regs_len());
3171	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
3172	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
3173	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
3174	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
3175	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
3176	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
3177		       XGM_REG(A_XGM_SERDES_STAT3, 1));
3178	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
3179		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
3180}
3181
3182
3183MODULE_DEPEND(if_cxgb, cxgb_t3fw, 1, 1, 1);
3184