/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.

2. Neither the name of the Chelsio Corporation nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 172105 2007-09-09 20:26:02Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif

#ifdef PRIV_SUPPORTED
#include <sys/priv.h>
#endif

static int cxgb_setup_msix(adapter_t *, int);
static void cxgb_teardown_msix(adapter_t *);
static void cxgb_init(void *);
static void cxgb_init_locked(struct port_info *);
static void cxgb_stop_locked(struct port_info *);
static void cxgb_set_rxmode(struct port_info *);
static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
static void cxgb_start(struct ifnet *);
static void cxgb_start_proc(void *, int ncount);
static int cxgb_media_change(struct ifnet *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_ext_intr_handler(void *, int);
static void cxgb_tick_handler(void *, int);
static void cxgb_down_locked(struct adapter *sc);
static void cxgb_tick(void *);
static void setup_rss(adapter_t *sc);

/* Attachment glue for the PCI controller end of the device.  Each port of
 * the device is attached separately, as defined later.
 */
static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end);
static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
static int cxgb_get_regs_len(void);
static int offload_open(struct port_info *pi);
static void touch_bars(device_t dev);

#ifdef notyet
static int offload_close(struct toedev *tdev);
#endif

static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe,		cxgb_controller_probe),
	DEVMETHOD(device_attach,	cxgb_controller_attach),
	DEVMETHOD(device_detach,	cxgb_controller_detach),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t cxgb_controller_driver = {
	"cxgbc",
	cxgb_controller_methods,
	sizeof(struct adapter)
};

static devclass_t cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);

/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);

static device_method_t cxgb_port_methods[] = {
	DEVMETHOD(device_probe,		cxgb_port_probe),
	DEVMETHOD(device_attach,	cxgb_port_attach),
	DEVMETHOD(device_detach,	cxgb_port_detach),
	{ 0, 0 }
};

static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

static struct cdevsw cxgb_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	cxgb_extension_open,
	.d_close =	cxgb_extension_close,
	.d_ioctl =	cxgb_extension_ioctl,
	.d_name =	"cxgb",
};

static devclass_t cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);

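/* One MSI-X vector per queue set, plus one for slow-path (link/error) events. */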
#define SGE_MSIX_COUNT (SGE_QSETS + 1)

extern int collapse_mbufs;
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");
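/*
 * As a loader(8) tunable, msi_allowed can also be set at boot, e.g. in
 * /boot/loader.conf (illustrative value forcing plain MSI):
 *
 *	hw.cxgb.msi_allowed="1"
 */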

/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;
TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
    "disable ULP offload");

/*
 * The driver currently defaults to a single queue-set per port (singleq = 1).
 * Set singleq = 0 to enable the auto-queue algorithm, which uses multiple
 * queue-sets per port when MSI-X is available.
 */
static int singleq = 1;
TUNABLE_INT("hw.cxgb.singleq", &singleq);
SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
    "use a single queue-set per port");

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32,
	MIN_FL_JUMBO_ENTRIES = 32
};

struct filter_info {
	u32 sip;
	u32 sip_mask;
	u32 dip;
	u16 sport;
	u16 dport;
	u32 vlan:12;
	u32 vlan_prio:3;
	u32 mac_hit:1;
	u32 mac_idx:4;
	u32 mac_vld:1;
	u32 pkt_type:2;
	u32 report_filter_id:1;
	u32 pass:1;
	u32 rss:1;
	u32 qset:3;
	u32 locked:1;
	u32 valid:1;
};

enum { FILTER_NO_VLAN_PRI = 7 };

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Table for probing the cards.  The desc field isn't actually used */
struct cxgb_ident {
	uint16_t	vendor;
	uint16_t	device;
	int		index;
	char		*desc;
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{0, 0, 0, NULL}
};

static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);

static inline char
t3rev2char(struct adapter *adapter)
{
	char rev = 'z';

	switch(adapter->params.rev) {
	case T3_REV_A:
		rev = 'a';
		break;
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static struct cxgb_ident *
cxgb_get_ident(device_t dev)
{
	struct cxgb_ident *id;

	for (id = cxgb_identifiers; id->desc != NULL; id++) {
		if ((id->vendor == pci_get_vendor(dev)) &&
		    (id->device == pci_get_device(dev))) {
			return (id);
		}
	}
	return (NULL);
}

static const struct adapter_info *
cxgb_get_adapter_info(device_t dev)
{
	struct cxgb_ident *id;
	const struct adapter_info *ai;

	id = cxgb_get_ident(dev);
	if (id == NULL)
		return (NULL);

	ai = t3_get_adapter_info(id->index);

	return (ai);
}

static int
cxgb_controller_probe(device_t dev)
{
	const struct adapter_info *ai;
	char *ports, buf[80];
	int nports;

	ai = cxgb_get_adapter_info(dev);
	if (ai == NULL)
		return (ENXIO);

	nports = ai->nports0 + ai->nports1;
	if (nports == 1)
		ports = "port";
	else
		ports = "ports";

	snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, nports, ports);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_DEFAULT);
}

#define FW_FNAME "t3fw%d%d%d"
#define TPEEPROM_NAME "t3%ctpe%d%d%d"
#define TPSRAM_NAME "t3%cps%d%d%d"

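/*
 * Fetch the firmware image named t3fw<major><minor><micro> through
 * firmware(9) and write it to the card with t3_load_fw().
 */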
static int
upgrade_fw(adapter_t *sc)
{
	char buf[32];
#ifdef FIRMWARE_LATEST
	const struct firmware *fw;
#else
	struct firmware *fw;
#endif
	int status;

	snprintf(&buf[0], sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
	    FW_VERSION_MINOR, FW_VERSION_MICRO);

	fw = firmware_get(buf);

	if (fw == NULL) {
		device_printf(sc->dev, "Could not find firmware image %s\n", buf);
		return (ENOENT);
	} else
		device_printf(sc->dev, "updating firmware on card with %s\n", buf);
	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);

	device_printf(sc->dev, "firmware update returned %s %d\n",
	    (status == 0) ? "success" : "fail", status);

	firmware_put(fw, FIRMWARE_UNLOAD);

	return (status);
}

static int
cxgb_controller_attach(device_t dev)
{
	device_t child;
	const struct adapter_info *ai;
	struct adapter *sc;
	int i, reg, error = 0;
	uint32_t vers;
	int port_qsets = 1;
#ifdef MSI_SUPPORTED
	int msi_needed;
#endif
	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_count = 0;

	/* find the PCIe link width and set max read request to 4KB */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t lnk, pectl;
		lnk = pci_read_config(dev, reg + 0x12, 2);
		sc->link_width = (lnk >> 4) & 0x3f;

		pectl = pci_read_config(dev, reg + 0x8, 2);
		pectl = (pectl & ~0x7000) | (5 << 12);
		pci_write_config(dev, reg + 0x8, pectl, 2);
	}

	ai = cxgb_get_adapter_info(dev);
	if (sc->link_width != 0 && sc->link_width <= 4 &&
	    (ai->nports0 + ai->nports1) <= 2) {
		device_printf(sc->dev,
		    "PCIe x%d Link, expect reduced performance\n",
		    sc->link_width);
	}

	touch_bars(dev);
	pci_enable_busmaster(dev);
	/*
	 * Allocate the registers and make them available to the driver.
	 * The registers that we care about for NIC mode are in BAR 0
	 */
	sc->regs_rid = PCIR_BAR(0);
	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate BAR\n");
		return (ENXIO);
	}

	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
	    device_get_unit(dev));
	ADAPTER_LOCK_INIT(sc, sc->lockbuf);

	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
	    device_get_unit(dev));
	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
	    device_get_unit(dev));
	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
	    device_get_unit(dev));

	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);

	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	if (t3_prep_adapter(sc, ai, 1) < 0) {
		printf("prep adapter failed\n");
		error = ENODEV;
		goto out;
	}
	/* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
	 * enough messages for the queue sets.  If that fails, try falling
	 * back to MSI.  If that fails, then try falling back to the legacy
	 * interrupt pin model.
	 */
#ifdef MSI_SUPPORTED

	sc->msix_regs_rid = 0x20;
	if ((msi_allowed >= 2) &&
	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {

		msi_needed = sc->msi_count = SGE_MSIX_COUNT;

		if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
		    (sc->msi_count != msi_needed)) {
			device_printf(dev, "msix allocation failed - msi_count = %d"
			    " msi_needed=%d will try msi err=%d\n", sc->msi_count,
			    msi_needed, error);
			sc->msi_count = 0;
			pci_release_msi(dev);
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->msix_regs_rid, sc->msix_regs_res);
			sc->msix_regs_res = NULL;
		} else {
			sc->flags |= USING_MSIX;
			sc->cxgb_intr = t3_intr_msix;
		}
	}

	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
		sc->msi_count = 1;
		if (pci_alloc_msi(dev, &sc->msi_count)) {
			device_printf(dev, "alloc msi failed - will try INTx\n");
			sc->msi_count = 0;
			pci_release_msi(dev);
		} else {
			sc->flags |= USING_MSI;
			sc->irq_rid = 1;
			sc->cxgb_intr = t3_intr_msi;
		}
	}
#endif
	if (sc->msi_count == 0) {
		device_printf(dev, "using line interrupts\n");
		sc->irq_rid = 0;
		sc->cxgb_intr = t3b_intr;
	}

	/* Create a private taskqueue thread for handling driver events */
#ifdef TASKQUEUE_CURRENT
	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
#else
	sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
#endif
	if (sc->tq == NULL) {
		device_printf(dev, "failed to allocate controller task queue\n");
		goto out;
	}

	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));
	TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);

	/* Create a periodic callout for checking adapter status */
	callout_init(&sc->cxgb_tick_ch, TRUE);

	if (t3_check_fw_version(sc) != 0) {
		/*
		 * Warn user that a firmware update will be attempted in init.
		 */
		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
		sc->flags &= ~FW_UPTODATE;
	} else {
		sc->flags |= FW_UPTODATE;
	}

	if (t3_check_tpsram_version(sc) != 0) {
		/*
		 * Warn user that a protocol SRAM update will be attempted in init.
		 */
		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		sc->flags &= ~TPS_UPTODATE;
	} else {
		sc->flags |= TPS_UPTODATE;
	}

	if ((sc->flags & USING_MSIX) && !singleq)
		port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);

	/*
	 * Create a child device for each MAC.  The ethernet attachment
	 * will be done in these children.
	 */
	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi;

		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
			device_printf(dev, "failed to add child port\n");
			error = EINVAL;
			goto out;
		}
		pi = &sc->port[i];
		pi->adapter = sc;
		pi->nqsets = port_qsets;
		pi->first_qset = i*port_qsets;
		pi->port_id = i;
		pi->tx_chan = i >= ai->nports0;
		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
		sc->rxpkt_map[pi->txpkt_intf] = i;
		sc->portdev[i] = child;
		device_set_softc(child, pi);
	}
	if ((error = bus_generic_attach(dev)) != 0)
		goto out;

	/*
	 * XXX need to poll for link status
	 */
	sc->params.stats_update_period = 1;

	/* initialize sge private state */
	t3_sge_init_adapter(sc);

	t3_led_ready(sc);

	cxgb_offload_init();
	if (is_offload(sc)) {
		setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_adapter_ofld(sc);
	}
	error = t3_get_fw_version(sc, &vers);
	if (error)
		goto out;

	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
	    G_FW_VERSION_MICRO(vers));

	t3_add_sysctls(sc);
out:
	if (error)
		cxgb_free(sc);

	return (error);
}

static int
cxgb_controller_detach(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);

	cxgb_free(sc);

	return (0);
}

static void
cxgb_free(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK(sc);
	/*
	 * drops the lock
	 */
	cxgb_down_locked(sc);

#ifdef MSI_SUPPORTED
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}
#endif
	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}

	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->ext_intr_task);
		taskqueue_drain(sc->tq, &sc->tick_task);
	}
	t3_sge_deinit_sw(sc);
	/*
	 * Wait for last callout
	 */
	tsleep(&sc, 0, "cxgb unload", 3*hz);

	for (i = 0; i < (sc)->params.nports; ++i) {
		if (sc->portdev[i] != NULL)
			device_delete_child(sc->dev, sc->portdev[i]);
	}

	bus_generic_detach(sc->dev);
	if (sc->tq != NULL)
		taskqueue_free(sc->tq);
#ifdef notyet
	if (is_offload(sc)) {
		cxgb_adapter_unofld(sc);
		if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
			offload_close(&sc->tdev);
	}
#endif

	t3_free_sge_resources(sc);
	free(sc->filters, M_DEVBUF);
	t3_sge_free(sc);

	cxgb_offload_exit();

	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	ADAPTER_LOCK_DEINIT(sc);

	return;
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;

	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				t3_free_sge_resources(sc);
				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
				    err);
				return (err);
			}
		}
	}

	return (0);
}

static void
cxgb_teardown_msix(adapter_t *sc)
{
	int i, nqsets;

	for (nqsets = i = 0; i < (sc)->params.nports; i++)
		nqsets += sc->port[i].nqsets;

	for (i = 0; i < nqsets; i++) {
		if (sc->msix_intr_tag[i] != NULL) {
			bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
			    sc->msix_intr_tag[i]);
			sc->msix_intr_tag[i] = NULL;
		}
		if (sc->msix_irq_res[i] != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    sc->msix_irq_rid[i], sc->msix_irq_res[i]);
			sc->msix_irq_res[i] = NULL;
		}
	}
}

static int
cxgb_setup_msix(adapter_t *sc, int msix_count)
{
	int i, j, k, nqsets, rid;

	/* The first message indicates link changes and error conditions */
	sc->irq_rid = 1;
	if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->dev, "Cannot allocate msix interrupt\n");
		return (EINVAL);
	}

	if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
	    NULL,
#endif
	    cxgb_async_intr, sc, &sc->intr_tag)) {
		device_printf(sc->dev, "Cannot set up interrupt\n");
		return (EINVAL);
	}
	for (i = k = 0; i < (sc)->params.nports; i++) {
		nqsets = sc->port[i].nqsets;
		for (j = 0; j < nqsets; j++, k++) {
			struct sge_qset *qs = &sc->sge.qs[k];

			rid = k + 2;
			if (cxgb_debug)
				printf("rid=%d ", rid);
			if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
			    sc->dev, SYS_RES_IRQ, &rid,
			    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
				device_printf(sc->dev, "Cannot allocate "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
			sc->msix_irq_rid[k] = rid;
			printf("setting up interrupt for port=%d\n",
			    qs->port->port_id);
			if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
			    INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
			    NULL,
#endif
			    t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
				device_printf(sc->dev, "Cannot set up "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
		}
	}

	return (0);
}

static int
cxgb_port_probe(device_t dev)
{
	struct port_info *p;
	char buf[80];

	p = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, p->port_type->desc);
	device_set_desc_copy(dev, buf);
	return (0);
}

static int
cxgb_makedev(struct port_info *pi)
{

	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
	    UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));

	if (pi->port_cdev == NULL)
		return (ENOMEM);

	pi->port_cdev->si_drv1 = (void *)pi;

	return (0);
}

#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
#else
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
#define IFCAP_TSO4 0x0
#define IFCAP_TSO6 0x0
#define CSUM_TSO   0x0
#endif

static int
cxgb_port_attach(device_t dev)
{
	struct port_info *p;
	struct ifnet *ifp;
	int err, media_flags;

	p = device_get_softc(dev);

	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
	    device_get_unit(device_get_parent(dev)), p->port_id);
	PORT_LOCK_INIT(p, p->lockbuf);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}

	/*
	 * Note that there is currently no watchdog timer.
	 */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = cxgb_init;
	ifp->if_softc = p;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cxgb_ioctl;
	ifp->if_start = cxgb_start;
	ifp->if_timer = 0;	/* Disable ifnet watchdog */
	ifp->if_watchdog = NULL;

	ifp->if_snd.ifq_drv_maxlen = TX_ETH_Q_SIZE;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
	ifp->if_capabilities |= CXGB_CAP;
	ifp->if_capenable |= CXGB_CAP_ENABLE;
	ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
	/*
	 * Disable TSO on 4-port - it isn't supported by the firmware yet
	 */
	if (p->adapter->params.nports > 2) {
		ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
		ifp->if_hwassist &= ~CSUM_TSO;
	}

	ether_ifattach(ifp, p->hw_addr);
	/*
	 * Only default to jumbo frames on 10GigE
	 */
	if (p->adapter->params.nports <= 2)
		ifp->if_mtu = 9000;
	if ((err = cxgb_makedev(p)) != 0) {
		printf("makedev failed %d\n", err);
		return (err);
	}
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);

	if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
		media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-SR")) {
		media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-XR")) {
		media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) {
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		media_flags = 0;
	} else {
		printf("unsupported media type %s\n", p->port_type->desc);
		return (ENXIO);
	}
	if (media_flags) {
		ifmedia_add(&p->media, media_flags, 0, NULL);
		ifmedia_set(&p->media, media_flags);
	} else {
		ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
	}

	snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port_id);
#ifdef TASKQUEUE_CURRENT
	/* Create a taskqueue for handling TX without starvation */
	p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
	    taskqueue_thread_enqueue, &p->tq);
#else
	/* Create a taskqueue for handling TX without starvation */
	p->tq = taskqueue_create_fast(p->taskqbuf, M_NOWAIT,
	    taskqueue_thread_enqueue, &p->tq);
#endif

	if (p->tq == NULL) {
		device_printf(dev, "failed to allocate port task queue\n");
		return (ENOMEM);
	}
	taskqueue_start_threads(&p->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));

	TASK_INIT(&p->start_task, 0, cxgb_start_proc, ifp);

	t3_sge_init_port(p);

	return (0);
}

static int
cxgb_port_detach(device_t dev)
{
	struct port_info *p;

	p = device_get_softc(dev);

	PORT_LOCK(p);
	if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
		cxgb_stop_locked(p);
	PORT_UNLOCK(p);

	if (p->tq != NULL) {
		taskqueue_drain(p->tq, &p->start_task);
		taskqueue_free(p->tq);
		p->tq = NULL;
	}

	ether_ifdetach(p->ifp);
	/*
	 * the lock may be acquired in ifdetach
	 */
	PORT_LOCK_DEINIT(p);
	if_free(p->ifp);

	if (p->port_cdev != NULL)
		destroy_dev(p->port_cdev);

	return (0);
}

void
t3_fatal_err(struct adapter *sc)
{
	u_int fw_status[4];

	if (sc->flags & FULL_INIT_DONE) {
		t3_sge_stop(sc);
		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(sc);
	}
	device_printf(sc->dev, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
		device_printf(sc->dev, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}

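/*
 * Walk the PCI capability list and return the config-space offset of the
 * capability with ID 'cap', or 0 if it is not present.
 */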
int
t3_os_find_pci_capability(adapter_t *sc, int cap)
{
	device_t dev;
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	uint32_t status;
	uint8_t ptr;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);
	cfg = &dinfo->cfg;

	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & PCIM_STATUS_CAPPRESENT))
		return (0);

	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptr = PCIR_CAP_PTR_2;
		break;
	default:
		return (0);
	}
	ptr = pci_read_config(dev, ptr, 1);

	while (ptr != 0) {
		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
			return (ptr);
		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
	}

	return (0);
}

int
t3_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t3_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
    int duplex, int fc)
{
	struct port_info *pi = &adapter->port[port_id];
	struct cmac *mac = &adapter->port[port_id].mac;

	if ((pi->ifp->if_flags & IFF_UP) == 0)
		return;

	if (link_status) {
		t3_mac_enable(mac, MAC_DIRECTION_RX);
		if_link_state_change(pi->ifp, LINK_STATE_UP);
	} else {
		if_link_state_change(pi->ifp, LINK_STATE_DOWN);
		pi->phy.ops->power_down(&pi->phy, 1);
		t3_mac_disable(mac, MAC_DIRECTION_RX);
		t3_link_start(&pi->phy, mac, &pi->link_config);
	}
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void
t3_os_ext_intr_handler(adapter_t *sc)
{
	if (cxgb_debug)
		printf("t3_os_ext_intr_handler\n");
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
		taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
	}
	ADAPTER_UNLOCK(sc);
}

void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called, as this
	 * is called early on in attach by t3_prep_adapter, so save the
	 * address off in the port structure.
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}

/**
 *	cxgb_link_start - enable a port
 *	@p: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void
cxgb_link_start(struct port_info *p)
{
	struct ifnet *ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	ifp = p->ifp;

	t3_init_rx_mode(&rm, p);
	if (!mac->multiport)
		t3_mac_reset(mac);
	t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
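 *
 *	For example, with two ports of two queue sets each, nq[0] = nq[1] = 2,
 *	so the first half of the lookup table maps to response queues 0-1 and
 *	the second half to queues 2-3.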
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq[2];
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	if ((adap->flags & USING_MSIX) == 0)
		return;

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;

	nq[0] = nq[1] = 0;
	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		nq[pi->tx_chan] += pi->nqsets;
	}
	nq[0] = max(nq[0], 1U);
	nq[1] = max(nq[1], 1U);
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq[0];
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq[1]) + nq[0];
	}
	/* Calculate the reverse RSS map table */
	for (i = 0; i < RSS_TABLE_SIZE; ++i)
		if (adap->rrss_map[rspq_map[i]] == 0xff)
			adap->rrss_map[rspq_map[i]] = i;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
	    F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
	    V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * Sends an mbuf to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int
offload_tx(struct toedev *tdev, struct mbuf *m)
{
	int ret;

	critical_enter();
	ret = t3_offload_tx(tdev, m);
	critical_exit();
	return (ret);
}

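/*
 * Program this port's Ethernet address into an entry of the adapter's
 * source MAC table (SMT) with a CPL_SMT_WRITE_REQ work request.
 */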
1249write_smt_entry(struct adapter *adapter, int idx)
1250{
1251 struct port_info *pi = &adapter->port[idx];
1252 struct cpl_smt_write_req *req;
1253 struct mbuf *m;
1254
1255 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1256 return (ENOMEM);
1257
1258 req = mtod(m, struct cpl_smt_write_req *);
1259 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1260 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1261 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
1262 req->iff = idx;
1263 memset(req->src_mac1, 0, sizeof(req->src_mac1));
1264 memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1265
1266 m_set_priority(m, 1);
1267
1268 offload_tx(&adapter->tdev, m);
1269
1270 return (0);
1271}

static int
init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return (0);
}

static void
init_port_mtus(adapter_t *adapter)
{
	unsigned int mtus = adapter->port[0].ifp->if_mtu;

	if (adapter->port[1].ifp)
		mtus |= adapter->port[1].ifp->if_mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
    int hi, int port)
{
	struct mbuf *m;
	struct mngt_pktsched_wr *req;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m) {
		req = mtod(m, struct mngt_pktsched_wr *);
		req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
		req->sched = sched;
		req->idx = qidx;
		req->min = lo;
		req->max = hi;
		req->binding = port;
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		t3_mgmt_tx(adap, m);
	}
}

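/*
 * Bind each queue set to its port's TX channel by issuing a packet-scheduler
 * management command for every qset of every port.
 */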
static void
bind_qsets(adapter_t *sc)
{
	int i, j;

	for (i = 0; i < (sc)->params.nports; ++i) {
		const struct port_info *pi = adap2pinfo(sc, i);

		for (j = 0; j < pi->nqsets; ++j) {
			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
			    -1, pi->tx_chan);
		}
	}
}

static void
update_tpeeprom(struct adapter *adap)
{
	const struct firmware *tpeeprom;
	char buf[64];
	uint32_t version;
	unsigned int major, minor;
	int ret, len;
	char rev;

	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);

	major = G_TP_VERSION_MAJOR(version);
	minor = G_TP_VERSION_MINOR(version);
	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return;

	rev = t3rev2char(adap);

	snprintf(buf, sizeof(buf), TPEEPROM_NAME, rev,
	    TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	tpeeprom = firmware_get(buf);
	if (tpeeprom == NULL) {
		device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n",
		    buf);
		return;
	}

	len = tpeeprom->datasize - 4;

	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
	if (ret)
		goto release_tpeeprom;

	if (len != TP_SRAM_LEN) {
		device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n",
		    buf, len, TP_SRAM_LEN);
		goto release_tpeeprom;
	}

	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
	    TP_SRAM_OFFSET);

	if (!ret) {
		device_printf(adap->dev,
		    "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
		    TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	} else
		device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n");

release_tpeeprom:
	firmware_put(tpeeprom, FIRMWARE_UNLOAD);

	return;
}

static int
update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return (0);

	update_tpeeprom(adap);

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
	    TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	tpsram = firmware_get(buf);
	if (tpsram == NULL) {
		device_printf(adap->dev, "could not load TP SRAM: unable to load %s\n",
		    buf);
		return (EINVAL);
	} else
		device_printf(adap->dev, "updating TP SRAM with %s\n", buf);

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret)
		device_printf(adap->dev, "loading protocol SRAM failed\n");

release_tpsram:
	firmware_put(tpsram, FIRMWARE_UNLOAD);

	return (ret);
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;

	if ((sc->flags & FULL_INIT_DONE) == 0) {

		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;
		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;
		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		setup_rss(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);

	/* If it's MSI or INTx, allocate a single interrupt for everything */
	if ((sc->flags & USING_MSIX) == 0) {
		if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n",
			    sc->irq_rid);
			err = EINVAL;
			goto out;
		}
		device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);

		if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
		    NULL,
#endif
		    sc->cxgb_intr, sc, &sc->intr_tag)) {
			device_printf(sc->dev, "Cannot set up interrupt\n");
			err = EINVAL;
			goto irq_err;
		}
	} else {
		cxgb_setup_msix(sc, sc->msi_count);
	}

	t3_sge_start(sc);
	t3_intr_enable(sc);

	if (!(sc->flags & QUEUES_BOUND)) {
		printf("bind qsets\n");
		bind_qsets(sc);
		sc->flags |= QUEUES_BOUND;
	}
out:
	return (err);
irq_err:
	CH_ERR(sc, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void
cxgb_down_locked(struct adapter *sc)
{
	int i;

	t3_sge_stop(sc);
	t3_intr_disable(sc);

	if (sc->intr_tag != NULL) {
		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		sc->intr_tag = NULL;
	}
	if (sc->irq_res != NULL) {
		device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
		    sc->irq_rid, sc->irq_res);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);
		sc->irq_res = NULL;
	}

	if (sc->flags & USING_MSIX)
		cxgb_teardown_msix(sc);
	ADAPTER_UNLOCK(sc);

	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->slow_intr_task);
		for (i = 0; i < sc->params.nports; i++)
			taskqueue_drain(sc->tq, &sc->port[i].timer_reclaim_task);
	}
#ifdef notyet
	if (sc->port[i].tq != NULL)
#endif
}

static int
offload_open(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	struct toedev *tdev = TOEDEV(pi->ifp);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (atomic_cmpset_int(&adapter->open_device_map,
	    (adapter->open_device_map & ~OFFLOAD_DEVMAP_BIT),
	    (adapter->open_device_map | OFFLOAD_DEVMAP_BIT)) == 0)
		return (0);

	ADAPTER_LOCK(pi->adapter);
	if (!adap_up)
		err = cxgb_up(adapter);
	ADAPTER_UNLOCK(pi->adapter);
	if (err)
		return (err);

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0].ifp;
	err = cxgb_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
	    adapter->params.b_wnd,
	    adapter->params.rev == 0 ?
	    adapter->port[0].ifp->if_mtu : 0xffff);
	init_smt(adapter);

	/* Call back all registered clients */
	cxgb_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_set_dummy_ops(tdev);
	}
	return (err);
}
#ifdef notyet
static int
offload_close(struct toedev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
		return (0);

	/* Call back all registered clients */
	cxgb_remove_clients(tdev);
	tdev->lldev = NULL;
	cxgb_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb_offload_deactivate(adapter);
	return (0);
}
#endif

static void
cxgb_init(void *arg)
{
	struct port_info *p = arg;

	PORT_LOCK(p);
	cxgb_init_locked(p);
	PORT_UNLOCK(p);
}

static void
cxgb_init_locked(struct port_info *p)
{
	struct ifnet *ifp;
	adapter_t *sc = p->adapter;
	int err;

	PORT_LOCK_ASSERT_OWNED(p);
	ifp = p->ifp;

	ADAPTER_LOCK(p->adapter);
	if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
		ADAPTER_UNLOCK(p->adapter);
		cxgb_stop_locked(p);
		return;
	}
	if (p->adapter->open_device_map == 0) {
		t3_intr_clear(sc);
		t3_sge_init_adapter(sc);
	}
	setbit(&p->adapter->open_device_map, p->port_id);
	ADAPTER_UNLOCK(p->adapter);

	if (is_offload(sc) && !ofld_disable) {
		err = offload_open(p);
		if (err)
			log(LOG_WARNING,
			    "Could not initialize offload capabilities\n");
	}
	cxgb_link_start(p);
	t3_link_changed(sc, p->port_id);
	ifp->if_baudrate = p->link_config.speed * 1000000;

	device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
	t3_port_intr_enable(sc, p->port_id);

	callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
	    cxgb_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
cxgb_set_rxmode(struct port_info *p)
{
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	PORT_LOCK_ASSERT_OWNED(p);

	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}

static void
cxgb_stop_locked(struct port_info *p)
{
	struct ifnet *ifp;

	PORT_LOCK_ASSERT_OWNED(p);
	ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);

	ifp = p->ifp;

	t3_port_intr_disable(p->adapter, p->port_id);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	p->phy.ops->power_down(&p->phy, 1);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	ADAPTER_LOCK(p->adapter);
	clrbit(&p->adapter->open_device_map, p->port_id);

	if (p->adapter->open_device_map == 0) {
		cxgb_down_locked(p->adapter);
	} else
		ADAPTER_UNLOCK(p->adapter);
}

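/*
 * Validate and apply a new MTU; a running port is restarted so that the
 * hardware picks up the new frame size.
 */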
static int
cxgb_set_mtu(struct port_info *p, int mtu)
{
	struct ifnet *ifp = p->ifp;
	int error = 0;

	if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO))
		error = EINVAL;
	else if (ifp->if_mtu != mtu) {
		PORT_LOCK(p);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			callout_stop(&p->adapter->cxgb_tick_ch);
			cxgb_stop_locked(p);
			cxgb_init_locked(p);
		}
		PORT_UNLOCK(p);
	}
	return (error);
}

static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct port_info *p = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0;
	uint32_t mask;

	/*
	 * XXX need to check that we aren't in the middle of an unload
	 */
	switch (command) {
	case SIOCSIFMTU:
		error = cxgb_set_mtu(p, ifr->ifr_mtu);
		break;
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		PORT_LOCK(p);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				cxgb_init_locked(p);
			arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		PORT_UNLOCK(p);
		break;
	case SIOCSIFFLAGS:
		callout_drain(&p->adapter->cxgb_tick_ch);
		PORT_LOCK(p);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = p->if_flags;
				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
					cxgb_set_rxmode(p);
			} else
				cxgb_init_locked(p);
			p->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			cxgb_stop_locked(p);

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			adapter_t *sc = p->adapter;
			callout_reset(&sc->cxgb_tick_ch,
			    sc->params.stats_update_period * hz,
			    cxgb_tick, sc);
		}
		PORT_UNLOCK(p);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
		break;
	case SIOCSIFCAP:
		PORT_LOCK(p);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
			}
		} else if (mask & IFCAP_RXCSUM) {
			if (IFCAP_RXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_RXCSUM;
			} else {
				ifp->if_capenable |= IFCAP_RXCSUM;
			}
		}
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				if (cxgb_debug)
					printf("cxgb requires tx checksum offload"
					    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		PORT_UNLOCK(p);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

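/*
 * Dequeue packets from the interface send queue and hand them to t3_encap()
 * until either txmax descriptors have been consumed or the TX descriptor
 * ring is almost full.
 */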
static int
cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
{
	struct sge_qset *qs;
	struct sge_txq *txq;
	struct port_info *p = ifp->if_softc;
	struct mbuf *m0, *m = NULL;
	int err, in_use_init, free;

	if (!p->link_config.link_ok)
		return (ENXIO);

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		return (ENOBUFS);

	qs = &p->adapter->sge.qs[p->first_qset];
	txq = &qs->txq[TXQ_ETH];
	err = 0;

	if (txq->flags & TXQ_TRANSMITTING)
		return (EINPROGRESS);

	mtx_lock(&txq->lock);
	txq->flags |= TXQ_TRANSMITTING;
	in_use_init = txq->in_use;
	while ((txq->in_use - in_use_init < txmax) &&
	    (txq->size > txq->in_use + TX_MAX_DESC)) {
		free = 0;
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * Convert chain to M_IOVEC
		 */
		KASSERT((m->m_flags & M_IOVEC) == 0, ("IOVEC set too early"));
		m0 = m;
#ifdef INVARIANTS
		/*
		 * Clean up after net stack sloppiness
		 * before calling m_sanity
		 */
		m0 = m->m_next;
		while (m0) {
			m0->m_flags &= ~M_PKTHDR;
			m0 = m0->m_next;
		}
		m_sanity(m, 0);
		m0 = m;
#endif
		if (collapse_mbufs && m->m_pkthdr.len > MCLBYTES &&
		    m_collapse(m, TX_MAX_SEGS, &m0) == EFBIG) {
			if ((m0 = m_defrag(m, M_NOWAIT)) != NULL) {
				m = m0;
				m_collapse(m, TX_MAX_SEGS, &m0);
			} else
				break;
		}
		m = m0;
		if ((err = t3_encap(p, &m, &free)) != 0)
			break;
		BPF_MTAP(ifp, m);
		if (free)
			m_freem(m);
	}
	txq->flags &= ~TXQ_TRANSMITTING;
	mtx_unlock(&txq->lock);

	if (__predict_false(err)) {
		if (err == ENOMEM) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_LOCK(&ifp->if_snd);
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			IFQ_UNLOCK(&ifp->if_snd);
		}
	}
	if (err == 0 && m == NULL)
		err = ENOBUFS;
	else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
	    (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		err = ENOSPC;
	}
	return (err);
}

static void
cxgb_start_proc(void *arg, int ncount)
{
	struct ifnet *ifp = arg;
	struct port_info *pi = ifp->if_softc;
	struct sge_qset *qs;
	struct sge_txq *txq;
	int error;

	qs = &pi->adapter->sge.qs[pi->first_qset];
	txq = &qs->txq[TXQ_ETH];

	do {
		if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
			taskqueue_enqueue(pi->tq, &txq->qreclaim_task);

		error = cxgb_start_tx(ifp, TX_START_MAX_DESC);
	} while (error == 0);
}

static void
cxgb_start(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_qset *qs;
	struct sge_txq *txq;
	int err;

	qs = &pi->adapter->sge.qs[pi->first_qset];
	txq = &qs->txq[TXQ_ETH];

	if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
		taskqueue_enqueue(pi->tq,
		    &txq->qreclaim_task);

	err = cxgb_start_tx(ifp, TX_START_MAX_DESC);

	if (err == 0)
		taskqueue_enqueue(pi->tq, &pi->start_task);
}


static int
cxgb_media_change(struct ifnet *ifp)
{
	if_printf(ifp, "media change not supported\n");
	return (ENXIO);
}

static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (p->link_config.speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case 100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	}

	if (p->link_config.duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

static void
cxgb_async_intr(void *data)
{
	adapter_t *sc = data;

	if (cxgb_debug)
		device_printf(sc->dev, "cxgb_async_intr\n");
	/*
	 * May need to sleep - defer to taskqueue
	 */
	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}

static void
cxgb_ext_intr_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;

	if (cxgb_debug)
		printf("cxgb_ext_intr_handler\n");

	t3_phy_intr_handler(sc);

	/* Now reenable external interrupts */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask |= F_T3DBG;
		t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
	}
	ADAPTER_UNLOCK(sc);
}

static void
check_link_status(adapter_t *sc)
{
	int i;

	for (i = 0; i < (sc)->params.nports; ++i) {
		struct port_info *p = &sc->port[i];

		if (!(p->port_type->caps & SUPPORTED_IRQ))
			t3_link_changed(sc, i);
		p->ifp->if_baudrate = p->link_config.speed * 1000000;
	}
}

static void
check_t3b2_mac(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];
		struct ifnet *ifp = p->ifp;
		int status;

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			continue;

		status = 0;
		PORT_LOCK(p);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
			    + ETHER_VLAN_ENCAP_LEN);
			t3_mac_set_address(mac, 0, p->hw_addr);
			cxgb_set_rxmode(p);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
		PORT_UNLOCK(p);
	}
}
2070
static void
cxgb_tick(void *arg)
{
	adapter_t *sc = (adapter_t *)arg;

	taskqueue_enqueue(sc->tq, &sc->tick_task);

	if (sc->open_device_map != 0)
		callout_reset(&sc->cxgb_tick_ch,
		    sc->params.stats_update_period * hz, cxgb_tick, sc);
}

static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;

	ADAPTER_LOCK(sc);
	if (p->linkpoll_period)
		check_link_status(sc);

	/*
	 * The adapter lock can currently only be acquired after the port
	 * lock, so drop it before check_t3b2_mac() takes port locks.
	 */
	ADAPTER_UNLOCK(sc);

	if (p->rev == T3_REV_B2 && p->nports < 4)
		check_t3b2_mac(sc);
}

static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet: this block is a placeholder carried over from
	 * the Linux driver (note the pdev/pci_read_config_dword() idioms)
	 * that rewrites BAR registers 1, 3 and 5 (the upper halves of the
	 * 64-bit BARs) on 32-bit systems.
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}

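/*
 * Write a byte range to the serial EEPROM.  The EEPROM is accessed in
 * 32-bit words, so an unaligned request is widened to word boundaries:
 * the first and last words are read back and merged with the caller's
 * data, then the whole range is rewritten with write protection
 * temporarily lifted.
 */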
static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
{
	uint8_t *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = pi->adapter;

	aligned_offset = offset & ~3;
	aligned_len = (len + (offset & 3) + 3) & ~3;

	if (aligned_offset != offset || aligned_len != len) {
		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK | M_ZERO);
		if (!buf)
			return (ENOMEM);
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
			    aligned_offset + aligned_len - 4,
			    (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (offset & 3), data, len);
	} else
		buf = (uint8_t *)(uintptr_t)data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		free(buf, M_DEVBUF);
	return (err);
}

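/*
 * Range check for optional ioctl parameters: a negative value means
 * "leave unchanged" and is therefore always acceptable.
 */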
static int
in_range(int val, int lo, int hi)
{
	return (val < 0 || (val <= hi && val >= lo));
}

static int
cxgb_extension_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{
	return (0);
}

static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
{
	return (0);
}

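/*
 * Control ioctls on the per-port character device; all of them are
 * privileged.  A userland caller would look roughly like the sketch below
 * (the device path and register choice are illustrative only):
 *
 *	struct ch_reg reg = { .addr = A_PL_REV };
 *	int fd = open("/dev/cxgb0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, CHELSIO_GETREG, &reg) == 0)
 *		printf("PL_REV: 0x%x\n", reg.val);
 */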
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
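	/*
	 * For 10G PHYs the upper byte of phy_id selects the MDIO manageable
	 * device (clause 45 addressing), defaulting to the PCS; 1G PHYs use
	 * flat clause 22 register addressing.
	 */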
	case SIOCGMIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct mii_data *mid = (struct mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return (EINVAL);

			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
			    mid->reg_num, &val);
		} else
			error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f, &val);
		if (error == 0)
			mid->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = &pi->phy;
		struct mii_data *mid = (struct mii_data *)data;

		if (!phy->mdio_write)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return (EINVAL);

			error = phy->mdio_write(sc, mid->phy_id & 0x1f, mmd,
			    mid->reg_num, mid->val_in);
		} else
			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f, mid->val_in);
		break;
	}
	case CHELSIO_SETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		t3_write_reg(sc, edata->addr, edata->val);
		break;
	}
	case CHELSIO_GETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		edata->val = t3_read_reg(sc, edata->addr);
		break;
	}
	case CHELSIO_GET_SGE_CONTEXT: {
		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;

		mtx_lock(&sc->sge.reg_lock);
		switch (ecntxt->cntxt_type) {
		case CNTXT_TYPE_EGRESS:
			error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_FL:
			error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_RSP:
			error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_CQ:
			error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		default:
			error = EINVAL;
			break;
		}
		mtx_unlock(&sc->sge.reg_lock);
		break;
	}
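	/*
	 * Descriptor queues are addressed as qset * 6 + queue: three TX
	 * queues, two free lists and one response queue per qset, hence
	 * the divide/modulo by 6 below.
	 */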
	case CHELSIO_GET_SGE_DESC: {
		struct ch_desc *edesc = (struct ch_desc *)data;
		int ret;

		if (edesc->queue_num >= SGE_QSETS * 6)
			return (EINVAL);
		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
		    edesc->queue_num % 6, edesc->idx, edesc->data);
		if (ret < 0)
			return (EINVAL);
		edesc->size = ret;
		break;
	}
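	/*
	 * Fields of ch_qset_params are optional: anything negative is left
	 * at its current value, which is why in_range() admits negative
	 * values and why, after full initialization, EBUSY is returned
	 * only when a field was actually supplied.
	 */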
	case CHELSIO_SET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;

		if (t->qset_idx >= SGE_QSETS)
			return (EINVAL);
		if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t->cong_thres, 0, 255) ||
		    !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
			MAX_TXQ_ENTRIES) ||
		    !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
			MAX_TXQ_ENTRIES) ||
		    !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
		    !in_range(t->fl_size[1], MIN_FL_ENTRIES,
			MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
			return (EINVAL);
		if ((sc->flags & FULL_INIT_DONE) &&
		    (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
		     t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
		     t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
		     t->polling >= 0 || t->cong_thres >= 0))
			return (EBUSY);

		q = &sc->params.sge.qset[t->qset_idx];

		if (t->rspq_size >= 0)
			q->rspq_size = t->rspq_size;
		if (t->fl_size[0] >= 0)
			q->fl_size = t->fl_size[0];
		if (t->fl_size[1] >= 0)
			q->jumbo_size = t->fl_size[1];
		if (t->txq_size[0] >= 0)
			q->txq_size[0] = t->txq_size[0];
		if (t->txq_size[1] >= 0)
			q->txq_size[1] = t->txq_size[1];
		if (t->txq_size[2] >= 0)
			q->txq_size[2] = t->txq_size[2];
		if (t->cong_thres >= 0)
			q->cong_thres = t->cong_thres;
		if (t->intr_lat >= 0) {
			struct sge_qset *qs = &sc->sge.qs[t->qset_idx];

			q->coalesce_nsecs = t->intr_lat * 1000;
			t3_update_qset_coalesce(qs, q);
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;

		if (t->qset_idx >= SGE_QSETS)
			return (EINVAL);

		q = &sc->params.sge.qset[t->qset_idx];
		t->rspq_size = q->rspq_size;
		t->txq_size[0] = q->txq_size[0];
		t->txq_size[1] = q->txq_size[1];
		t->txq_size[2] = q->txq_size[2];
		t->fl_size[0] = q->fl_size;
		t->fl_size[1] = q->jumbo_size;
		t->polling = q->polling;
		t->intr_lat = q->coalesce_nsecs / 1000;
		t->cong_thres = q->cong_thres;
		break;
	}
	case CHELSIO_SET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;
		unsigned int port_idx = pi->port_id;

		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);
		if (edata->val < 1 ||
		    (edata->val > 1 && !(sc->flags & USING_MSIX)))
			return (EINVAL);
		if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
			return (EINVAL);
		sc->port[port_idx].nqsets = edata->val;
		sc->port[0].first_qset = 0;
		/*
		 * XXX hardcode ourselves to 2 ports just like LEEENUX
		 */
		sc->port[1].first_qset = sc->port[0].nqsets;
		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;

		edata->val = pi->nqsets;
		break;
	}
#ifdef notyet
	case CHELSIO_LOAD_FW:
	case CHELSIO_GET_PM:
	case CHELSIO_SET_PM:
		return (EOPNOTSUPP);
		break;
#endif
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;
		int i;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (offload_running(sc))
			return (EBUSY);
		if (m->nmtus != NMTUS)
			return (EINVAL);
		if (m->mtus[0] < 81)		/* accommodate SACK */
			return (EINVAL);

		/*
		 * MTUs must be in ascending order
		 */
		for (i = 1; i < NMTUS; ++i)
			if (m->mtus[i] < m->mtus[i - 1])
				return (EINVAL);

		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
		break;
	}
	case CHELSIO_GETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
		m->nmtus = NMTUS;
		break;
	}
	case CHELSIO_DEVUP:
		if (!is_offload(sc))
			return (EOPNOTSUPP);
		return (offload_open(pi));
		break;
	case CHELSIO_GET_MEM: {
		struct ch_mem_range *t = (struct ch_mem_range *)data;
		struct mc7 *mem;
		uint8_t *useraddr;
		u64 buf[32];

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);		/* need the memory controllers */
		if ((t->addr & 0x7) || (t->len & 0x7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &sc->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &sc->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &sc->pmtx;
		else
			return (EINVAL);

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t->version = 3 | (sc->params.rev << 10);

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr = (uint8_t *)(t + 1);	/* advance to start of buffer */
		while (t->len) {
			unsigned int chunk = min(t->len, sizeof(buf));

			error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
			if (error)
				return (-error);
			if (copyout(buf, useraddr, chunk))
				return (EFAULT);
			useraddr += chunk;
			t->addr += chunk;
			t->len -= chunk;
		}
		break;
	}
	case CHELSIO_READ_TCAM_WORD: {
		struct ch_tcam_word *t = (struct ch_tcam_word *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);		/* need MC5 */
		return (-t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf));
		break;
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace *t = (struct ch_trace *)data;
		const struct trace_params *tp;

		tp = (const struct trace_params *)&t->sip;
		if (t->config_tx)
			t3_config_trace_filter(sc, tp, 0, t->invert_match,
			    t->trace_tx);
		if (t->config_rx)
			t3_config_trace_filter(sc, tp, 1, t->invert_match,
			    t->trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED: {
		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;

		if (sc->open_device_map == 0)
			return (EAGAIN);
		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
		    p->binding);
		break;
	}
	case CHELSIO_IFCONF_GETREGS: {
		struct ifconf_regs *regs = (struct ifconf_regs *)data;
		int reglen = cxgb_get_regs_len();
		uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);

		if (buf == NULL)
			return (ENOMEM);
		if (regs->len > reglen)
			regs->len = reglen;
		else if (regs->len < reglen) {
			error = E2BIG;
			goto done;
		}
		cxgb_get_regs(sc, regs, buf);
		error = copyout(buf, regs->data, reglen);
done:
		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_SET_HW_SCHED: {
		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
		unsigned int ticks_per_usec = core_ticks_per_usec(sc);

		if ((sc->flags & FULL_INIT_DONE) == 0)
			return (EAGAIN);	/* need TP to be initialized */
		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
		    !in_range(t->channel, 0, 1) ||
		    !in_range(t->kbps, 0, 10000000) ||
		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
		    !in_range(t->flow_ipg, 0, dack_ticks_to_usec(sc, 0x7ff)))
			return (EINVAL);

		if (t->kbps >= 0) {
			error = t3_config_sched(sc, t->kbps, t->sched);
			if (error < 0)
				return (-error);
		}
		if (t->class_ipg >= 0)
			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
		if (t->flow_ipg >= 0) {
			t->flow_ipg *= 1000;	/* us -> ns */
			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
		}
		if (t->mode >= 0) {
			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);

			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    bit, t->mode ? bit : 0);
		}
		if (t->channel >= 0)
			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    1 << t->sched, t->channel << t->sched);
		break;
	}
	default:
		return (EOPNOTSUPP);
		break;
	}

	return (error);
}

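/*
 * Copy the 4-byte registers in [start, end] (byte addresses) into the
 * register dump at the same byte offsets.
 */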
static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	uint32_t *p = (uint32_t *)(buf + start);

	for ( ; start <= end; start += sizeof(uint32_t))
		*p++ = t3_read_reg(ap, start);
}

#define T3_REGMAP_SIZE (3 * 1024)
static int
cxgb_get_regs_len(void)
{
	return (T3_REGMAP_SIZE);
}
#undef T3_REGMAP_SIZE

static void
cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
{

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are
	 * clear-on-read.  Also reading multi-register stats would need to
	 * synchronize with the periodic mac stats accumulation.  Hard to
	 * justify the complexity.
	 */
	memset(buf, 0, REGDUMP_SIZE);
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
	    XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
	    XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}