Deleted Added
full compact
cxgb_main.c (169053) cxgb_main.c (169978)
1/**************************************************************************
2
3Copyright (c) 2007, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
1/**************************************************************************
2
3Copyright (c) 2007, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Chelsio Corporation nor the names of its
122. Neither the name of the Chelsio Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34#include <sys/cdefs.h>
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28***************************************************************************/
29
30#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 169053 2007-04-26 08:38:00Z kmacy $");
31__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 169978 2007-05-25 09:48:20Z kmacy $");
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/bus.h>
41#include <sys/module.h>
42#include <sys/pciio.h>
43#include <sys/conf.h>

--- 30 unchanged lines hidden (view full) ---

74
75#include <dev/pci/pcireg.h>
76#include <dev/pci/pcivar.h>
77#include <dev/pci/pci_private.h>
78
79#include <dev/cxgb/cxgb_osdep.h>
80#include <dev/cxgb/common/cxgb_common.h>
81#include <dev/cxgb/cxgb_ioctl.h>
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/module.h>
38#include <sys/pciio.h>
39#include <sys/conf.h>

--- 30 unchanged lines hidden (view full) ---

70
71#include <dev/pci/pcireg.h>
72#include <dev/pci/pcivar.h>
73#include <dev/pci/pci_private.h>
74
75#include <dev/cxgb/cxgb_osdep.h>
76#include <dev/cxgb/common/cxgb_common.h>
77#include <dev/cxgb/cxgb_ioctl.h>
78#include <dev/cxgb/cxgb_offload.h>
82#include <dev/cxgb/common/cxgb_regs.h>
83#include <dev/cxgb/common/cxgb_t3_cpl.h>
84#include <dev/cxgb/common/cxgb_firmware_exports.h>
85
86#include <dev/cxgb/sys/mvec.h>
87
88
89#ifdef PRIV_SUPPORTED

--- 8 unchanged lines hidden (view full) ---

98static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
99static void cxgb_start(struct ifnet *);
100static void cxgb_start_proc(void *, int ncount);
101static int cxgb_media_change(struct ifnet *);
102static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
103static int setup_sge_qsets(adapter_t *);
104static void cxgb_async_intr(void *);
105static void cxgb_ext_intr_handler(void *, int);
79#include <dev/cxgb/common/cxgb_regs.h>
80#include <dev/cxgb/common/cxgb_t3_cpl.h>
81#include <dev/cxgb/common/cxgb_firmware_exports.h>
82
83#include <dev/cxgb/sys/mvec.h>
84
85
86#ifdef PRIV_SUPPORTED

--- 8 unchanged lines hidden (view full) ---

95static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
96static void cxgb_start(struct ifnet *);
97static void cxgb_start_proc(void *, int ncount);
98static int cxgb_media_change(struct ifnet *);
99static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
100static int setup_sge_qsets(adapter_t *);
101static void cxgb_async_intr(void *);
102static void cxgb_ext_intr_handler(void *, int);
103static void cxgb_down(struct adapter *sc);
106static void cxgb_tick(void *);
107static void setup_rss(adapter_t *sc);
108
109/* Attachment glue for the PCI controller end of the device. Each port of
110 * the device is attached separately, as defined later.
111 */
112static int cxgb_controller_probe(device_t);
113static int cxgb_controller_attach(device_t);
114static int cxgb_controller_detach(device_t);
115static void cxgb_free(struct adapter *);
116static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
117 unsigned int end);
118static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
119static int cxgb_get_regs_len(void);
104static void cxgb_tick(void *);
105static void setup_rss(adapter_t *sc);
106
107/* Attachment glue for the PCI controller end of the device. Each port of
108 * the device is attached separately, as defined later.
109 */
110static int cxgb_controller_probe(device_t);
111static int cxgb_controller_attach(device_t);
112static int cxgb_controller_detach(device_t);
113static void cxgb_free(struct adapter *);
114static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
115 unsigned int end);
116static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
117static int cxgb_get_regs_len(void);
118static int offload_open(struct port_info *pi);
119static int offload_close(struct toedev *tdev);
120
120
121
122
121static device_method_t cxgb_controller_methods[] = {
122 DEVMETHOD(device_probe, cxgb_controller_probe),
123 DEVMETHOD(device_attach, cxgb_controller_attach),
124 DEVMETHOD(device_detach, cxgb_controller_detach),
125
126 /* bus interface */
127 DEVMETHOD(bus_print_child, bus_generic_print_child),
128 DEVMETHOD(bus_driver_added, bus_generic_driver_added),

--- 45 unchanged lines hidden (view full) ---

174 * of these schemes the driver may consider as follows:
175 *
176 * msi = 2: choose from among all three options
177 * msi = 1 : only consider MSI and pin interrupts
178 * msi = 0: force pin interrupts
179 */
180static int msi_allowed = 2;
181TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
123static device_method_t cxgb_controller_methods[] = {
124 DEVMETHOD(device_probe, cxgb_controller_probe),
125 DEVMETHOD(device_attach, cxgb_controller_attach),
126 DEVMETHOD(device_detach, cxgb_controller_detach),
127
128 /* bus interface */
129 DEVMETHOD(bus_print_child, bus_generic_print_child),
130 DEVMETHOD(bus_driver_added, bus_generic_driver_added),

--- 45 unchanged lines hidden (view full) ---

176 * of these schemes the driver may consider as follows:
177 *
178 * msi = 2: choose from among all three options
179 * msi = 1 : only consider MSI and pin interrupts
180 * msi = 0: force pin interrupts
181 */
182static int msi_allowed = 2;
183TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
182
183SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
184SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
185 "MSI-X, MSI, INTx selector");
184SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
185SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
186 "MSI-X, MSI, INTx selector");
187
186/*
188/*
187 * Multiple queues need further tuning
189 * The driver enables offload as a default.
190 * To disable it, use ofld_disable = 1.
188 */
191 */
192static int ofld_disable = 0;
193TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
194SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
195 "disable ULP offload");
196
197/*
198 * The driver uses an auto-queue algorithm by default.
199 * To disable it and force a single queue-set per port, use singleq = 1.
200 */
189static int singleq = 1;
201static int singleq = 1;
202TUNABLE_INT("hw.cxgb.singleq", &singleq);
203SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
204 "use a single queue-set per port");
190
191enum {
192 MAX_TXQ_ENTRIES = 16384,
193 MAX_CTRL_TXQ_ENTRIES = 1024,
194 MAX_RSPQ_ENTRIES = 16384,
195 MAX_RX_BUFFERS = 16384,
196 MAX_RX_JUMBO_BUFFERS = 16384,
197 MIN_TXQ_ENTRIES = 4,

--- 69 unchanged lines hidden (view full) ---

267 ports = "ports";
268
269 snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, ai->nports, ports);
270 device_set_desc_copy(dev, buf);
271 return (BUS_PROBE_DEFAULT);
272}
273
274static int
205
206enum {
207 MAX_TXQ_ENTRIES = 16384,
208 MAX_CTRL_TXQ_ENTRIES = 1024,
209 MAX_RSPQ_ENTRIES = 16384,
210 MAX_RX_BUFFERS = 16384,
211 MAX_RX_JUMBO_BUFFERS = 16384,
212 MIN_TXQ_ENTRIES = 4,

--- 69 unchanged lines hidden (view full) ---

282 ports = "ports";
283
284 snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, ai->nports, ports);
285 device_set_desc_copy(dev, buf);
286 return (BUS_PROBE_DEFAULT);
287}
288
289static int
275cxgb_fw_download(adapter_t *sc, device_t dev)
290upgrade_fw(adapter_t *sc)
276{
277 char buf[32];
278#ifdef FIRMWARE_LATEST
279 const struct firmware *fw;
280#else
281 struct firmware *fw;
282#endif
283 int status;
284
291{
292 char buf[32];
293#ifdef FIRMWARE_LATEST
294 const struct firmware *fw;
295#else
296 struct firmware *fw;
297#endif
298 int status;
299
285 snprintf(&buf[0], sizeof(buf), "t3fw%d%d", FW_VERSION_MAJOR,
286 FW_VERSION_MINOR);
300 snprintf(&buf[0], sizeof(buf), "t3fw%d%d%d", FW_VERSION_MAJOR,
301 FW_VERSION_MINOR, FW_VERSION_MICRO);
287
288 fw = firmware_get(buf);
302
303 fw = firmware_get(buf);
289
290
291 if (fw == NULL) {
304
305 if (fw == NULL) {
292 device_printf(dev, "Could not find firmware image %s\n", buf);
293 return ENOENT;
306 device_printf(sc->dev, "Could not find firmware image %s\n", buf);
307 return (ENOENT);
294 }
295
296 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
297
298 firmware_put(fw, FIRMWARE_UNLOAD);
299
300 return (status);
301}
302
308 }
309
310 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
311
312 firmware_put(fw, FIRMWARE_UNLOAD);
313
314 return (status);
315}
316
303
304static int
305cxgb_controller_attach(device_t dev)
306{
307 driver_intr_t *cxgb_intr = NULL;
308 device_t child;
309 const struct adapter_info *ai;
310 struct adapter *sc;
317static int
318cxgb_controller_attach(device_t dev)
319{
320 driver_intr_t *cxgb_intr = NULL;
321 device_t child;
322 const struct adapter_info *ai;
323 struct adapter *sc;
311 int i, reg, msi_needed, msi_count = 0, error = 0;
324 int i, reg, msi_needed, error = 0;
312 uint32_t vers;
313 int port_qsets = 1;
314
315 sc = device_get_softc(dev);
316 sc->dev = dev;
325 uint32_t vers;
326 int port_qsets = 1;
327
328 sc = device_get_softc(dev);
329 sc->dev = dev;
317
330 sc->msi_count = 0;
331
318 /* find the PCIe link width and set max read request to 4KB*/
319 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
320 uint16_t lnk, pectl;
321 lnk = pci_read_config(dev, reg + 0x12, 2);
322 sc->link_width = (lnk >> 4) & 0x3f;
323
324 pectl = pci_read_config(dev, reg + 0x8, 2);
325 pectl = (pectl & ~0x7000) | (5 << 12);

--- 39 unchanged lines hidden (view full) ---

365 */
366#ifdef MSI_SUPPORTED
367
368 sc->msix_regs_rid = 0x20;
369 if ((msi_allowed >= 2) &&
370 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
371 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
372
332 /* find the PCIe link width and set max read request to 4KB*/
333 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
334 uint16_t lnk, pectl;
335 lnk = pci_read_config(dev, reg + 0x12, 2);
336 sc->link_width = (lnk >> 4) & 0x3f;
337
338 pectl = pci_read_config(dev, reg + 0x8, 2);
339 pectl = (pectl & ~0x7000) | (5 << 12);

--- 39 unchanged lines hidden (view full) ---

379 */
380#ifdef MSI_SUPPORTED
381
382 sc->msix_regs_rid = 0x20;
383 if ((msi_allowed >= 2) &&
384 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
385 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
386
373 msi_needed = msi_count = SGE_MSIX_COUNT;
387 msi_needed = sc->msi_count = SGE_MSIX_COUNT;
374
388
375 if ((pci_alloc_msix(dev, &msi_count) != 0) ||
376 (msi_count != msi_needed)) {
377 device_printf(dev, "msix allocation failed"
378 " will try msi\n");
379 msi_count = 0;
389 if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
390 (sc->msi_count != msi_needed)) {
391 device_printf(dev, "msix allocation failed - msi_count = %d"
392 " msi_needed=%d will try msi err=%d\n", sc->msi_count,
393 msi_needed, error);
394 sc->msi_count = 0;
380 pci_release_msi(dev);
381 bus_release_resource(dev, SYS_RES_MEMORY,
382 sc->msix_regs_rid, sc->msix_regs_res);
383 sc->msix_regs_res = NULL;
384 } else {
385 sc->flags |= USING_MSIX;
386 cxgb_intr = t3_intr_msix;
387 }
388 }
389
395 pci_release_msi(dev);
396 bus_release_resource(dev, SYS_RES_MEMORY,
397 sc->msix_regs_rid, sc->msix_regs_res);
398 sc->msix_regs_res = NULL;
399 } else {
400 sc->flags |= USING_MSIX;
401 cxgb_intr = t3_intr_msix;
402 }
403 }
404
390 if ((msi_allowed >= 1) && (msi_count == 0)) {
391 msi_count = 1;
392 if (pci_alloc_msi(dev, &msi_count)) {
405 if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
406 sc->msi_count = 1;
407 if (pci_alloc_msi(dev, &sc->msi_count)) {
393 device_printf(dev, "alloc msi failed - will try INTx\n");
408 device_printf(dev, "alloc msi failed - will try INTx\n");
394 msi_count = 0;
409 sc->msi_count = 0;
395 pci_release_msi(dev);
396 } else {
397 sc->flags |= USING_MSI;
398 sc->irq_rid = 1;
399 cxgb_intr = t3_intr_msi;
400 }
401 }
402#endif
410 pci_release_msi(dev);
411 } else {
412 sc->flags |= USING_MSI;
413 sc->irq_rid = 1;
414 cxgb_intr = t3_intr_msi;
415 }
416 }
417#endif
403 if (msi_count == 0) {
418 if (sc->msi_count == 0) {
404 device_printf(dev, "using line interrupts\n");
405 sc->irq_rid = 0;
406 cxgb_intr = t3b_intr;
407 }
408
409
410 /* Create a private taskqueue thread for handling driver events */
411#ifdef TASKQUEUE_CURRENT

--- 15 unchanged lines hidden (view full) ---

427
428 /* Create a periodic callout for checking adapter status */
429 callout_init_mtx(&sc->cxgb_tick_ch, &sc->lock, CALLOUT_RETURNUNLOCKED);
430
431 if (t3_check_fw_version(sc) != 0) {
432 /*
433 * Warn user that a firmware update will be attempted in init.
434 */
419 device_printf(dev, "using line interrupts\n");
420 sc->irq_rid = 0;
421 cxgb_intr = t3b_intr;
422 }
423
424
425 /* Create a private taskqueue thread for handling driver events */
426#ifdef TASKQUEUE_CURRENT

--- 15 unchanged lines hidden (view full) ---

442
443 /* Create a periodic callout for checking adapter status */
444 callout_init_mtx(&sc->cxgb_tick_ch, &sc->lock, CALLOUT_RETURNUNLOCKED);
445
446 if (t3_check_fw_version(sc) != 0) {
447 /*
448 * Warn user that a firmware update will be attempted in init.
449 */
435 device_printf(dev, "firmware needs to be updated to version %d.%d\n",
436 FW_VERSION_MAJOR, FW_VERSION_MINOR);
450 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
451 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
437 sc->flags &= ~FW_UPTODATE;
438 } else {
439 sc->flags |= FW_UPTODATE;
440 }
441
452 sc->flags &= ~FW_UPTODATE;
453 } else {
454 sc->flags |= FW_UPTODATE;
455 }
456
442 if (t3_init_hw(sc, 0) != 0) {
443 device_printf(dev, "hw initialization failed\n");
444 error = ENXIO;
445 goto out;
446 }
447 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
448
449
450 if ((singleq == 0) && (sc->flags & USING_MSIX))
457 if ((sc->flags & USING_MSIX) && !singleq)
451 port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
452
453 /*
454 * Create a child device for each MAC. The ethernet attachment
455 * will be done in these children.
456 */
457 for (i = 0; i < (sc)->params.nports; i++) {
458 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {

--- 4 unchanged lines hidden (view full) ---

463 sc->portdev[i] = child;
464 sc->port[i].adapter = sc;
465 sc->port[i].nqsets = port_qsets;
466 sc->port[i].first_qset = i*port_qsets;
467 sc->port[i].port = i;
468 device_set_softc(child, &sc->port[i]);
469 }
470 if ((error = bus_generic_attach(dev)) != 0)
458 port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
459
460 /*
461 * Create a child device for each MAC. The ethernet attachment
462 * will be done in these children.
463 */
464 for (i = 0; i < (sc)->params.nports; i++) {
465 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {

--- 4 unchanged lines hidden (view full) ---

470 sc->portdev[i] = child;
471 sc->port[i].adapter = sc;
472 sc->port[i].nqsets = port_qsets;
473 sc->port[i].first_qset = i*port_qsets;
474 sc->port[i].port = i;
475 device_set_softc(child, &sc->port[i]);
476 }
477 if ((error = bus_generic_attach(dev)) != 0)
471 goto out;;
472
473 if ((error = setup_sge_qsets(sc)) != 0)
474 goto out;
478 goto out;
475
476 setup_rss(sc);
477
478 /* If it's MSI or INTx, allocate a single interrupt for everything */
479 if ((sc->flags & USING_MSIX) == 0) {
480 if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
481 &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
482 device_printf(dev, "Cannot allocate interrupt rid=%d\n", sc->irq_rid);
483 error = EINVAL;
484 goto out;
485 }
486 device_printf(dev, "allocated irq_res=%p\n", sc->irq_res);
487
479
488 if (bus_setup_intr(dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
489#ifdef INTR_FILTERS
490 NULL,
491#endif
492 cxgb_intr, sc, &sc->intr_tag)) {
493 device_printf(dev, "Cannot set up interrupt\n");
494 error = EINVAL;
495 goto out;
496 }
497 } else {
498 cxgb_setup_msix(sc, msi_count);
499 }
500
480 /*
481 * XXX need to poll for link status
482 */
501 sc->params.stats_update_period = 1;
502
503 /* initialize sge private state */
504 t3_sge_init_sw(sc);
505
506 t3_led_ready(sc);
483 sc->params.stats_update_period = 1;
484
485 /* initialize sge private state */
486 t3_sge_init_sw(sc);
487
488 t3_led_ready(sc);
507
489
490 cxgb_offload_init();
491 if (is_offload(sc)) {
492 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
493 cxgb_adapter_ofld(sc);
494 }
508 error = t3_get_fw_version(sc, &vers);
509 if (error)
510 goto out;
495 error = t3_get_fw_version(sc, &vers);
496 if (error)
497 goto out;
511
512 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d", G_FW_VERSION_MAJOR(vers),
513 G_FW_VERSION_MINOR(vers));
514
498
499 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
500 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
501 G_FW_VERSION_MICRO(vers));
502
515 t3_add_sysctls(sc);
503 t3_add_sysctls(sc);
516
517out:
518 if (error)
519 cxgb_free(sc);
520
521 return (error);
522}
523
524static int

--- 8 unchanged lines hidden (view full) ---

533 return (0);
534}
535
536static void
537cxgb_free(struct adapter *sc)
538{
539 int i;
540
504out:
505 if (error)
506 cxgb_free(sc);
507
508 return (error);
509}
510
511static int

--- 8 unchanged lines hidden (view full) ---

520 return (0);
521}
522
523static void
524cxgb_free(struct adapter *sc)
525{
526 int i;
527
528 cxgb_down(sc);
529
530#ifdef MSI_SUPPORTED
531 if (sc->flags & (USING_MSI | USING_MSIX)) {
532 device_printf(sc->dev, "releasing msi message(s)\n");
533 pci_release_msi(sc->dev);
534 } else {
535 device_printf(sc->dev, "no msi message to release\n");
536 }
537#endif
538 if (sc->msix_regs_res != NULL) {
539 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
540 sc->msix_regs_res);
541 }
542
541 /*
542 * XXX need to drain the ifq by hand until
543 * it is taught about mbuf iovecs
544 */
543 /*
544 * XXX need to drain the ifq by hand until
545 * it is taught about mbuf iovecs
546 */
545
546 callout_drain(&sc->cxgb_tick_ch);
547
548 t3_sge_deinit_sw(sc);
549
550 if (sc->tq != NULL) {
551 taskqueue_drain(sc->tq, &sc->ext_intr_task);
552 taskqueue_free(sc->tq);
553 }
554
555 for (i = 0; i < (sc)->params.nports; ++i) {
556 if (sc->portdev[i] != NULL)
557 device_delete_child(sc->dev, sc->portdev[i]);
558 }
559
560 bus_generic_detach(sc->dev);
561
547 callout_drain(&sc->cxgb_tick_ch);
548
549 t3_sge_deinit_sw(sc);
550
551 if (sc->tq != NULL) {
552 taskqueue_drain(sc->tq, &sc->ext_intr_task);
553 taskqueue_free(sc->tq);
554 }
555
556 for (i = 0; i < (sc)->params.nports; ++i) {
557 if (sc->portdev[i] != NULL)
558 device_delete_child(sc->dev, sc->portdev[i]);
559 }
560
561 bus_generic_detach(sc->dev);
562
563 if (is_offload(sc)) {
564 cxgb_adapter_unofld(sc);
565 if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
566 offload_close(&sc->tdev);
567 }
562 t3_free_sge_resources(sc);
563 t3_sge_free(sc);
564
568 t3_free_sge_resources(sc);
569 t3_sge_free(sc);
570
565 for (i = 0; i < SGE_QSETS; i++) {
566 if (sc->msix_intr_tag[i] != NULL) {
567 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
568 sc->msix_intr_tag[i]);
569 }
570 if (sc->msix_irq_res[i] != NULL) {
571 bus_release_resource(sc->dev, SYS_RES_IRQ,
572 sc->msix_irq_rid[i], sc->msix_irq_res[i]);
573 }
574 }
575
576 if (sc->intr_tag != NULL) {
577 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
578 }
579
580 if (sc->irq_res != NULL) {
581 device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
582 sc->irq_rid, sc->irq_res);
583 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
584 sc->irq_res);
585 }
586#ifdef MSI_SUPPORTED
587 if (sc->flags & (USING_MSI | USING_MSIX)) {
588 device_printf(sc->dev, "releasing msi message(s)\n");
589 pci_release_msi(sc->dev);
590 }
591#endif
592 if (sc->msix_regs_res != NULL) {
593 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
594 sc->msix_regs_res);
595 }
596
597 if (sc->regs_res != NULL)
598 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
599 sc->regs_res);
600
601 mtx_destroy(&sc->mdio_lock);
602 mtx_destroy(&sc->sge.reg_lock);
603 mtx_destroy(&sc->lock);
604

--- 7 unchanged lines hidden (view full) ---

612 * Determines how many sets of SGE queues to use and initializes them.
613 * We support multiple queue sets per port if we have MSI-X, otherwise
614 * just one queue set per port.
615 */
616static int
617setup_sge_qsets(adapter_t *sc)
618{
619 int i, j, err, irq_idx, qset_idx;
571 if (sc->regs_res != NULL)
572 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
573 sc->regs_res);
574
575 mtx_destroy(&sc->mdio_lock);
576 mtx_destroy(&sc->sge.reg_lock);
577 mtx_destroy(&sc->lock);
578

--- 7 unchanged lines hidden (view full) ---

586 * Determines how many sets of SGE queues to use and initializes them.
587 * We support multiple queue sets per port if we have MSI-X, otherwise
588 * just one queue set per port.
589 */
590static int
591setup_sge_qsets(adapter_t *sc)
592{
593 int i, j, err, irq_idx, qset_idx;
620 u_int ntxq = 3;
594 u_int ntxq = SGE_TXQ_PER_SET;
621
622 if ((err = t3_sge_alloc(sc)) != 0) {
623 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
624 return (err);
625 }
626
627 if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
628 irq_idx = -1;

--- 187 unchanged lines hidden (view full) ---

816 printf("unsupported media type %s\n", p->port_type->desc);
817 return (ENXIO);
818 }
819
820 ifmedia_add(&p->media, media_flags, 0, NULL);
821 ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
822 ifmedia_set(&p->media, media_flags);
823
595
596 if ((err = t3_sge_alloc(sc)) != 0) {
597 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
598 return (err);
599 }
600
601 if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
602 irq_idx = -1;

--- 187 unchanged lines hidden (view full) ---

790 printf("unsupported media type %s\n", p->port_type->desc);
791 return (ENXIO);
792 }
793
794 ifmedia_add(&p->media, media_flags, 0, NULL);
795 ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
796 ifmedia_set(&p->media, media_flags);
797
824 snprintf(buf, sizeof(buf), "cxgb_port_taskq%d", p->port);
798 snprintf(buf, sizeof(buf), "cxgb_port_taskq%d", p->port);
825#ifdef TASKQUEUE_CURRENT
826 /* Create a port for handling TX without starvation */
827 p->tq = taskqueue_create(buf, M_NOWAIT,
828 taskqueue_thread_enqueue, &p->tq);
829#else
830 /* Create a port for handling TX without starvation */
831 p->tq = taskqueue_create_fast(buf, M_NOWAIT,
832 taskqueue_thread_enqueue, &p->tq);

--- 13 unchanged lines hidden (view full) ---

846}
847
848static int
849cxgb_port_detach(device_t dev)
850{
851 struct port_info *p;
852
853 p = device_get_softc(dev);
799#ifdef TASKQUEUE_CURRENT
800 /* Create a port for handling TX without starvation */
801 p->tq = taskqueue_create(buf, M_NOWAIT,
802 taskqueue_thread_enqueue, &p->tq);
803#else
804 /* Create a port for handling TX without starvation */
805 p->tq = taskqueue_create_fast(buf, M_NOWAIT,
806 taskqueue_thread_enqueue, &p->tq);

--- 13 unchanged lines hidden (view full) ---

820}
821
822static int
823cxgb_port_detach(device_t dev)
824{
825 struct port_info *p;
826
827 p = device_get_softc(dev);
828
829 PORT_LOCK(p);
830 cxgb_stop_locked(p);
831 PORT_UNLOCK(p);
832
854 mtx_destroy(&p->lock);
855 if (p->tq != NULL) {
856 taskqueue_drain(p->tq, &p->start_task);
857 taskqueue_free(p->tq);
858 p->tq = NULL;
859 }
860
861 ether_ifdetach(p->ifp);

--- 95 unchanged lines hidden (view full) ---

957 * neutral handler takes care of most of the processing for these events,
958 * then calls this handler for any OS-specific processing.
959 */
960void
961t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
962 int duplex, int fc)
963{
964 struct port_info *pi = &adapter->port[port_id];
833 mtx_destroy(&p->lock);
834 if (p->tq != NULL) {
835 taskqueue_drain(p->tq, &p->start_task);
836 taskqueue_free(p->tq);
837 p->tq = NULL;
838 }
839
840 ether_ifdetach(p->ifp);

--- 95 unchanged lines hidden (view full) ---

936 * neutral handler takes care of most of the processing for these events,
937 * then calls this handler for any OS-specific processing.
938 */
939void
940t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
941 int duplex, int fc)
942{
943 struct port_info *pi = &adapter->port[port_id];
944 struct cmac *mac = &adapter->port[port_id].mac;
965
966 if ((pi->ifp->if_flags & IFF_UP) == 0)
967 return;
945
946 if ((pi->ifp->if_flags & IFF_UP) == 0)
947 return;
968
969 if (link_status)
948
949 if (link_status) {
950 t3_mac_enable(mac, MAC_DIRECTION_RX);
970 if_link_state_change(pi->ifp, LINK_STATE_UP);
951 if_link_state_change(pi->ifp, LINK_STATE_UP);
971 else
952 } else {
972 if_link_state_change(pi->ifp, LINK_STATE_DOWN);
953 if_link_state_change(pi->ifp, LINK_STATE_DOWN);
973
954 pi->phy.ops->power_down(&pi->phy, 1);
955 t3_mac_disable(mac, MAC_DIRECTION_RX);
956 t3_link_start(&pi->phy, mac, &pi->link_config);
957 }
974}
975
976
977/*
978 * Interrupt-context handler for external (PHY) interrupts.
979 */
980void
981t3_os_ext_intr_handler(adapter_t *sc)
982{
983 if (cxgb_debug)
984 printf("t3_os_ext_intr_handler\n");
985 /*
986 * Schedule a task to handle external interrupts as they may be slow
987 * and we use a mutex to protect MDIO registers. We disable PHY
988 * interrupts in the meantime and let the task reenable them when
989 * it's done.
990 */
958}
959
960
961/*
962 * Interrupt-context handler for external (PHY) interrupts.
963 */
964void
965t3_os_ext_intr_handler(adapter_t *sc)
966{
967 if (cxgb_debug)
968 printf("t3_os_ext_intr_handler\n");
969 /*
970 * Schedule a task to handle external interrupts as they may be slow
971 * and we use a mutex to protect MDIO registers. We disable PHY
972 * interrupts in the meantime and let the task reenable them when
973 * it's done.
974 */
975 ADAPTER_LOCK(sc);
991 if (sc->slow_intr_mask) {
992 sc->slow_intr_mask &= ~F_T3DBG;
993 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
994 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
995 }
976 if (sc->slow_intr_mask) {
977 sc->slow_intr_mask &= ~F_T3DBG;
978 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
979 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
980 }
981 ADAPTER_UNLOCK(sc);
996}
997
998void
999t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1000{
1001
1002 /*
1003 * The ifnet might not be allocated before this gets called,

--- 58 unchanged lines hidden (view full) ---

1062 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
1063 }
1064
1065 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1066 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
1067 V_RRCPLCPUSIZE(6), cpus, rspq_map);
1068}
1069
982}
983
984void
985t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
986{
987
988 /*
989 * The ifnet might not be allocated before this gets called,

--- 58 unchanged lines hidden (view full) ---

1048 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
1049 }
1050
1051 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1052 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
1053 V_RRCPLCPUSIZE(6), cpus, rspq_map);
1054}
1055
1056/*
1057 * Sends an mbuf to an offload queue driver
1058 * after dealing with any active network taps.
1059 */
1060static inline int
1061offload_tx(struct toedev *tdev, struct mbuf *m)
1062{
1063 int ret;
1064
1065 critical_enter();
1066 ret = t3_offload_tx(tdev, m);
1067 critical_exit();
1068 return ret;
1069}
1070
1071static int
1072write_smt_entry(struct adapter *adapter, int idx)
1073{
1074 struct port_info *pi = &adapter->port[idx];
1075 struct cpl_smt_write_req *req;
1076 struct mbuf *m;
1077
1078 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1079 return (ENOMEM);
1080
1081 req = mtod(m, struct cpl_smt_write_req *);
1082 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1083 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1084 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
1085 req->iff = idx;
1086 memset(req->src_mac1, 0, sizeof(req->src_mac1));
1087 memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1088
1089 m_set_priority(m, 1);
1090
1091 offload_tx(&adapter->tdev, m);
1092
1093 return (0);
1094}
1095
1096static int
1097init_smt(struct adapter *adapter)
1098{
1099 int i;
1100
1101 for_each_port(adapter, i)
1102 write_smt_entry(adapter, i);
1103 return 0;
1104}
1105
1070static void
1106static void
1107init_port_mtus(adapter_t *adapter)
1108{
1109 unsigned int mtus = adapter->port[0].ifp->if_mtu;
1110
1111 if (adapter->port[1].ifp)
1112 mtus |= adapter->port[1].ifp->if_mtu << 16;
1113 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1114}
1115
1116static void
1071send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1072 int hi, int port)
1073{
1074 struct mbuf *m;
1075 struct mngt_pktsched_wr *req;
1076
1077 m = m_gethdr(M_NOWAIT, MT_DATA);
1078 if (m) {
1117send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1118 int hi, int port)
1119{
1120 struct mbuf *m;
1121 struct mngt_pktsched_wr *req;
1122
1123 m = m_gethdr(M_NOWAIT, MT_DATA);
1124 if (m) {
1079 req = (struct mngt_pktsched_wr *)m->m_data;
1125 req = mtod(m, struct mngt_pktsched_wr *);
1080 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1081 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1082 req->sched = sched;
1083 req->idx = qidx;
1084 req->min = lo;
1085 req->max = hi;
1086 req->binding = port;
1087 m->m_len = m->m_pkthdr.len = sizeof(*req);

--- 10 unchanged lines hidden (view full) ---

1098 const struct port_info *pi = adap2pinfo(sc, i);
1099
1100 for (j = 0; j < pi->nqsets; ++j)
1101 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1102 -1, i);
1103 }
1104}
1105
1126 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1127 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1128 req->sched = sched;
1129 req->idx = qidx;
1130 req->min = lo;
1131 req->max = hi;
1132 req->binding = port;
1133 m->m_len = m->m_pkthdr.len = sizeof(*req);

--- 10 unchanged lines hidden (view full) ---

1144 const struct port_info *pi = adap2pinfo(sc, i);
1145
1146 for (j = 0; j < pi->nqsets; ++j)
1147 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1148 -1, i);
1149 }
1150}
1151
1152/**
1153 * cxgb_up - enable the adapter
1154 * @adap: adapter being enabled
1155 *
1156 * Called when the first port is enabled, this function performs the
1157 * actions necessary to make an adapter operational, such as completing
1158 * the initialization of HW modules, and enabling interrupts.
1159 *
1160 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;

	/*
	 * One-time full initialization: firmware upgrade (if stale),
	 * hardware modules, SGE queue sets and the RSS table.
	 */
	if ((sc->flags & FULL_INIT_DONE) == 0) {

		if ((sc->flags & FW_UPTODATE) == 0)
			err = upgrade_fw(sc);

		if (err)
			goto out;

		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		setup_rss(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);

	/* If it's MSI or INTx, allocate a single interrupt for everything */
	if ((sc->flags & USING_MSIX) == 0) {
		if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		   &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n", sc->irq_rid);
			err = EINVAL;
			goto out;
		}
		device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);

		if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
			NULL,
#endif
			sc->cxgb_intr, sc, &sc->intr_tag)) {
			device_printf(sc->dev, "Cannot set up interrupt\n");
			err = EINVAL;
			goto irq_err;
		}
	} else {
		/* MSI-X: per-queue-set vectors are set up here instead. */
		cxgb_setup_msix(sc, sc->msi_count);
	}

	t3_sge_start(sc);
	t3_intr_enable(sc);

	/* Bind queue sets only once, and only on the MSI-X path. */
	if ((sc->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(sc);
	/*
	 * NOTE(review): QUEUES_BOUND is set even on the non-MSIX path
	 * where bind_qsets() was never called — confirm this is intended.
	 */
	sc->flags |= QUEUES_BOUND;
out:
	return (err);
irq_err:
	CH_ERR(sc, "request_irq failed, err %d\n", err);
	goto out;
}
1225
1226
1227/*
1228 * Release resources when all the ports and offloading have been stopped.
1229 */
1106static void
static void
cxgb_down(struct adapter *sc)
{
	int i;

	/* Quiesce the SGE and mask all adapter interrupts first. */
	t3_sge_stop(sc);
	t3_intr_disable(sc);

	/* Tear down any MSI-X per-queue-set handlers and vectors. */
	for (i = 0; i < SGE_QSETS; i++) {
		if (sc->msix_intr_tag[i] != NULL) {
			bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
			    sc->msix_intr_tag[i]);
			sc->msix_intr_tag[i] = NULL;
		}
		if (sc->msix_irq_res[i] != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    sc->msix_irq_rid[i], sc->msix_irq_res[i]);
			sc->msix_irq_res[i] = NULL;
		}
	}

	/* Tear down the single MSI/INTx interrupt, if present. */
	if (sc->intr_tag != NULL) {
		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		sc->intr_tag = NULL;
	}
	if (sc->irq_res != NULL) {
		device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
		    sc->irq_rid, sc->irq_res);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Wait for any in-flight timer callout and deferred tasks. */
	callout_drain(&sc->sge_timer_ch);
	taskqueue_drain(sc->tq, &sc->slow_intr_task);
	taskqueue_drain(sc->tq, &sc->timer_reclaim_task);
}
1267
static int
offload_open(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	struct toedev *tdev = TOEDEV(pi->ifp);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	/*
	 * Atomically set OFFLOAD_DEVMAP_BIT in the open-device map;
	 * if the compare-and-set fails another thread already opened
	 * (or is opening) offload and there is nothing to do.
	 */
	if (atomic_cmpset_int(&adapter->open_device_map,
		(adapter->open_device_map & ~OFFLOAD_DEVMAP_BIT),
		(adapter->open_device_map | OFFLOAD_DEVMAP_BIT)) == 0)
		return (0);

	/* Bring the adapter up if no NIC port had it up already. */
	ADAPTER_LOCK(pi->adapter);
	if (!adap_up)
		err = cxgb_up(adapter);
	ADAPTER_UNLOCK(pi->adapter);
	/*
	 * NOTE(review): cxgb_up() returns positive errno values
	 * (EINVAL, firmware/HW init errors), so this "err < 0" test
	 * appears to never fire — confirm whether it should be
	 * "err != 0".
	 */
	if (err < 0)
		return (err);

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0].ifp;
	err = cxgb_offload_activate(adapter);
	if (err)
		goto out;

	/* Program MTU table, congestion parameters and SMT entries. */
	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		       adapter->port[0].ifp->if_mtu : 0xffff);
	init_smt(adapter);

	/* Call back all registered clients */
	cxgb_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
		cxgb_set_dummy_ops(tdev);
	}
	return (err);
}
1313
static int
offload_close(struct toedev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	/* Nothing to do if offload was never opened. */
	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
		return 0;

	/* Call back all registered clients */
	cxgb_remove_clients(tdev);
	tdev->lldev = NULL;
	cxgb_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);

	/* Bring the whole adapter down if no NIC port still has it up. */
	ADAPTER_LOCK(adapter);
	if (!adapter->open_device_map)
		cxgb_down(adapter);
	ADAPTER_UNLOCK(adapter);

	cxgb_offload_deactivate(adapter);
	return 0;
}
1337
/*
 * ifnet if_init entry point: acquire the port lock and delegate to
 * cxgb_init_locked(), which does the actual bring-up.
 */
static void
cxgb_init(void *arg)
{
	struct port_info *pi = (struct port_info *)arg;

	PORT_LOCK(pi);
	cxgb_init_locked(pi);
	PORT_UNLOCK(pi);
}
1115
1116static void
1117cxgb_init_locked(struct port_info *p)
1118{
1119 struct ifnet *ifp;
1120 adapter_t *sc = p->adapter;
1339cxgb_init(void *arg)
1340{
1341 struct port_info *p = arg;
1342
1343 PORT_LOCK(p);
1344 cxgb_init_locked(p);
1345 PORT_UNLOCK(p);
1346}
1347
1348static void
1349cxgb_init_locked(struct port_info *p)
1350{
1351 struct ifnet *ifp;
1352 adapter_t *sc = p->adapter;
1121 int error;
1353 int err;
1122
1123 mtx_assert(&p->lock, MA_OWNED);
1354
1355 mtx_assert(&p->lock, MA_OWNED);
1124
1125 ifp = p->ifp;
1356 ifp = p->ifp;
1126 if ((sc->flags & FW_UPTODATE) == 0) {
1127 device_printf(sc->dev, "updating firmware to version %d.%d\n",
1128 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1129 if ((error = cxgb_fw_download(sc, sc->dev)) != 0) {
1130 device_printf(sc->dev, "firmware download failed err: %d"
1131 "interface will be unavailable\n", error);
1132 return;
1133 }
1134 sc->flags |= FW_UPTODATE;
1135 }
1136
1357
1137 cxgb_link_start(p);
1138 ADAPTER_LOCK(p->adapter);
1358 ADAPTER_LOCK(p->adapter);
1359 if ((sc->open_device_map == 0) && ((err = cxgb_up(sc)) < 0)) {
1360 ADAPTER_UNLOCK(p->adapter);
1361 cxgb_stop_locked(p);
1362 return;
1363 }
1139 if (p->adapter->open_device_map == 0)
1140 t3_intr_clear(sc);
1364 if (p->adapter->open_device_map == 0)
1365 t3_intr_clear(sc);
1141 t3_sge_start(sc);
1142
1366
1143 p->adapter->open_device_map |= (1 << p->port);
1367 setbit(&p->adapter->open_device_map, p->port);
1368
1144 ADAPTER_UNLOCK(p->adapter);
1369 ADAPTER_UNLOCK(p->adapter);
1145 t3_intr_enable(sc);
1370 if (is_offload(sc) && !ofld_disable) {
1371 err = offload_open(p);
1372 if (err)
1373 log(LOG_WARNING,
1374 "Could not initialize offload capabilities\n");
1375 }
1376 cxgb_link_start(p);
1146 t3_port_intr_enable(sc, p->port);
1147
1377 t3_port_intr_enable(sc, p->port);
1378
1148 if ((p->adapter->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
1149 bind_qsets(sc);
1150 p->adapter->flags |= QUEUES_BOUND;
1151
1152 callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
1153 cxgb_tick, sc);
1154
1379 callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
1380 cxgb_tick, sc);
1381
1155
1382 PORT_LOCK(p);
1156 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1157 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1383 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1384 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1385 PORT_UNLOCK(p);
1158}
1159
1160static void
1161cxgb_set_rxmode(struct port_info *p)
1162{
1163 struct t3_rx_mode rm;
1164 struct cmac *mac = &p->mac;
1165

--- 8 unchanged lines hidden (view full) ---

1174{
1175 struct ifnet *ifp;
1176
1177 mtx_assert(&p->lock, MA_OWNED);
1178 mtx_assert(&p->adapter->lock, MA_NOTOWNED);
1179
1180 ifp = p->ifp;
1181
1386}
1387
1388static void
1389cxgb_set_rxmode(struct port_info *p)
1390{
1391 struct t3_rx_mode rm;
1392 struct cmac *mac = &p->mac;
1393

--- 8 unchanged lines hidden (view full) ---

1402{
1403 struct ifnet *ifp;
1404
1405 mtx_assert(&p->lock, MA_OWNED);
1406 mtx_assert(&p->adapter->lock, MA_NOTOWNED);
1407
1408 ifp = p->ifp;
1409
1182 ADAPTER_LOCK(p->adapter);
1410 t3_port_intr_disable(p->adapter, p->port);
1411 PORT_LOCK(p);
1183 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1412 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1184 p->adapter->open_device_map &= ~(1 << p->port);
1413 PORT_UNLOCK(p);
1414 p->phy.ops->power_down(&p->phy, 1);
1415 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1416
1417 ADAPTER_LOCK(p->adapter);
1418 clrbit(&p->adapter->open_device_map, p->port);
1419 /*
1420 * XXX cancel check_task
1421 */
1185 if (p->adapter->open_device_map == 0)
1422 if (p->adapter->open_device_map == 0)
1186 t3_intr_disable(p->adapter);
1423 cxgb_down(p->adapter);
1187 ADAPTER_UNLOCK(p->adapter);
1424 ADAPTER_UNLOCK(p->adapter);
1188 t3_port_intr_disable(p->adapter, p->port);
1189 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1190
1191}
1192
1193static int
1194cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1195{
1196 struct port_info *p = ifp->if_softc;
1197 struct ifaddr *ifa = (struct ifaddr *)data;
1198 struct ifreq *ifr = (struct ifreq *)data;

--- 153 unchanged lines hidden (view full) ---

1352 m = m0;
1353 m_collapse(m, TX_MAX_SEGS, &m0);
1354 } else
1355 break;
1356 }
1357 m = m0;
1358 if ((err = t3_encap(p, &m)) != 0)
1359 break;
1425}
1426
1427static int
1428cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1429{
1430 struct port_info *p = ifp->if_softc;
1431 struct ifaddr *ifa = (struct ifaddr *)data;
1432 struct ifreq *ifr = (struct ifreq *)data;

--- 153 unchanged lines hidden (view full) ---

1586 m = m0;
1587 m_collapse(m, TX_MAX_SEGS, &m0);
1588 } else
1589 break;
1590 }
1591 m = m0;
1592 if ((err = t3_encap(p, &m)) != 0)
1593 break;
1360 BPF_MTAP(ifp, m);
1594 BPF_MTAP(ifp, m);
1361 }
1362 mtx_unlock(&txq->lock);
1363
1364 if (__predict_false(err)) {
1365 if (cxgb_debug)
1366 printf("would set OFLAGS\n");
1367 if (err == ENOMEM) {
1368 IFQ_LOCK(&ifp->if_snd);

--- 94 unchanged lines hidden (view full) ---

1463 adapter_t *sc = (adapter_t *)arg;
1464
1465 if (cxgb_debug)
1466 printf("cxgb_ext_intr_handler\n");
1467
1468 t3_phy_intr_handler(sc);
1469
1470 /* Now reenable external interrupts */
1595 }
1596 mtx_unlock(&txq->lock);
1597
1598 if (__predict_false(err)) {
1599 if (cxgb_debug)
1600 printf("would set OFLAGS\n");
1601 if (err == ENOMEM) {
1602 IFQ_LOCK(&ifp->if_snd);

--- 94 unchanged lines hidden (view full) ---

1697 adapter_t *sc = (adapter_t *)arg;
1698
1699 if (cxgb_debug)
1700 printf("cxgb_ext_intr_handler\n");
1701
1702 t3_phy_intr_handler(sc);
1703
1704 /* Now reenable external interrupts */
1705 ADAPTER_LOCK(sc);
1471 if (sc->slow_intr_mask) {
1472 sc->slow_intr_mask |= F_T3DBG;
1473 t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
1474 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1475 }
1706 if (sc->slow_intr_mask) {
1707 sc->slow_intr_mask |= F_T3DBG;
1708 t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
1709 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1710 }
1711 ADAPTER_UNLOCK(sc);
1476}
1477
1478static void
1479check_link_status(adapter_t *sc)
1480{
1481 int i;
1482
1483 for (i = 0; i < (sc)->params.nports; ++i) {

--- 261 unchanged lines hidden (view full) ---

1745 if (sc->flags & FULL_INIT_DONE)
1746 return (EBUSY);
1747 if (edata->val < 1 ||
1748 (edata->val > 1 && !(sc->flags & USING_MSIX)))
1749 return (EINVAL);
1750 if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
1751 return (EINVAL);
1752 sc->port[port_idx].nqsets = edata->val;
1712}
1713
1714static void
1715check_link_status(adapter_t *sc)
1716{
1717 int i;
1718
1719 for (i = 0; i < (sc)->params.nports; ++i) {

--- 261 unchanged lines hidden (view full) ---

1981 if (sc->flags & FULL_INIT_DONE)
1982 return (EBUSY);
1983 if (edata->val < 1 ||
1984 (edata->val > 1 && !(sc->flags & USING_MSIX)))
1985 return (EINVAL);
1986 if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
1987 return (EINVAL);
1988 sc->port[port_idx].nqsets = edata->val;
1989 sc->port[0].first_qset = 0;
1753 /*
1990 /*
1754 * XXX we're hardcoding ourselves to 2 ports
1755 * just like the LEENUX
1991 * XXX hardcode ourselves to 2 ports just like LEEENUX
1756 */
1757 sc->port[1].first_qset = sc->port[0].nqsets;
1758 break;
1759 }
1760 case CHELSIO_GET_QSET_NUM: {
1761 struct ch_reg *edata = (struct ch_reg *)data;
1762 edata->val = pi->nqsets;
1763 break;
1764 }
1992 */
1993 sc->port[1].first_qset = sc->port[0].nqsets;
1994 break;
1995 }
1996 case CHELSIO_GET_QSET_NUM: {
1997 struct ch_reg *edata = (struct ch_reg *)data;
1998 edata->val = pi->nqsets;
1999 break;
2000 }
1765#ifdef notyet
1766 /*
1767 * XXX FreeBSD driver does not currently support any
1768 * offload functionality
1769 */
2001#ifdef notyet
1770 case CHELSIO_LOAD_FW:
2002 case CHELSIO_LOAD_FW:
1771 case CHELSIO_DEVUP:
1772 case CHELSIO_SETMTUTAB:
1773 case CHELSIO_GET_PM:
1774 case CHELSIO_SET_PM:
2003 case CHELSIO_GET_PM:
2004 case CHELSIO_SET_PM:
1775 case CHELSIO_READ_TCAM_WORD:
1776 return (EOPNOTSUPP);
1777 break;
1778#endif
2005 return (EOPNOTSUPP);
2006 break;
2007#endif
2008 case CHELSIO_SETMTUTAB: {
2009 struct ch_mtus *m = (struct ch_mtus *)data;
2010 int i;
2011
2012 if (!is_offload(sc))
2013 return (EOPNOTSUPP);
2014 if (offload_running(sc))
2015 return (EBUSY);
2016 if (m->nmtus != NMTUS)
2017 return (EINVAL);
2018 if (m->mtus[0] < 81) /* accommodate SACK */
2019 return (EINVAL);
2020
2021 /*
2022 * MTUs must be in ascending order
2023 */
2024 for (i = 1; i < NMTUS; ++i)
2025 if (m->mtus[i] < m->mtus[i - 1])
2026 return (EINVAL);
2027
2028 memcpy(sc->params.mtus, m->mtus,
2029 sizeof(sc->params.mtus));
2030 break;
2031 }
2032 case CHELSIO_GETMTUTAB: {
2033 struct ch_mtus *m = (struct ch_mtus *)data;
2034
2035 if (!is_offload(sc))
2036 return (EOPNOTSUPP);
2037
2038 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2039 m->nmtus = NMTUS;
2040 break;
2041 }
2042 case CHELSIO_DEVUP:
2043 if (!is_offload(sc))
2044 return (EOPNOTSUPP);
2045 return offload_open(pi);
2046 break;
1779 case CHELSIO_GET_MEM: {
1780 struct ch_mem_range *t = (struct ch_mem_range *)data;
1781 struct mc7 *mem;
1782 uint8_t *useraddr;
1783 u64 buf[32];
1784
1785 if (!is_offload(sc))
1786 return (EOPNOTSUPP);

--- 31 unchanged lines hidden (view full) ---

1818 if (copyout(buf, useraddr, chunk))
1819 return (EFAULT);
1820 useraddr += chunk;
1821 t->addr += chunk;
1822 t->len -= chunk;
1823 }
1824 break;
1825 }
2047 case CHELSIO_GET_MEM: {
2048 struct ch_mem_range *t = (struct ch_mem_range *)data;
2049 struct mc7 *mem;
2050 uint8_t *useraddr;
2051 u64 buf[32];
2052
2053 if (!is_offload(sc))
2054 return (EOPNOTSUPP);

--- 31 unchanged lines hidden (view full) ---

2086 if (copyout(buf, useraddr, chunk))
2087 return (EFAULT);
2088 useraddr += chunk;
2089 t->addr += chunk;
2090 t->len -= chunk;
2091 }
2092 break;
2093 }
2094 case CHELSIO_READ_TCAM_WORD: {
2095 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2096
2097 if (!is_offload(sc))
2098 return (EOPNOTSUPP);
2099 return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2100 break;
2101 }
1826 case CHELSIO_SET_TRACE_FILTER: {
1827 struct ch_trace *t = (struct ch_trace *)data;
1828 const struct trace_params *tp;
1829
1830 tp = (const struct trace_params *)&t->sip;
1831 if (t->config_tx)
1832 t3_config_trace_filter(sc, tp, 0, t->invert_match,
1833 t->trace_tx);

--- 25 unchanged lines hidden (view full) ---

1859 cxgb_get_regs(sc, regs, buf);
1860 error = copyout(buf, regs->data, reglen);
1861
1862 done:
1863 free(buf, M_DEVBUF);
1864
1865 break;
1866 }
2102 case CHELSIO_SET_TRACE_FILTER: {
2103 struct ch_trace *t = (struct ch_trace *)data;
2104 const struct trace_params *tp;
2105
2106 tp = (const struct trace_params *)&t->sip;
2107 if (t->config_tx)
2108 t3_config_trace_filter(sc, tp, 0, t->invert_match,
2109 t->trace_tx);

--- 25 unchanged lines hidden (view full) ---

2135 cxgb_get_regs(sc, regs, buf);
2136 error = copyout(buf, regs->data, reglen);
2137
2138 done:
2139 free(buf, M_DEVBUF);
2140
2141 break;
2142 }
2143 case CHELSIO_SET_HW_SCHED: {
2144 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2145 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2146
2147 if ((sc->flags & FULL_INIT_DONE) == 0)
2148 return (EAGAIN); /* need TP to be initialized */
2149 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2150 !in_range(t->channel, 0, 1) ||
2151 !in_range(t->kbps, 0, 10000000) ||
2152 !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2153 !in_range(t->flow_ipg, 0,
2154 dack_ticks_to_usec(sc, 0x7ff)))
2155 return (EINVAL);
2156
2157 if (t->kbps >= 0) {
2158 error = t3_config_sched(sc, t->kbps, t->sched);
2159 if (error < 0)
2160 return (-error);
2161 }
2162 if (t->class_ipg >= 0)
2163 t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2164 if (t->flow_ipg >= 0) {
2165 t->flow_ipg *= 1000; /* us -> ns */
2166 t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2167 }
2168 if (t->mode >= 0) {
2169 int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2170
2171 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2172 bit, t->mode ? bit : 0);
2173 }
2174 if (t->channel >= 0)
2175 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2176 1 << t->sched, t->channel << t->sched);
2177 break;
2178 }
1867 default:
1868 return (EOPNOTSUPP);
1869 break;
1870 }
1871
1872 return (error);
1873}
1874

--- 46 unchanged lines hidden ---
2179 default:
2180 return (EOPNOTSUPP);
2181 break;
2182 }
2183
2184 return (error);
2185}
2186

--- 46 unchanged lines hidden ---