cxgb_main.c (172105) cxgb_main.c (172109)
1/**************************************************************************
2
3Copyright (c) 2007, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
122. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28***************************************************************************/
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 172105 2007-09-09 20:26:02Z kmacy $");
31__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 172109 2007-09-10 00:59:51Z kmacy $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/module.h>
38#include <sys/pciio.h>
39#include <sys/conf.h>
40#include <machine/bus.h>
41#include <machine/resource.h>
42#include <sys/bus_dma.h>
43#include <sys/rman.h>
44#include <sys/ioccom.h>
45#include <sys/mbuf.h>
46#include <sys/linker.h>
47#include <sys/firmware.h>
48#include <sys/socket.h>
49#include <sys/sockio.h>
50#include <sys/smp.h>
51#include <sys/sysctl.h>
52#include <sys/queue.h>
53#include <sys/taskqueue.h>
54
55#include <net/bpf.h>
56#include <net/ethernet.h>
57#include <net/if.h>
58#include <net/if_arp.h>
59#include <net/if_dl.h>
60#include <net/if_media.h>
61#include <net/if_types.h>
62
63#include <netinet/in_systm.h>
64#include <netinet/in.h>
65#include <netinet/if_ether.h>
66#include <netinet/ip.h>
67#include <netinet/ip.h>
68#include <netinet/tcp.h>
69#include <netinet/udp.h>
70
71#include <dev/pci/pcireg.h>
72#include <dev/pci/pcivar.h>
73#include <dev/pci/pci_private.h>
74
75#ifdef CONFIG_DEFINED
76#include <cxgb_include.h>
77#else
78#include <dev/cxgb/cxgb_include.h>
79#endif
80
81#ifdef PRIV_SUPPORTED
82#include <sys/priv.h>
83#endif
84
85static int cxgb_setup_msix(adapter_t *, int);
86static void cxgb_teardown_msix(adapter_t *);
87static void cxgb_init(void *);
88static void cxgb_init_locked(struct port_info *);
89static void cxgb_stop_locked(struct port_info *);
90static void cxgb_set_rxmode(struct port_info *);
91static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
92static void cxgb_start(struct ifnet *);
93static void cxgb_start_proc(void *, int ncount);
94static int cxgb_media_change(struct ifnet *);
95static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
96static int setup_sge_qsets(adapter_t *);
97static void cxgb_async_intr(void *);
98static void cxgb_ext_intr_handler(void *, int);
99static void cxgb_tick_handler(void *, int);
100static void cxgb_down_locked(struct adapter *sc);
101static void cxgb_tick(void *);
102static void setup_rss(adapter_t *sc);
103
104/* Attachment glue for the PCI controller end of the device. Each port of
105 * the device is attached separately, as defined later.
106 */
107static int cxgb_controller_probe(device_t);
108static int cxgb_controller_attach(device_t);
109static int cxgb_controller_detach(device_t);
110static void cxgb_free(struct adapter *);
111static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
112 unsigned int end);
113static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
114static int cxgb_get_regs_len(void);
115static int offload_open(struct port_info *pi);
116static void touch_bars(device_t dev);
117
118#ifdef notyet
119static int offload_close(struct toedev *tdev);
120#endif
121
122
123static device_method_t cxgb_controller_methods[] = {
124 DEVMETHOD(device_probe, cxgb_controller_probe),
125 DEVMETHOD(device_attach, cxgb_controller_attach),
126 DEVMETHOD(device_detach, cxgb_controller_detach),
127
128 /* bus interface */
129 DEVMETHOD(bus_print_child, bus_generic_print_child),
130 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
131
132 { 0, 0 }
133};
134
135static driver_t cxgb_controller_driver = {
136 "cxgbc",
137 cxgb_controller_methods,
138 sizeof(struct adapter)
139};
140
141static devclass_t cxgb_controller_devclass;
142DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
143
144/*
145 * Attachment glue for the ports. Attachment is done directly to the
146 * controller device.
147 */
148static int cxgb_port_probe(device_t);
149static int cxgb_port_attach(device_t);
150static int cxgb_port_detach(device_t);
151
152static device_method_t cxgb_port_methods[] = {
153 DEVMETHOD(device_probe, cxgb_port_probe),
154 DEVMETHOD(device_attach, cxgb_port_attach),
155 DEVMETHOD(device_detach, cxgb_port_detach),
156 { 0, 0 }
157};
158
159static driver_t cxgb_port_driver = {
160 "cxgb",
161 cxgb_port_methods,
162 0
163};
164
165static d_ioctl_t cxgb_extension_ioctl;
166static d_open_t cxgb_extension_open;
167static d_close_t cxgb_extension_close;
168
169static struct cdevsw cxgb_cdevsw = {
170 .d_version = D_VERSION,
171 .d_flags = 0,
172 .d_open = cxgb_extension_open,
173 .d_close = cxgb_extension_close,
174 .d_ioctl = cxgb_extension_ioctl,
175 .d_name = "cxgb",
176};
177
178static devclass_t cxgb_port_devclass;
179DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
180
181#define SGE_MSIX_COUNT (SGE_QSETS + 1)
182
183extern int collapse_mbufs;
184/*
185 * The driver uses the best interrupt scheme available on a platform in the
186 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
187 * of these schemes the driver may consider as follows:
188 *
189 * msi = 2: choose from among all three options
190 * msi = 1 : only consider MSI and pin interrupts
191 * msi = 0: force pin interrupts
192 */
193static int msi_allowed = 2;
194
195TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
196SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
197SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
198 "MSI-X, MSI, INTx selector");
199
200/*
201 * The driver enables offload as a default.
202 * To disable it, use ofld_disable = 1.
203 */
204static int ofld_disable = 0;
205TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
206SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
207 "disable ULP offload");
208
209/*
210 * The driver uses an auto-queue algorithm by default.
211 * To disable it and force a single queue-set per port, use singleq = 1.
212 */
213static int singleq = 1;
214TUNABLE_INT("hw.cxgb.singleq", &singleq);
215SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
216 "use a single queue-set per port");
217
218enum {
219 MAX_TXQ_ENTRIES = 16384,
220 MAX_CTRL_TXQ_ENTRIES = 1024,
221 MAX_RSPQ_ENTRIES = 16384,
222 MAX_RX_BUFFERS = 16384,
223 MAX_RX_JUMBO_BUFFERS = 16384,
224 MIN_TXQ_ENTRIES = 4,
225 MIN_CTRL_TXQ_ENTRIES = 4,
226 MIN_RSPQ_ENTRIES = 32,
227 MIN_FL_ENTRIES = 32,
228 MIN_FL_JUMBO_ENTRIES = 32
229};
230
231struct filter_info {
232 u32 sip;
233 u32 sip_mask;
234 u32 dip;
235 u16 sport;
236 u16 dport;
237 u32 vlan:12;
238 u32 vlan_prio:3;
239 u32 mac_hit:1;
240 u32 mac_idx:4;
241 u32 mac_vld:1;
242 u32 pkt_type:2;
243 u32 report_filter_id:1;
244 u32 pass:1;
245 u32 rss:1;
246 u32 qset:3;
247 u32 locked:1;
248 u32 valid:1;
249};
250
251enum { FILTER_NO_VLAN_PRI = 7 };
252
253#define PORT_MASK ((1 << MAX_NPORTS) - 1)
254
255/* Table for probing the cards. The desc field isn't actually used */
256struct cxgb_ident {
257 uint16_t vendor;
258 uint16_t device;
259 int index;
260 char *desc;
261} cxgb_identifiers[] = {
262 {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
263 {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
264 {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
265 {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
266 {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
267 {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
268 {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
269 {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
270 {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
271 {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
272 {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
273 {0, 0, 0, NULL}
274};
275
276
277static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
278
279static inline char
280t3rev2char(struct adapter *adapter)
281{
282 char rev = 'z';
283
284 switch(adapter->params.rev) {
285 case T3_REV_A:
286 rev = 'a';
287 break;
288 case T3_REV_B:
289 case T3_REV_B2:
290 rev = 'b';
291 break;
292 case T3_REV_C:
293 rev = 'c';
294 break;
295 }
296 return rev;
297}
298
299static struct cxgb_ident *
300cxgb_get_ident(device_t dev)
301{
302 struct cxgb_ident *id;
303
304 for (id = cxgb_identifiers; id->desc != NULL; id++) {
305 if ((id->vendor == pci_get_vendor(dev)) &&
306 (id->device == pci_get_device(dev))) {
307 return (id);
308 }
309 }
310 return (NULL);
311}
312
313static const struct adapter_info *
314cxgb_get_adapter_info(device_t dev)
315{
316 struct cxgb_ident *id;
317 const struct adapter_info *ai;
318
319 id = cxgb_get_ident(dev);
320 if (id == NULL)
321 return (NULL);
322
323 ai = t3_get_adapter_info(id->index);
324
325 return (ai);
326}
327
328static int
329cxgb_controller_probe(device_t dev)
330{
331 const struct adapter_info *ai;
332 char *ports, buf[80];
333 int nports;
334
335 ai = cxgb_get_adapter_info(dev);
336 if (ai == NULL)
337 return (ENXIO);
338
339 nports = ai->nports0 + ai->nports1;
340 if (nports == 1)
341 ports = "port";
342 else
343 ports = "ports";
344
345 snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, nports, ports);
346 device_set_desc_copy(dev, buf);
347 return (BUS_PROBE_DEFAULT);
348}
349
350#define FW_FNAME "t3fw%d%d%d"
351#define TPEEPROM_NAME "t3%ctpe%d%d%d"
352#define TPSRAM_NAME "t3%cps%d%d%d"
353
354static int
355upgrade_fw(adapter_t *sc)
356{
357 char buf[32];
358#ifdef FIRMWARE_LATEST
359 const struct firmware *fw;
360#else
361 struct firmware *fw;
362#endif
363 int status;
364
365 snprintf(&buf[0], sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
366 FW_VERSION_MINOR, FW_VERSION_MICRO);
367
368 fw = firmware_get(buf);
369
370 if (fw == NULL) {
371 device_printf(sc->dev, "Could not find firmware image %s\n", buf);
372 return (ENOENT);
373 } else
374 device_printf(sc->dev, "updating firmware on card with %s\n", buf);
375 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
376
377 device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
378
379 firmware_put(fw, FIRMWARE_UNLOAD);
380
381 return (status);
382}
383
384static int
385cxgb_controller_attach(device_t dev)
386{
387 device_t child;
388 const struct adapter_info *ai;
389 struct adapter *sc;
cxgb_main.c (172105)
390 int i, reg, error = 0;
391 uint32_t vers;
392 int port_qsets = 1;
393#ifdef MSI_SUPPORTED
394 int msi_needed;
395#endif
396 sc = device_get_softc(dev);
397 sc->dev = dev;
398 sc->msi_count = 0;
399
400 /* find the PCIe link width and set max read request to 4KB*/
401 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
402 uint16_t lnk, pectl;
403 lnk = pci_read_config(dev, reg + 0x12, 2);
404 sc->link_width = (lnk >> 4) & 0x3f;
405
406 pectl = pci_read_config(dev, reg + 0x8, 2);
407 pectl = (pectl & ~0x7000) | (5 << 12);
408 pci_write_config(dev, reg + 0x8, pectl, 2);
409 }
410
411 ai = cxgb_get_adapter_info(dev);
412 if (sc->link_width != 0 && sc->link_width <= 4 &&
413 (ai->nports0 + ai->nports1) <= 2) {
414 device_printf(sc->dev,
415 "PCIe x%d Link, expect reduced performance\n",
416 sc->link_width);
417 }
418
cxgb_main.c (172109)
390 int i, error = 0;
391 uint32_t vers;
392 int port_qsets = 1;
393#ifdef MSI_SUPPORTED
394 int msi_needed, reg;
395#endif
396 sc = device_get_softc(dev);
397 sc->dev = dev;
398 sc->msi_count = 0;
399 ai = cxgb_get_adapter_info(dev);
400
401 /*
402 * XXX not really related but a recent addition
403 */
404#ifdef MSI_SUPPORTED
405 /* find the PCIe link width and set max read request to 4KB*/
406 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
407 uint16_t lnk, pectl;
408 lnk = pci_read_config(dev, reg + 0x12, 2);
409 sc->link_width = (lnk >> 4) & 0x3f;
410
411 pectl = pci_read_config(dev, reg + 0x8, 2);
412 pectl = (pectl & ~0x7000) | (5 << 12);
413 pci_write_config(dev, reg + 0x8, pectl, 2);
414 }
415
416 if (sc->link_width != 0 && sc->link_width <= 4 &&
417 (ai->nports0 + ai->nports1) <= 2) {
418 device_printf(sc->dev,
419 "PCIe x%d Link, expect reduced performance\n",
420 sc->link_width);
421 }
422#endif
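/*
 * [Illustration, not part of cxgb_main.c]
 * The constants in the PCIe fixup above follow the PCI Express capability
 * layout: offset 0x12 is the Link Status register, whose bits 9:4 hold the
 * negotiated link width (hence (lnk >> 4) & 0x3f), and offset 0x8 is the
 * Device Control register, whose bits 14:12 hold the Max_Read_Request_Size
 * encoding (5 means 4096 bytes, hence (pectl & ~0x7000) | (5 << 12)).
 * The sample register values below are made up purely to show the
 * arithmetic.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint16_t lnk = 0x1081;		/* sample Link Status: x8 link */
	uint16_t pectl = 0x2810;	/* sample Device Control: MRRS = 512 */
	unsigned int link_width = (lnk >> 4) & 0x3f;

	printf("negotiated link width: x%u\n", link_width);
	printf("devctl before: 0x%04x\n", pectl);
	pectl = (pectl & ~0x7000) | (5 << 12);	/* MRRS field -> 4096 bytes */
	printf("devctl after:  0x%04x\n", pectl);
	return (0);
}
/* [End illustration] */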
419 touch_bars(dev);
420 pci_enable_busmaster(dev);
421 /*
422 * Allocate the registers and make them available to the driver.
423 * The registers that we care about for NIC mode are in BAR 0
424 */
425 sc->regs_rid = PCIR_BAR(0);
426 if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
427 &sc->regs_rid, RF_ACTIVE)) == NULL) {
428 device_printf(dev, "Cannot allocate BAR\n");
429 return (ENXIO);
430 }
431
432 snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
433 device_get_unit(dev));
434 ADAPTER_LOCK_INIT(sc, sc->lockbuf);
435
436 snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
437 device_get_unit(dev));
438 snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
439 device_get_unit(dev));
440 snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
441 device_get_unit(dev));
442
443 MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
444 MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
445 MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
446
447 sc->bt = rman_get_bustag(sc->regs_res);
448 sc->bh = rman_get_bushandle(sc->regs_res);
449 sc->mmio_len = rman_get_size(sc->regs_res);
450
451 if (t3_prep_adapter(sc, ai, 1) < 0) {
452 printf("prep adapter failed\n");
453 error = ENODEV;
454 goto out;
455 }
456 /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate
457 * enough messages for the queue sets. If that fails, try falling
458 * back to MSI. If that fails, then try falling back to the legacy
459 * interrupt pin model.
460 */
461#ifdef MSI_SUPPORTED
462
463 sc->msix_regs_rid = 0x20;
464 if ((msi_allowed >= 2) &&
465 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
466 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
467
468 msi_needed = sc->msi_count = SGE_MSIX_COUNT;
469
470 if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
471 (sc->msi_count != msi_needed)) {
472 device_printf(dev, "msix allocation failed - msi_count = %d"
473 " msi_needed=%d will try msi err=%d\n", sc->msi_count,
474 msi_needed, error);
475 sc->msi_count = 0;
476 pci_release_msi(dev);
477 bus_release_resource(dev, SYS_RES_MEMORY,
478 sc->msix_regs_rid, sc->msix_regs_res);
479 sc->msix_regs_res = NULL;
480 } else {
481 sc->flags |= USING_MSIX;
482 sc->cxgb_intr = t3_intr_msix;
483 }
484 }
485
486 if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
487 sc->msi_count = 1;
488 if (pci_alloc_msi(dev, &sc->msi_count)) {
489 device_printf(dev, "alloc msi failed - will try INTx\n");
490 sc->msi_count = 0;
491 pci_release_msi(dev);
492 } else {
493 sc->flags |= USING_MSI;
494 sc->irq_rid = 1;
495 sc->cxgb_intr = t3_intr_msi;
496 }
497 }
498#endif
499 if (sc->msi_count == 0) {
500 device_printf(dev, "using line interrupts\n");
501 sc->irq_rid = 0;
502 sc->cxgb_intr = t3b_intr;
503 }
504
505
506 /* Create a private taskqueue thread for handling driver events */
507#ifdef TASKQUEUE_CURRENT
508 sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
509 taskqueue_thread_enqueue, &sc->tq);
510#else
511 sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
512 taskqueue_thread_enqueue, &sc->tq);
513#endif
514 if (sc->tq == NULL) {
515 device_printf(dev, "failed to allocate controller task queue\n");
516 goto out;
517 }
518
519 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
520 device_get_nameunit(dev));
521 TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
522 TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
523
524
525 /* Create a periodic callout for checking adapter status */
526 callout_init(&sc->cxgb_tick_ch, TRUE);
527
528 if (t3_check_fw_version(sc) != 0) {
529 /*
530 * Warn user that a firmware update will be attempted in init.
531 */
532 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
533 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
534 sc->flags &= ~FW_UPTODATE;
535 } else {
536 sc->flags |= FW_UPTODATE;
537 }
538
539 if (t3_check_tpsram_version(sc) != 0) {
540 /*
541 * Warn user that a firmware update will be attempted in init.
542 */
543 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
544 t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
545 sc->flags &= ~TPS_UPTODATE;
546 } else {
547 sc->flags |= TPS_UPTODATE;
548 }
549
550 if ((sc->flags & USING_MSIX) && !singleq)
551 port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
552
553 /*
554 * Create a child device for each MAC. The ethernet attachment
555 * will be done in these children.
556 */
557 for (i = 0; i < (sc)->params.nports; i++) {
558 struct port_info *pi;
559
560 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
561 device_printf(dev, "failed to add child port\n");
562 error = EINVAL;
563 goto out;
564 }
565 pi = &sc->port[i];
566 pi->adapter = sc;
567 pi->nqsets = port_qsets;
568 pi->first_qset = i*port_qsets;
569 pi->port_id = i;
570 pi->tx_chan = i >= ai->nports0;
571 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
572 sc->rxpkt_map[pi->txpkt_intf] = i;
573 sc->portdev[i] = child;
574 device_set_softc(child, pi);
575 }
576 if ((error = bus_generic_attach(dev)) != 0)
577 goto out;
578
579 /*
580 * XXX need to poll for link status
581 */
582 sc->params.stats_update_period = 1;
583
584 /* initialize sge private state */
585 t3_sge_init_adapter(sc);
586
587 t3_led_ready(sc);
588
589 cxgb_offload_init();
590 if (is_offload(sc)) {
591 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
592 cxgb_adapter_ofld(sc);
593 }
594 error = t3_get_fw_version(sc, &vers);
595 if (error)
596 goto out;
597
598 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
599 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
600 G_FW_VERSION_MICRO(vers));
601
602 t3_add_sysctls(sc);
603out:
604 if (error)
605 cxgb_free(sc);
606
607 return (error);
608}
609
610static int
611cxgb_controller_detach(device_t dev)
612{
613 struct adapter *sc;
614
615 sc = device_get_softc(dev);
616
617 cxgb_free(sc);
618
619 return (0);
620}
621
622static void
623cxgb_free(struct adapter *sc)
624{
625 int i;
626
627 ADAPTER_LOCK(sc);
628 /*
629 * drops the lock
630 */
631 cxgb_down_locked(sc);
632
633#ifdef MSI_SUPPORTED
634 if (sc->flags & (USING_MSI | USING_MSIX)) {
635 device_printf(sc->dev, "releasing msi message(s)\n");
636 pci_release_msi(sc->dev);
637 } else {
638 device_printf(sc->dev, "no msi message to release\n");
639 }
640#endif
641 if (sc->msix_regs_res != NULL) {
642 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
643 sc->msix_regs_res);
644 }
645
646 if (sc->tq != NULL) {
647 taskqueue_drain(sc->tq, &sc->ext_intr_task);
648 taskqueue_drain(sc->tq, &sc->tick_task);
649 }
650 t3_sge_deinit_sw(sc);
651 /*
652 * Wait for last callout
653 */
654
655 tsleep(&sc, 0, "cxgb unload", 3*hz);
656
657 for (i = 0; i < (sc)->params.nports; ++i) {
658 if (sc->portdev[i] != NULL)
659 device_delete_child(sc->dev, sc->portdev[i]);
660 }
661
662 bus_generic_detach(sc->dev);
663 if (sc->tq != NULL)
664 taskqueue_free(sc->tq);
665#ifdef notyet
666 if (is_offload(sc)) {
667 cxgb_adapter_unofld(sc);
668 if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
669 offload_close(&sc->tdev);
670 }
671#endif
672
673 t3_free_sge_resources(sc);
674 free(sc->filters, M_DEVBUF);
675 t3_sge_free(sc);
676
677 cxgb_offload_exit();
678
679 if (sc->regs_res != NULL)
680 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
681 sc->regs_res);
682
683 MTX_DESTROY(&sc->mdio_lock);
684 MTX_DESTROY(&sc->sge.reg_lock);
685 MTX_DESTROY(&sc->elmer_lock);
686 ADAPTER_LOCK_DEINIT(sc);
687
688 return;
689}
690
691/**
692 * setup_sge_qsets - configure SGE Tx/Rx/response queues
693 * @sc: the controller softc
694 *
695 * Determines how many sets of SGE queues to use and initializes them.
696 * We support multiple queue sets per port if we have MSI-X, otherwise
697 * just one queue set per port.
698 */
699static int
700setup_sge_qsets(adapter_t *sc)
701{
702 int i, j, err, irq_idx = 0, qset_idx = 0;
703 u_int ntxq = SGE_TXQ_PER_SET;
704
705 if ((err = t3_sge_alloc(sc)) != 0) {
706 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
707 return (err);
708 }
709
710 if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
711 irq_idx = -1;
712
713 for (i = 0; i < (sc)->params.nports; i++) {
714 struct port_info *pi = &sc->port[i];
715
716 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
717 err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
718 (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
719 &sc->params.sge.qset[qset_idx], ntxq, pi);
720 if (err) {
721 t3_free_sge_resources(sc);
722 device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
723 err);
724 return (err);
725 }
726 }
727 }
728
729 return (0);
730}
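/*
 * [Illustration, not part of cxgb_main.c]
 * A worked example of the queue-set bookkeeping used here and in
 * cxgb_controller_attach(): with MSI-X each port gets
 * port_qsets = min(SGE_QSETS / nports, mp_ncpus) queue sets, and port i's
 * sets start at first_qset = i * port_qsets.  The SGE_QSETS, port and CPU
 * counts below are assumed values, chosen only to show the arithmetic.
 */
#include <stdio.h>

int
main(void)
{
	int sge_qsets = 8;		/* assumed value of SGE_QSETS */
	int nports = 2, ncpus = 4;	/* assumed adapter/host configuration */
	int port_qsets = sge_qsets / nports;
	int i;

	if (port_qsets > ncpus)
		port_qsets = ncpus;
	for (i = 0; i < nports; i++)
		printf("port %d: %d queue sets, first_qset %d\n",
		    i, port_qsets, i * port_qsets);
	return (0);
}
/* [End illustration] */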
731
732static void
733cxgb_teardown_msix(adapter_t *sc)
734{
735 int i, nqsets;
736
737 for (nqsets = i = 0; i < (sc)->params.nports; i++)
738 nqsets += sc->port[i].nqsets;
739
740 for (i = 0; i < nqsets; i++) {
741 if (sc->msix_intr_tag[i] != NULL) {
742 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
743 sc->msix_intr_tag[i]);
744 sc->msix_intr_tag[i] = NULL;
745 }
746 if (sc->msix_irq_res[i] != NULL) {
747 bus_release_resource(sc->dev, SYS_RES_IRQ,
748 sc->msix_irq_rid[i], sc->msix_irq_res[i]);
749 sc->msix_irq_res[i] = NULL;
750 }
751 }
752}
753
754static int
755cxgb_setup_msix(adapter_t *sc, int msix_count)
756{
757 int i, j, k, nqsets, rid;
758
759 /* The first message indicates link changes and error conditions */
760 sc->irq_rid = 1;
761 if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
762 &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
763 device_printf(sc->dev, "Cannot allocate msix interrupt\n");
764 return (EINVAL);
765 }
766
767 if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
768#ifdef INTR_FILTERS
769 NULL,
770#endif
771 cxgb_async_intr, sc, &sc->intr_tag)) {
772 device_printf(sc->dev, "Cannot set up interrupt\n");
773 return (EINVAL);
774 }
775 for (i = k = 0; i < (sc)->params.nports; i++) {
776 nqsets = sc->port[i].nqsets;
777 for (j = 0; j < nqsets; j++, k++) {
778 struct sge_qset *qs = &sc->sge.qs[k];
779
780 rid = k + 2;
781 if (cxgb_debug)
782 printf("rid=%d ", rid);
783 if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
784 sc->dev, SYS_RES_IRQ, &rid,
785 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
786 device_printf(sc->dev, "Cannot allocate "
787 "interrupt for message %d\n", rid);
788 return (EINVAL);
789 }
790 sc->msix_irq_rid[k] = rid;
791 printf("setting up interrupt for port=%d\n",
792 qs->port->port_id);
793 if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
794 INTR_MPSAFE|INTR_TYPE_NET,
795#ifdef INTR_FILTERS
796 NULL,
797#endif
798 t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
799 device_printf(sc->dev, "Cannot set up "
800 "interrupt for message %d\n", rid);
801 return (EINVAL);
802 }
803 }
804 }
805
806
807 return (0);
808}
809
810static int
811cxgb_port_probe(device_t dev)
812{
813 struct port_info *p;
814 char buf[80];
815
816 p = device_get_softc(dev);
817
818 snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, p->port_type->desc);
819 device_set_desc_copy(dev, buf);
820 return (0);
821}
822
823
824static int
825cxgb_makedev(struct port_info *pi)
826{
827
828 pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
829 UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
830
831 if (pi->port_cdev == NULL)
832 return (ENOMEM);
833
834 pi->port_cdev->si_drv1 = (void *)pi;
835
836 return (0);
837}
838
839
840#ifdef TSO_SUPPORTED
841#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
842/* Don't enable TSO6 yet */
843#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
844#else
845#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
846/* Don't enable TSO6 yet */
847#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
848#define IFCAP_TSO4 0x0
849#define IFCAP_TSO6 0x0
850#define CSUM_TSO 0x0
851#endif
852
853
854static int
855cxgb_port_attach(device_t dev)
856{
857 struct port_info *p;
858 struct ifnet *ifp;
859 int err, media_flags;
860
861 p = device_get_softc(dev);
862
863 snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
864 device_get_unit(device_get_parent(dev)), p->port_id);
865 PORT_LOCK_INIT(p, p->lockbuf);
866
867 /* Allocate an ifnet object and set it up */
868 ifp = p->ifp = if_alloc(IFT_ETHER);
869 if (ifp == NULL) {
870 device_printf(dev, "Cannot allocate ifnet\n");
871 return (ENOMEM);
872 }
873
874 /*
875 * Note that there is currently no watchdog timer.
876 */
877 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
878 ifp->if_init = cxgb_init;
879 ifp->if_softc = p;
880 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
881 ifp->if_ioctl = cxgb_ioctl;
882 ifp->if_start = cxgb_start;
883 ifp->if_timer = 0; /* Disable ifnet watchdog */
884 ifp->if_watchdog = NULL;
885
886 ifp->if_snd.ifq_drv_maxlen = TX_ETH_Q_SIZE;
887 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
888 IFQ_SET_READY(&ifp->if_snd);
889
890 ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
891 ifp->if_capabilities |= CXGB_CAP;
892 ifp->if_capenable |= CXGB_CAP_ENABLE;
893 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
894 /*
895 * disable TSO on 4-port - it isn't supported by the firmware yet
896 */
897 if (p->adapter->params.nports > 2) {
898 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
899 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
900 ifp->if_hwassist &= ~CSUM_TSO;
901 }
902
903 ether_ifattach(ifp, p->hw_addr);
904 /*
905 * Only default to jumbo frames on 10GigE
906 */
907 if (p->adapter->params.nports <= 2)
908 ifp->if_mtu = 9000;
909 if ((err = cxgb_makedev(p)) != 0) {
910 printf("makedev failed %d\n", err);
911 return (err);
912 }
913 ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
914 cxgb_media_status);
915
916 if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
917 media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
918 } else if (!strcmp(p->port_type->desc, "10GBASE-SR")) {
919 media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
920 } else if (!strcmp(p->port_type->desc, "10GBASE-XR")) {
921 media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
922 } else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) {
923 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
924 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
925 0, NULL);
926 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
927 0, NULL);
928 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
929 0, NULL);
930 ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
931 0, NULL);
932 media_flags = 0;
933 } else {
934 printf("unsupported media type %s\n", p->port_type->desc);
935 return (ENXIO);
936 }
937 if (media_flags) {
938 ifmedia_add(&p->media, media_flags, 0, NULL);
939 ifmedia_set(&p->media, media_flags);
940 } else {
941 ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
942 ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
943 }
944
945
946 snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port_id);
947#ifdef TASKQUEUE_CURRENT
948 /* Create a taskqueue for handling TX without starvation */
949 p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
950 taskqueue_thread_enqueue, &p->tq);
951#else
952 /* Create a taskqueue for handling TX without starvation */
953 p->tq = taskqueue_create_fast(p->taskqbuf, M_NOWAIT,
954 taskqueue_thread_enqueue, &p->tq);
955#endif
956
957 if (p->tq == NULL) {
958 device_printf(dev, "failed to allocate port task queue\n");
959 return (ENOMEM);
960 }
961 taskqueue_start_threads(&p->tq, 1, PI_NET, "%s taskq",
962 device_get_nameunit(dev));
963
964 TASK_INIT(&p->start_task, 0, cxgb_start_proc, ifp);
965
966 t3_sge_init_port(p);
967
968 return (0);
969}
970
971static int
972cxgb_port_detach(device_t dev)
973{
974 struct port_info *p;
975
976 p = device_get_softc(dev);
977
978 PORT_LOCK(p);
979 if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
980 cxgb_stop_locked(p);
981 PORT_UNLOCK(p);
982
983 if (p->tq != NULL) {
984 taskqueue_drain(p->tq, &p->start_task);
985 taskqueue_free(p->tq);
986 p->tq = NULL;
987 }
988
989 ether_ifdetach(p->ifp);
990 /*
991 * the lock may be acquired in ifdetach
992 */
993 PORT_LOCK_DEINIT(p);
994 if_free(p->ifp);
995
996 if (p->port_cdev != NULL)
997 destroy_dev(p->port_cdev);
998
999 return (0);
1000}
1001
1002void
1003t3_fatal_err(struct adapter *sc)
1004{
1005 u_int fw_status[4];
1006
1007 if (sc->flags & FULL_INIT_DONE) {
1008 t3_sge_stop(sc);
1009 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1010 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1011 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1012 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1013 t3_intr_disable(sc);
1014 }
1015 device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1016 if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1017 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1018 fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1019}
1020
1021int
1022t3_os_find_pci_capability(adapter_t *sc, int cap)
1023{
1024 device_t dev;
1025 struct pci_devinfo *dinfo;
1026 pcicfgregs *cfg;
1027 uint32_t status;
1028 uint8_t ptr;
1029
1030 dev = sc->dev;
1031 dinfo = device_get_ivars(dev);
1032 cfg = &dinfo->cfg;
1033
1034 status = pci_read_config(dev, PCIR_STATUS, 2);
1035 if (!(status & PCIM_STATUS_CAPPRESENT))
1036 return (0);
1037
1038 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1039 case 0:
1040 case 1:
1041 ptr = PCIR_CAP_PTR;
1042 break;
1043 case 2:
1044 ptr = PCIR_CAP_PTR_2;
1045 break;
1046 default:
1047 return (0);
1048 break;
1049 }
1050 ptr = pci_read_config(dev, ptr, 1);
1051
1052 while (ptr != 0) {
1053 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1054 return (ptr);
1055 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1056 }
1057
1058 return (0);
1059}
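/*
 * [Illustration, not part of cxgb_main.c]
 * t3_os_find_pci_capability() above walks the standard PCI capability
 * linked list: the byte at offset 0x34 points at the first capability, and
 * each capability starts with an ID byte followed by a next-pointer byte.
 * The sketch below walks the same structure over a fabricated config-space
 * image; the offsets and capability IDs are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint8_t cfg[256] = { 0 };
	uint8_t ptr;

	cfg[0x34] = 0x50;			/* capabilities pointer */
	cfg[0x50] = 0x01; cfg[0x51] = 0x60;	/* power management -> 0x60 */
	cfg[0x60] = 0x05; cfg[0x61] = 0x70;	/* MSI -> 0x70 */
	cfg[0x70] = 0x10; cfg[0x71] = 0x00;	/* PCI Express, end of list */

	for (ptr = cfg[0x34]; ptr != 0; ptr = cfg[ptr + 1])
		printf("capability 0x%02x at offset 0x%02x\n", cfg[ptr], ptr);
	return (0);
}
/* [End illustration] */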
1060
1061int
1062t3_os_pci_save_state(struct adapter *sc)
1063{
1064 device_t dev;
1065 struct pci_devinfo *dinfo;
1066
1067 dev = sc->dev;
1068 dinfo = device_get_ivars(dev);
1069
1070 pci_cfg_save(dev, dinfo, 0);
1071 return (0);
1072}
1073
1074int
1075t3_os_pci_restore_state(struct adapter *sc)
1076{
1077 device_t dev;
1078 struct pci_devinfo *dinfo;
1079
1080 dev = sc->dev;
1081 dinfo = device_get_ivars(dev);
1082
1083 pci_cfg_restore(dev, dinfo);
1084 return (0);
1085}
1086
1087/**
1088 * t3_os_link_changed - handle link status changes
1089 * @adapter: the adapter associated with the link change
1090 * @port_id: the port index whose link status has changed
1091 * @link_stat: the new status of the link
1092 * @speed: the new speed setting
1093 * @duplex: the new duplex setting
1094 * @fc: the new flow-control setting
1095 *
1096 * This is the OS-dependent handler for link status changes. The OS
1097 * neutral handler takes care of most of the processing for these events,
1098 * then calls this handler for any OS-specific processing.
1099 */
1100void
1101t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1102 int duplex, int fc)
1103{
1104 struct port_info *pi = &adapter->port[port_id];
1105 struct cmac *mac = &adapter->port[port_id].mac;
1106
1107 if ((pi->ifp->if_flags & IFF_UP) == 0)
1108 return;
1109
1110 if (link_status) {
1111 t3_mac_enable(mac, MAC_DIRECTION_RX);
1112 if_link_state_change(pi->ifp, LINK_STATE_UP);
1113 } else {
1114 if_link_state_change(pi->ifp, LINK_STATE_DOWN);
1115 pi->phy.ops->power_down(&pi->phy, 1);
1116 t3_mac_disable(mac, MAC_DIRECTION_RX);
1117 t3_link_start(&pi->phy, mac, &pi->link_config);
1118 }
1119}
1120
1121/*
1122 * Interrupt-context handler for external (PHY) interrupts.
1123 */
1124void
1125t3_os_ext_intr_handler(adapter_t *sc)
1126{
1127 if (cxgb_debug)
1128 printf("t3_os_ext_intr_handler\n");
1129 /*
1130 * Schedule a task to handle external interrupts as they may be slow
1131 * and we use a mutex to protect MDIO registers. We disable PHY
1132 * interrupts in the meantime and let the task reenable them when
1133 * it's done.
1134 */
1135 ADAPTER_LOCK(sc);
1136 if (sc->slow_intr_mask) {
1137 sc->slow_intr_mask &= ~F_T3DBG;
1138 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1139 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
1140 }
1141 ADAPTER_UNLOCK(sc);
1142}
1143
1144void
1145t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1146{
1147
1148 /*
1149 * The ifnet might not be allocated before this gets called,
1150 * as this is called early on in attach by t3_prep_adapter;
1151 * save the address off in the port structure.
1152 */
1153 if (cxgb_debug)
1154 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1155 bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1156}
1157
1158/**
1159 * cxgb_link_start - enable a port
1160 * @p: the port to enable
1161 *
1162 * Performs the MAC and PHY actions needed to enable a port.
1163 */
1164static void
1165cxgb_link_start(struct port_info *p)
1166{
1167 struct ifnet *ifp;
1168 struct t3_rx_mode rm;
1169 struct cmac *mac = &p->mac;
1170
1171 ifp = p->ifp;
1172
1173 t3_init_rx_mode(&rm, p);
1174 if (!mac->multiport)
1175 t3_mac_reset(mac);
1176 t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1177 t3_mac_set_address(mac, 0, p->hw_addr);
1178 t3_mac_set_rx_mode(mac, &rm);
1179 t3_link_start(&p->phy, mac, &p->link_config);
1180 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1181}
1182
1183/**
1184 * setup_rss - configure Receive Side Steering (per-queue connection demux)
1185 * @adap: the adapter
1186 *
1187 * Sets up RSS to distribute packets to multiple receive queues. We
1188 * configure the RSS CPU lookup table to distribute to the number of HW
1189 * receive queues, and the response queue lookup table to narrow that
1190 * down to the response queues actually configured for each port.
1191 * We always configure the RSS mapping for two ports since the mapping
1192 * table has plenty of entries.
1193 */
1194static void
1195setup_rss(adapter_t *adap)
1196{
1197 int i;
1198 u_int nq[2];
1199 uint8_t cpus[SGE_QSETS + 1];
1200 uint16_t rspq_map[RSS_TABLE_SIZE];
1201
1202
1203 if ((adap->flags & USING_MSIX) == 0)
1204 return;
1205
1206 for (i = 0; i < SGE_QSETS; ++i)
1207 cpus[i] = i;
1208 cpus[SGE_QSETS] = 0xff;
1209
1210 nq[0] = nq[1] = 0;
1211 for_each_port(adap, i) {
1212 const struct port_info *pi = adap2pinfo(adap, i);
1213
1214 nq[pi->tx_chan] += pi->nqsets;
1215 }
1216 nq[0] = max(nq[0], 1U);
1217 nq[1] = max(nq[1], 1U);
1218 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1219 rspq_map[i] = i % nq[0];
1220 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq[1]) + nq[0];
1221 }
1222 /* Calculate the reverse RSS map table */
1223 for (i = 0; i < RSS_TABLE_SIZE; ++i)
1224 if (adap->rrss_map[rspq_map[i]] == 0xff)
1225 adap->rrss_map[rspq_map[i]] = i;
1226
1227 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1228 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1229 V_RRCPLCPUSIZE(6), cpus, rspq_map);
1230
1231}
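/*
 * [Illustration, not part of cxgb_main.c]
 * A standalone sketch of the response-queue lookup table computed in
 * setup_rss() above: the first half of the table cycles over channel 0's
 * queue sets (i % nq[0]) and the second half over channel 1's, offset by
 * nq[0].  The table size and per-channel queue counts below are assumed
 * values for illustration; the driver sizes the real table with
 * RSS_TABLE_SIZE.
 */
#include <stdio.h>

int
main(void)
{
	int table_size = 16;		/* assumed, for illustration */
	int nq0 = 2, nq1 = 2;		/* two queue sets per channel */
	int rspq_map[16];
	int i;

	for (i = 0; i < table_size / 2; i++) {
		rspq_map[i] = i % nq0;
		rspq_map[i + table_size / 2] = (i % nq1) + nq0;
	}
	for (i = 0; i < table_size; i++)
		printf("rspq_map[%2d] = %d\n", i, rspq_map[i]);
	return (0);
}
/* [End illustration] */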
1232
1233/*
1234 * Sends an mbuf to an offload queue driver
1235 * after dealing with any active network taps.
1236 */
1237static inline int
1238offload_tx(struct toedev *tdev, struct mbuf *m)
1239{
1240 int ret;
1241
1242 critical_enter();
1243 ret = t3_offload_tx(tdev, m);
1244 critical_exit();
1245 return (ret);
1246}
1247
1248static int
1249write_smt_entry(struct adapter *adapter, int idx)
1250{
1251 struct port_info *pi = &adapter->port[idx];
1252 struct cpl_smt_write_req *req;
1253 struct mbuf *m;
1254
1255 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1256 return (ENOMEM);
1257
1258 req = mtod(m, struct cpl_smt_write_req *);
1259 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1260 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1261 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
1262 req->iff = idx;
1263 memset(req->src_mac1, 0, sizeof(req->src_mac1));
1264 memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1265
1266 m_set_priority(m, 1);
1267
1268 offload_tx(&adapter->tdev, m);
1269
1270 return (0);
1271}
1272
1273static int
1274init_smt(struct adapter *adapter)
1275{
1276 int i;
1277
1278 for_each_port(adapter, i)
1279 write_smt_entry(adapter, i);
1280 return 0;
1281}
1282
1283static void
1284init_port_mtus(adapter_t *adapter)
1285{
1286 unsigned int mtus = adapter->port[0].ifp->if_mtu;
1287
1288 if (adapter->port[1].ifp)
1289 mtus |= adapter->port[1].ifp->if_mtu << 16;
1290 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1291}
1292
1293static void
1294send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1295 int hi, int port)
1296{
1297 struct mbuf *m;
1298 struct mngt_pktsched_wr *req;
1299
1300 m = m_gethdr(M_DONTWAIT, MT_DATA);
1301 if (m) {
1302 req = mtod(m, struct mngt_pktsched_wr *);
1303 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1304 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1305 req->sched = sched;
1306 req->idx = qidx;
1307 req->min = lo;
1308 req->max = hi;
1309 req->binding = port;
1310 m->m_len = m->m_pkthdr.len = sizeof(*req);
1311 t3_mgmt_tx(adap, m);
1312 }
1313}
1314
1315static void
1316bind_qsets(adapter_t *sc)
1317{
1318 int i, j;
1319
1320 for (i = 0; i < (sc)->params.nports; ++i) {
1321 const struct port_info *pi = adap2pinfo(sc, i);
1322
1323 for (j = 0; j < pi->nqsets; ++j) {
1324 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1325 -1, pi->tx_chan);
1326
1327 }
1328 }
1329}
1330
1331static void
1332update_tpeeprom(struct adapter *adap)
1333{
783
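			/*
			 * MSI-X vector 1 (rid 1) was claimed above for async
			 * events; each queue set's data interrupt gets the
			 * next vector, rid = k + 2.
			 */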
784 rid = k + 2;
785 if (cxgb_debug)
786 printf("rid=%d ", rid);
787 if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
788 sc->dev, SYS_RES_IRQ, &rid,
789 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
790 device_printf(sc->dev, "Cannot allocate "
791 "interrupt for message %d\n", rid);
792 return (EINVAL);
793 }
794 sc->msix_irq_rid[k] = rid;
795 printf("setting up interrupt for port=%d\n",
796 qs->port->port_id);
797 if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
798 INTR_MPSAFE|INTR_TYPE_NET,
799#ifdef INTR_FILTERS
800 NULL,
801#endif
802 t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
803 device_printf(sc->dev, "Cannot set up "
804 "interrupt for message %d\n", rid);
805 return (EINVAL);
806 }
807 }
808 }
809
810
811 return (0);
812}
813
814static int
815cxgb_port_probe(device_t dev)
816{
817 struct port_info *p;
818 char buf[80];
819
820 p = device_get_softc(dev);
821
822 snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, p->port_type->desc);
823 device_set_desc_copy(dev, buf);
824 return (0);
825}
826
827
828static int
829cxgb_makedev(struct port_info *pi)
830{
831
832 pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
833 UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
834
835 if (pi->port_cdev == NULL)
836 return (ENOMEM);
837
838 pi->port_cdev->si_drv1 = (void *)pi;
839
840 return (0);
841}
842
843
844#ifdef TSO_SUPPORTED
845#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
846/* Don't enable TSO6 yet */
847#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
848#else
849#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
850/* Don't enable TSO6 yet */
851#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
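/*
 * On kernels without TSO support, define the TSO-related flags to zero so
 * the capability and hwassist manipulation below compiles unchanged.
 */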
852#define IFCAP_TSO4 0x0
853#define IFCAP_TSO6 0x0
854#define CSUM_TSO 0x0
855#endif
856
857
858static int
859cxgb_port_attach(device_t dev)
860{
861 struct port_info *p;
862 struct ifnet *ifp;
863 int err, media_flags;
864
865 p = device_get_softc(dev);
866
867 snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
868 device_get_unit(device_get_parent(dev)), p->port_id);
869 PORT_LOCK_INIT(p, p->lockbuf);
870
871 /* Allocate an ifnet object and set it up */
872 ifp = p->ifp = if_alloc(IFT_ETHER);
873 if (ifp == NULL) {
874 device_printf(dev, "Cannot allocate ifnet\n");
875 return (ENOMEM);
876 }
877
878 /*
879 * Note that there is currently no watchdog timer.
880 */
881 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
882 ifp->if_init = cxgb_init;
883 ifp->if_softc = p;
884 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
885 ifp->if_ioctl = cxgb_ioctl;
886 ifp->if_start = cxgb_start;
887 ifp->if_timer = 0; /* Disable ifnet watchdog */
888 ifp->if_watchdog = NULL;
889
890 ifp->if_snd.ifq_drv_maxlen = TX_ETH_Q_SIZE;
891 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
892 IFQ_SET_READY(&ifp->if_snd);
893
894 ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
895 ifp->if_capabilities |= CXGB_CAP;
896 ifp->if_capenable |= CXGB_CAP_ENABLE;
897 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
898 /*
899 * disable TSO on 4-port - it isn't supported by the firmware yet
900 */
901 if (p->adapter->params.nports > 2) {
902 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
903 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
904 ifp->if_hwassist &= ~CSUM_TSO;
905 }
906
907 ether_ifattach(ifp, p->hw_addr);
908 /*
909 * Only default to jumbo frames on 10GigE
910 */
911 if (p->adapter->params.nports <= 2)
912 ifp->if_mtu = 9000;
913 if ((err = cxgb_makedev(p)) != 0) {
914 printf("makedev failed %d\n", err);
915 return (err);
916 }
917 ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
918 cxgb_media_status);
919
920 if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
921 media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
922 } else if (!strcmp(p->port_type->desc, "10GBASE-SR")) {
923 media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
924 } else if (!strcmp(p->port_type->desc, "10GBASE-XR")) {
925 media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
926 } else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) {
927 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
928 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
929 0, NULL);
930 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
931 0, NULL);
932 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
933 0, NULL);
934 ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
935 0, NULL);
936 media_flags = 0;
937 } else {
938 printf("unsupported media type %s\n", p->port_type->desc);
939 return (ENXIO);
940 }
941 if (media_flags) {
942 ifmedia_add(&p->media, media_flags, 0, NULL);
943 ifmedia_set(&p->media, media_flags);
944 } else {
945 ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
946 ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
947 }
948
949
950 snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port_id);
951#ifdef TASKQUEUE_CURRENT
952	/* Create a taskqueue for handling TX without starvation */
953 p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
954 taskqueue_thread_enqueue, &p->tq);
955#else
956	/* Create a taskqueue for handling TX without starvation */
957 p->tq = taskqueue_create_fast(p->taskqbuf, M_NOWAIT,
958 taskqueue_thread_enqueue, &p->tq);
959#endif
960
961 if (p->tq == NULL) {
962 device_printf(dev, "failed to allocate port task queue\n");
963 return (ENOMEM);
964 }
965 taskqueue_start_threads(&p->tq, 1, PI_NET, "%s taskq",
966 device_get_nameunit(dev));
967
968 TASK_INIT(&p->start_task, 0, cxgb_start_proc, ifp);
969
970 t3_sge_init_port(p);
971
972 return (0);
973}
974
975static int
976cxgb_port_detach(device_t dev)
977{
978 struct port_info *p;
979
980 p = device_get_softc(dev);
981
982 PORT_LOCK(p);
983 if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
984 cxgb_stop_locked(p);
985 PORT_UNLOCK(p);
986
987 if (p->tq != NULL) {
988 taskqueue_drain(p->tq, &p->start_task);
989 taskqueue_free(p->tq);
990 p->tq = NULL;
991 }
992
993 ether_ifdetach(p->ifp);
994 /*
995 * the lock may be acquired in ifdetach
996 */
997 PORT_LOCK_DEINIT(p);
998 if_free(p->ifp);
999
1000 if (p->port_cdev != NULL)
1001 destroy_dev(p->port_cdev);
1002
1003 return (0);
1004}
1005
1006void
1007t3_fatal_err(struct adapter *sc)
1008{
1009 u_int fw_status[4];
1010
1011 if (sc->flags & FULL_INIT_DONE) {
1012 t3_sge_stop(sc);
1013 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1014 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1015 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1016 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1017 t3_intr_disable(sc);
1018 }
1019 device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1020 if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1021 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1022 fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1023}
1024
1025int
1026t3_os_find_pci_capability(adapter_t *sc, int cap)
1027{
1028 device_t dev;
1029 struct pci_devinfo *dinfo;
1030 pcicfgregs *cfg;
1031 uint32_t status;
1032 uint8_t ptr;
1033
1034 dev = sc->dev;
1035 dinfo = device_get_ivars(dev);
1036 cfg = &dinfo->cfg;
1037
1038 status = pci_read_config(dev, PCIR_STATUS, 2);
1039 if (!(status & PCIM_STATUS_CAPPRESENT))
1040 return (0);
1041
1042 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1043 case 0:
1044 case 1:
1045 ptr = PCIR_CAP_PTR;
1046 break;
1047 case 2:
1048 ptr = PCIR_CAP_PTR_2;
1049 break;
1050 default:
1051 return (0);
1052 break;
1053 }
1054 ptr = pci_read_config(dev, ptr, 1);
1055
1056 while (ptr != 0) {
1057 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1058 return (ptr);
1059 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1060 }
1061
1062 return (0);
1063}
1064
1065int
1066t3_os_pci_save_state(struct adapter *sc)
1067{
1068 device_t dev;
1069 struct pci_devinfo *dinfo;
1070
1071 dev = sc->dev;
1072 dinfo = device_get_ivars(dev);
1073
1074 pci_cfg_save(dev, dinfo, 0);
1075 return (0);
1076}
1077
1078int
1079t3_os_pci_restore_state(struct adapter *sc)
1080{
1081 device_t dev;
1082 struct pci_devinfo *dinfo;
1083
1084 dev = sc->dev;
1085 dinfo = device_get_ivars(dev);
1086
1087 pci_cfg_restore(dev, dinfo);
1088 return (0);
1089}
1090
1091/**
1092 * t3_os_link_changed - handle link status changes
1093 * @adapter: the adapter associated with the link change
1094	 * @port_id: the port index whose link status has changed
1095 * @link_stat: the new status of the link
1096 * @speed: the new speed setting
1097 * @duplex: the new duplex setting
1098 * @fc: the new flow-control setting
1099 *
1100 * This is the OS-dependent handler for link status changes. The OS
1101 * neutral handler takes care of most of the processing for these events,
1102 * then calls this handler for any OS-specific processing.
1103 */
1104void
1105t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1106 int duplex, int fc)
1107{
1108 struct port_info *pi = &adapter->port[port_id];
1109 struct cmac *mac = &adapter->port[port_id].mac;
1110
1111 if ((pi->ifp->if_flags & IFF_UP) == 0)
1112 return;
1113
1114 if (link_status) {
1115 t3_mac_enable(mac, MAC_DIRECTION_RX);
1116 if_link_state_change(pi->ifp, LINK_STATE_UP);
1117 } else {
1118 if_link_state_change(pi->ifp, LINK_STATE_DOWN);
1119 pi->phy.ops->power_down(&pi->phy, 1);
1120 t3_mac_disable(mac, MAC_DIRECTION_RX);
1121 t3_link_start(&pi->phy, mac, &pi->link_config);
1122 }
1123}
1124
1125/*
1126 * Interrupt-context handler for external (PHY) interrupts.
1127 */
1128void
1129t3_os_ext_intr_handler(adapter_t *sc)
1130{
1131 if (cxgb_debug)
1132 printf("t3_os_ext_intr_handler\n");
1133 /*
1134 * Schedule a task to handle external interrupts as they may be slow
1135 * and we use a mutex to protect MDIO registers. We disable PHY
1136 * interrupts in the meantime and let the task reenable them when
1137 * it's done.
1138 */
1139 ADAPTER_LOCK(sc);
1140 if (sc->slow_intr_mask) {
1141 sc->slow_intr_mask &= ~F_T3DBG;
1142 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1143 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
1144 }
1145 ADAPTER_UNLOCK(sc);
1146}
1147
1148void
1149t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1150{
1151
1152 /*
1153	 * The ifnet might not be allocated before this gets called, as
1154	 * this is called early on in attach by t3_prep_adapter, so just
1155	 * save the address off in the port structure.
1156 */
1157 if (cxgb_debug)
1158 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1159 bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1160}
1161
1162/**
1163 * link_start - enable a port
1164 * @p: the port to enable
1165 *
1166 * Performs the MAC and PHY actions needed to enable a port.
1167 */
1168static void
1169cxgb_link_start(struct port_info *p)
1170{
1171 struct ifnet *ifp;
1172 struct t3_rx_mode rm;
1173 struct cmac *mac = &p->mac;
1174
1175 ifp = p->ifp;
1176
1177 t3_init_rx_mode(&rm, p);
1178 if (!mac->multiport)
1179 t3_mac_reset(mac);
1180 t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1181 t3_mac_set_address(mac, 0, p->hw_addr);
1182 t3_mac_set_rx_mode(mac, &rm);
1183 t3_link_start(&p->phy, mac, &p->link_config);
1184 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1185}
1186
1187/**
1188 * setup_rss - configure Receive Side Steering (per-queue connection demux)
1189 * @adap: the adapter
1190 *
1191 * Sets up RSS to distribute packets to multiple receive queues. We
1192 * configure the RSS CPU lookup table to distribute to the number of HW
1193 * receive queues, and the response queue lookup table to narrow that
1194 * down to the response queues actually configured for each port.
1195 * We always configure the RSS mapping for two ports since the mapping
1196 * table has plenty of entries.
1197 */
1198static void
1199setup_rss(adapter_t *adap)
1200{
1201 int i;
1202 u_int nq[2];
1203 uint8_t cpus[SGE_QSETS + 1];
1204 uint16_t rspq_map[RSS_TABLE_SIZE];
1205
1206
1207 if ((adap->flags & USING_MSIX) == 0)
1208 return;
1209
1210 for (i = 0; i < SGE_QSETS; ++i)
1211 cpus[i] = i;
1212 cpus[SGE_QSETS] = 0xff;
1213
1214 nq[0] = nq[1] = 0;
1215 for_each_port(adap, i) {
1216 const struct port_info *pi = adap2pinfo(adap, i);
1217
1218 nq[pi->tx_chan] += pi->nqsets;
1219 }
1220 nq[0] = max(nq[0], 1U);
1221 nq[1] = max(nq[1], 1U);
1222 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1223 rspq_map[i] = i % nq[0];
1224 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq[1]) + nq[0];
1225 }
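	/*
	 * For example, with two ports and two queue sets per port (one port
	 * per channel), nq[0] = nq[1] = 2: the first half of rspq_map
	 * alternates 0,1,0,1,... and the second half alternates 2,3,2,3,...
	 */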
1226 /* Calculate the reverse RSS map table */
1227 for (i = 0; i < RSS_TABLE_SIZE; ++i)
1228 if (adap->rrss_map[rspq_map[i]] == 0xff)
1229 adap->rrss_map[rspq_map[i]] = i;
1230
1231 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1232 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1233 V_RRCPLCPUSIZE(6), cpus, rspq_map);
1234
1235}
1236
1237/*
1238 * Sends an mbuf to an offload queue driver
1239 * after dealing with any active network taps.
1240 */
1241static inline int
1242offload_tx(struct toedev *tdev, struct mbuf *m)
1243{
1244 int ret;
1245
1246 critical_enter();
1247 ret = t3_offload_tx(tdev, m);
1248 critical_exit();
1249 return (ret);
1250}
1251
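/*
 * Program the source MAC table (SMT) entry for port 'idx' by sending a
 * CPL_SMT_WRITE_REQ with the port's MAC address down the offload queue.
 */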
1252static int
1253write_smt_entry(struct adapter *adapter, int idx)
1254{
1255 struct port_info *pi = &adapter->port[idx];
1256 struct cpl_smt_write_req *req;
1257 struct mbuf *m;
1258
1259 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1260 return (ENOMEM);
1261
1262 req = mtod(m, struct cpl_smt_write_req *);
1263 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1264 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1265 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
1266 req->iff = idx;
1267 memset(req->src_mac1, 0, sizeof(req->src_mac1));
1268 memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1269
1270 m_set_priority(m, 1);
1271
1272 offload_tx(&adapter->tdev, m);
1273
1274 return (0);
1275}
1276
1277static int
1278init_smt(struct adapter *adapter)
1279{
1280 int i;
1281
1282 for_each_port(adapter, i)
1283 write_smt_entry(adapter, i);
1284 return 0;
1285}
1286
1287static void
1288init_port_mtus(adapter_t *adapter)
1289{
1290 unsigned int mtus = adapter->port[0].ifp->if_mtu;
1291
1292 if (adapter->port[1].ifp)
1293 mtus |= adapter->port[1].ifp->if_mtu << 16;
1294 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
1295}
1296
1297static void
1298send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1299 int hi, int port)
1300{
1301 struct mbuf *m;
1302 struct mngt_pktsched_wr *req;
1303
1304 m = m_gethdr(M_DONTWAIT, MT_DATA);
1305 if (m) {
1306 req = mtod(m, struct mngt_pktsched_wr *);
1307 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1308 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1309 req->sched = sched;
1310 req->idx = qidx;
1311 req->min = lo;
1312 req->max = hi;
1313 req->binding = port;
1314 m->m_len = m->m_pkthdr.len = sizeof(*req);
1315 t3_mgmt_tx(adap, m);
1316 }
1317}
1318
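/*
 * Bind every queue set to its port's TX channel by issuing a packet
 * scheduler management command per queue set.
 */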
1319static void
1320bind_qsets(adapter_t *sc)
1321{
1322 int i, j;
1323
1324 for (i = 0; i < (sc)->params.nports; ++i) {
1325 const struct port_info *pi = adap2pinfo(sc, i);
1326
1327 for (j = 0; j < pi->nqsets; ++j) {
1328 send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1329 -1, pi->tx_chan);
1330
1331 }
1332 }
1333}
1334
1335static void
1336update_tpeeprom(struct adapter *adap)
1337{
1338#ifdef FIRMWARE_LATEST
1334 const struct firmware *tpeeprom;
1340#else
1341 struct firmware *tpeeprom;
1342#endif
1343
1335 char buf[64];
1336 uint32_t version;
1337 unsigned int major, minor;
1338 int ret, len;
1339 char rev;
1340
1341 t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1342
1343 major = G_TP_VERSION_MAJOR(version);
1344 minor = G_TP_VERSION_MINOR(version);
1345 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
1346 return;
1347
1348 rev = t3rev2char(adap);
1349
1350 snprintf(buf, sizeof(buf), TPEEPROM_NAME, rev,
1351 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1352
1353 tpeeprom = firmware_get(buf);
1354 if (tpeeprom == NULL) {
1355 device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n",
1356 buf);
1357 return;
1358 }
1359
1360 len = tpeeprom->datasize - 4;
1361
1362 ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1363 if (ret)
1364 goto release_tpeeprom;
1365
1366 if (len != TP_SRAM_LEN) {
1367 device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", buf, len, TP_SRAM_LEN);
1368		goto release_tpeeprom;	/* don't leak the firmware reference */
1369 }
1370
1371 ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1372 TP_SRAM_OFFSET);
1373
1374 if (!ret) {
1375 device_printf(adap->dev,
1376 "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1377 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1378 } else
1379 device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n");
1380
1381release_tpeeprom:
1382 firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1383
1384 return;
1385}
1386
1387static int
1388update_tpsram(struct adapter *adap)
1389{
1399#ifdef FIRMWARE_LATEST
1390 const struct firmware *tpsram;
1401#else
1402 struct firmware *tpsram;
1403#endif
1391 char buf[64];
1392 int ret;
1393 char rev;
1394
1395 rev = t3rev2char(adap);
1396 if (!rev)
1397 return 0;
1398
1399 update_tpeeprom(adap);
1400
1401 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
1402 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1403
1404 tpsram = firmware_get(buf);
1405 if (tpsram == NULL){
1406 device_printf(adap->dev, "could not load TP SRAM: unable to load %s\n",
1407 buf);
1408 return (EINVAL);
1409 } else
1410 device_printf(adap->dev, "updating TP SRAM with %s\n", buf);
1411
1412 ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1413 if (ret)
1414 goto release_tpsram;
1415
1416 ret = t3_set_proto_sram(adap, tpsram->data);
1417 if (ret)
1418 device_printf(adap->dev, "loading protocol SRAM failed\n");
1419
1420release_tpsram:
1421 firmware_put(tpsram, FIRMWARE_UNLOAD);
1422
1423 return ret;
1424}
1425
1426/**
1427 * cxgb_up - enable the adapter
1428 * @adap: adapter being enabled
1429 *
1430 * Called when the first port is enabled, this function performs the
1431 * actions necessary to make an adapter operational, such as completing
1432 * the initialization of HW modules, and enabling interrupts.
1433 *
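 *	Note: the callers in this file (cxgb_init_locked() and
 *	offload_open()) invoke this with the adapter lock held.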
1434 */
1435static int
1436cxgb_up(struct adapter *sc)
1437{
1438 int err = 0;
1439
1440 if ((sc->flags & FULL_INIT_DONE) == 0) {
1441
1442 if ((sc->flags & FW_UPTODATE) == 0)
1443 if ((err = upgrade_fw(sc)))
1444 goto out;
1445 if ((sc->flags & TPS_UPTODATE) == 0)
1446 if ((err = update_tpsram(sc)))
1447 goto out;
1448 err = t3_init_hw(sc, 0);
1449 if (err)
1450 goto out;
1451
1452 t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1453
1454 err = setup_sge_qsets(sc);
1455 if (err)
1456 goto out;
1457
1458 setup_rss(sc);
1459 sc->flags |= FULL_INIT_DONE;
1460 }
1461
1462 t3_intr_clear(sc);
1463
1464 /* If it's MSI or INTx, allocate a single interrupt for everything */
1465 if ((sc->flags & USING_MSIX) == 0) {
1466 if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
1467 &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1468 device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n",
1469 sc->irq_rid);
1470 err = EINVAL;
1471 goto out;
1472 }
1473 device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);
1474
1475 if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
1476#ifdef INTR_FILTERS
1477 NULL,
1478#endif
1479 sc->cxgb_intr, sc, &sc->intr_tag)) {
1480 device_printf(sc->dev, "Cannot set up interrupt\n");
1481 err = EINVAL;
1482 goto irq_err;
1483 }
1484 } else {
1485 cxgb_setup_msix(sc, sc->msi_count);
1486 }
1487
1488 t3_sge_start(sc);
1489 t3_intr_enable(sc);
1490
1491 if (!(sc->flags & QUEUES_BOUND)) {
1492 printf("bind qsets\n");
1493 bind_qsets(sc);
1494 sc->flags |= QUEUES_BOUND;
1495 }
1496out:
1497 return (err);
1498irq_err:
1499 CH_ERR(sc, "request_irq failed, err %d\n", err);
1500 goto out;
1501}
1502
1503
1504/*
1505 * Release resources when all the ports and offloading have been stopped.
1506 */
1507static void
1508cxgb_down_locked(struct adapter *sc)
1509{
1510 int i;
1511
1512 t3_sge_stop(sc);
1513 t3_intr_disable(sc);
1514
1515 if (sc->intr_tag != NULL) {
1516 bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
1517 sc->intr_tag = NULL;
1518 }
1519 if (sc->irq_res != NULL) {
1520 device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
1521 sc->irq_rid, sc->irq_res);
1522 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
1523 sc->irq_res);
1524 sc->irq_res = NULL;
1525 }
1526
1527 if (sc->flags & USING_MSIX)
1528 cxgb_teardown_msix(sc);
1529 ADAPTER_UNLOCK(sc);
1530
1531 callout_drain(&sc->cxgb_tick_ch);
1532 callout_drain(&sc->sge_timer_ch);
1533
1534 if (sc->tq != NULL) {
1535 taskqueue_drain(sc->tq, &sc->slow_intr_task);
1536 for (i = 0; i < sc->params.nports; i++)
1537 taskqueue_drain(sc->tq, &sc->port[i].timer_reclaim_task);
1538 }
1539#ifdef notyet
1540
1541 if (sc->port[i].tq != NULL)
1542#endif
1543
1544}
1545
1546static int
1547offload_open(struct port_info *pi)
1548{
1549 struct adapter *adapter = pi->adapter;
1550 struct toedev *tdev = TOEDEV(pi->ifp);
1551 int adap_up = adapter->open_device_map & PORT_MASK;
1552 int err = 0;
1553
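	/*
	 * Atomically mark the offload device open in open_device_map; if it
	 * was already marked open there is nothing more to do.
	 */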
1554 if (atomic_cmpset_int(&adapter->open_device_map,
1555 (adapter->open_device_map & ~OFFLOAD_DEVMAP_BIT),
1556 (adapter->open_device_map | OFFLOAD_DEVMAP_BIT)) == 0)
1557 return (0);
1558
1559 ADAPTER_LOCK(pi->adapter);
1560 if (!adap_up)
1561 err = cxgb_up(adapter);
1562 ADAPTER_UNLOCK(pi->adapter);
1563 if (err)
1564 return (err);
1565
1566 t3_tp_set_offload_mode(adapter, 1);
1567 tdev->lldev = adapter->port[0].ifp;
1568 err = cxgb_offload_activate(adapter);
1569 if (err)
1570 goto out;
1571
1572 init_port_mtus(adapter);
1573 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1574 adapter->params.b_wnd,
1575 adapter->params.rev == 0 ?
1576 adapter->port[0].ifp->if_mtu : 0xffff);
1577 init_smt(adapter);
1578
1579 /* Call back all registered clients */
1580 cxgb_add_clients(tdev);
1581
1582out:
1583 /* restore them in case the offload module has changed them */
1584 if (err) {
1585 t3_tp_set_offload_mode(adapter, 0);
1586 clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1587 cxgb_set_dummy_ops(tdev);
1588 }
1589 return (err);
1590}
1591#ifdef notyet
1592static int
1593offload_close(struct toedev *tdev)
1594{
1595 struct adapter *adapter = tdev2adap(tdev);
1596
1597 if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
1598 return (0);
1599
1600 /* Call back all registered clients */
1601 cxgb_remove_clients(tdev);
1602 tdev->lldev = NULL;
1603 cxgb_set_dummy_ops(tdev);
1604 t3_tp_set_offload_mode(adapter, 0);
1605 clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1606
1607 if (!adapter->open_device_map)
1608 cxgb_down(adapter);
1609
1610 cxgb_offload_deactivate(adapter);
1611 return (0);
1612}
1613#endif
1614
1615static void
1616cxgb_init(void *arg)
1617{
1618 struct port_info *p = arg;
1619
1620 PORT_LOCK(p);
1621 cxgb_init_locked(p);
1622 PORT_UNLOCK(p);
1623}
1624
1625static void
1626cxgb_init_locked(struct port_info *p)
1627{
1628 struct ifnet *ifp;
1629 adapter_t *sc = p->adapter;
1630 int err;
1631
1632 PORT_LOCK_ASSERT_OWNED(p);
1633 ifp = p->ifp;
1634
1635 ADAPTER_LOCK(p->adapter);
1636 if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
1637 ADAPTER_UNLOCK(p->adapter);
1638 cxgb_stop_locked(p);
1639 return;
1640 }
1641 if (p->adapter->open_device_map == 0) {
1642 t3_intr_clear(sc);
1643 t3_sge_init_adapter(sc);
1644 }
1645 setbit(&p->adapter->open_device_map, p->port_id);
1646 ADAPTER_UNLOCK(p->adapter);
1647
1648 if (is_offload(sc) && !ofld_disable) {
1649 err = offload_open(p);
1650 if (err)
1651 log(LOG_WARNING,
1652 "Could not initialize offload capabilities\n");
1653 }
1654 cxgb_link_start(p);
1655 t3_link_changed(sc, p->port_id);
1656 ifp->if_baudrate = p->link_config.speed * 1000000;
1657
1658 device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
1659 t3_port_intr_enable(sc, p->port_id);
1660
1661 callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
1662 cxgb_tick, sc);
1663
1664 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1665 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1666}
1667
1668static void
1669cxgb_set_rxmode(struct port_info *p)
1670{
1671 struct t3_rx_mode rm;
1672 struct cmac *mac = &p->mac;
1673
1674 PORT_LOCK_ASSERT_OWNED(p);
1675
1676 t3_init_rx_mode(&rm, p);
1677 t3_mac_set_rx_mode(mac, &rm);
1678}
1679
1680static void
1681cxgb_stop_locked(struct port_info *p)
1682{
1683 struct ifnet *ifp;
1684
1685 PORT_LOCK_ASSERT_OWNED(p);
1686 ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);
1687
1688 ifp = p->ifp;
1689
1690 t3_port_intr_disable(p->adapter, p->port_id);
1691 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1692 p->phy.ops->power_down(&p->phy, 1);
1693 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1694
1695 ADAPTER_LOCK(p->adapter);
1696 clrbit(&p->adapter->open_device_map, p->port_id);
1697
1698
1699 if (p->adapter->open_device_map == 0) {
1700 cxgb_down_locked(p->adapter);
1701 } else
1702 ADAPTER_UNLOCK(p->adapter);
1703
1704}
1705
1706static int
1707cxgb_set_mtu(struct port_info *p, int mtu)
1708{
1709 struct ifnet *ifp = p->ifp;
1710 int error = 0;
1711
1712 if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO))
1713 error = EINVAL;
1714 else if (ifp->if_mtu != mtu) {
1715 PORT_LOCK(p);
1716 ifp->if_mtu = mtu;
1717 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1718 callout_stop(&p->adapter->cxgb_tick_ch);
1719 cxgb_stop_locked(p);
1720 cxgb_init_locked(p);
1721 }
1722 PORT_UNLOCK(p);
1723 }
1724 return (error);
1725}
1726
1727static int
1728cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1729{
1730 struct port_info *p = ifp->if_softc;
1731 struct ifaddr *ifa = (struct ifaddr *)data;
1732 struct ifreq *ifr = (struct ifreq *)data;
1733 int flags, error = 0;
1734 uint32_t mask;
1735
1736 /*
1737 * XXX need to check that we aren't in the middle of an unload
1738 */
1739 switch (command) {
1740 case SIOCSIFMTU:
1741 error = cxgb_set_mtu(p, ifr->ifr_mtu);
1742 break;
1743 case SIOCSIFADDR:
1744 case SIOCGIFADDR:
1745 PORT_LOCK(p);
1746 if (ifa->ifa_addr->sa_family == AF_INET) {
1747 ifp->if_flags |= IFF_UP;
1748 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1749 cxgb_init_locked(p);
1750 arp_ifinit(ifp, ifa);
1751 } else
1752 error = ether_ioctl(ifp, command, data);
1753 PORT_UNLOCK(p);
1754 break;
1755 case SIOCSIFFLAGS:
1756 callout_drain(&p->adapter->cxgb_tick_ch);
1757 PORT_LOCK(p);
1758 if (ifp->if_flags & IFF_UP) {
1759 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1760 flags = p->if_flags;
1761 if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
1762 ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
1763 cxgb_set_rxmode(p);
1764 } else
1765 cxgb_init_locked(p);
1766 p->if_flags = ifp->if_flags;
1767 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1768 cxgb_stop_locked(p);
1769
1770 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1771 adapter_t *sc = p->adapter;
1772 callout_reset(&sc->cxgb_tick_ch,
1773 sc->params.stats_update_period * hz,
1774 cxgb_tick, sc);
1775 }
1776 PORT_UNLOCK(p);
1777 break;
1778 case SIOCSIFMEDIA:
1779 case SIOCGIFMEDIA:
1780 error = ifmedia_ioctl(ifp, ifr, &p->media, command);
1781 break;
1782 case SIOCSIFCAP:
1783 PORT_LOCK(p);
1784 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1785 if (mask & IFCAP_TXCSUM) {
1786 if (IFCAP_TXCSUM & ifp->if_capenable) {
1787 ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
1788 ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
1789 | CSUM_TSO);
1790 } else {
1791 ifp->if_capenable |= IFCAP_TXCSUM;
1792 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1793 }
1794 } else if (mask & IFCAP_RXCSUM) {
1795 if (IFCAP_RXCSUM & ifp->if_capenable) {
1796 ifp->if_capenable &= ~IFCAP_RXCSUM;
1797 } else {
1798 ifp->if_capenable |= IFCAP_RXCSUM;
1799 }
1800 }
1801 if (mask & IFCAP_TSO4) {
1802 if (IFCAP_TSO4 & ifp->if_capenable) {
1803 ifp->if_capenable &= ~IFCAP_TSO4;
1804 ifp->if_hwassist &= ~CSUM_TSO;
1805 } else if (IFCAP_TXCSUM & ifp->if_capenable) {
1806 ifp->if_capenable |= IFCAP_TSO4;
1807 ifp->if_hwassist |= CSUM_TSO;
1808 } else {
1809 if (cxgb_debug)
1810 printf("cxgb requires tx checksum offload"
1811 " be enabled to use TSO\n");
1812 error = EINVAL;
1813 }
1814 }
1815 PORT_UNLOCK(p);
1816 break;
1817 default:
1818 error = ether_ioctl(ifp, command, data);
1819 break;
1820 }
1821 return (error);
1822}
1823
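/*
 * Dequeue packets from the interface send queue and hand them to the SGE,
 * stopping once txmax descriptors have been consumed or the TX ring is
 * nearly full.
 */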
1824static int
1825cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
1826{
1827 struct sge_qset *qs;
1828 struct sge_txq *txq;
1829 struct port_info *p = ifp->if_softc;
1830 struct mbuf *m0, *m = NULL;
1843 struct mbuf *m = NULL;
1831 int err, in_use_init, free;
1832
1833 if (!p->link_config.link_ok)
1834 return (ENXIO);
1835
1836 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1837 return (ENOBUFS);
1838
1839 qs = &p->adapter->sge.qs[p->first_qset];
1840 txq = &qs->txq[TXQ_ETH];
1841 err = 0;
1842
1843 if (txq->flags & TXQ_TRANSMITTING)
1844 return (EINPROGRESS);
1845
1846 mtx_lock(&txq->lock);
1847 txq->flags |= TXQ_TRANSMITTING;
1848 in_use_init = txq->in_use;
1849 while ((txq->in_use - in_use_init < txmax) &&
1850 (txq->size > txq->in_use + TX_MAX_DESC)) {
1851 free = 0;
1852 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1853 if (m == NULL)
1854 break;
1855 /*
1856 * Convert chain to M_IOVEC
1857 */
1858 KASSERT((m->m_flags & M_IOVEC) == 0, ("IOVEC set too early"));
1872#ifdef notyet
1859 m0 = m;
1860#ifdef INVARIANTS
1861 /*
1862 * Clean up after net stack sloppiness
1863 * before calling m_sanity
1864 */
1865 m0 = m->m_next;
1866 while (m0) {
1867 m0->m_flags &= ~M_PKTHDR;
1868 m0 = m0->m_next;
1869 }
1870 m_sanity(m0, 0);
1871 m0 = m;
1872#endif
1873 if (collapse_mbufs && m->m_pkthdr.len > MCLBYTES &&
1874 m_collapse(m, TX_MAX_SEGS, &m0) == EFBIG) {
1875 if ((m0 = m_defrag(m, M_NOWAIT)) != NULL) {
1876 m = m0;
1877 m_collapse(m, TX_MAX_SEGS, &m0);
1878 } else
1879 break;
1880 }
1881 m = m0;
1883#endif
1882 if ((err = t3_encap(p, &m, &free)) != 0)
1883 break;
1884 BPF_MTAP(ifp, m);
1885 if (free)
1886 m_freem(m);
1887 }
1888 txq->flags &= ~TXQ_TRANSMITTING;
1889 mtx_unlock(&txq->lock);
1890
1891 if (__predict_false(err)) {
1892 if (err == ENOMEM) {
1893 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1894 IFQ_LOCK(&ifp->if_snd);
1895 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1896 IFQ_UNLOCK(&ifp->if_snd);
1897 }
1898 }
1899 if (err == 0 && m == NULL)
1900 err = ENOBUFS;
1901 else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
1902 (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
1903 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1904 err = ENOSPC;
1905 }
1906 return (err);
1907}
1908
1909static void
1910cxgb_start_proc(void *arg, int ncount)
1911{
1912 struct ifnet *ifp = arg;
1913 struct port_info *pi = ifp->if_softc;
1914 struct sge_qset *qs;
1915 struct sge_txq *txq;
1916 int error;
1917
1918 qs = &pi->adapter->sge.qs[pi->first_qset];
1919 txq = &qs->txq[TXQ_ETH];
1920
1921 do {
1922 if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
1923 taskqueue_enqueue(pi->tq, &txq->qreclaim_task);
1924
1925 error = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1926 } while (error == 0);
1927}
1928
1929static void
1930cxgb_start(struct ifnet *ifp)
1931{
1932 struct port_info *pi = ifp->if_softc;
1933 struct sge_qset *qs;
1934 struct sge_txq *txq;
1935 int err;
1936
1937 qs = &pi->adapter->sge.qs[pi->first_qset];
1938 txq = &qs->txq[TXQ_ETH];
1939
1940 if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
1941 taskqueue_enqueue(pi->tq,
1942 &txq->qreclaim_task);
1943
1944 err = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1945
1946 if (err == 0)
1947 taskqueue_enqueue(pi->tq, &pi->start_task);
1948}
1949
1950
1951static int
1952cxgb_media_change(struct ifnet *ifp)
1953{
1954 if_printf(ifp, "media change not supported\n");
1955 return (ENXIO);
1956}
1957
1958static void
1959cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1960{
1961 struct port_info *p = ifp->if_softc;
1962
1963 ifmr->ifm_status = IFM_AVALID;
1964 ifmr->ifm_active = IFM_ETHER;
1965
1966 if (!p->link_config.link_ok)
1967 return;
1968
1969 ifmr->ifm_status |= IFM_ACTIVE;
1970
1971 switch (p->link_config.speed) {
1972 case 10:
1973 ifmr->ifm_active |= IFM_10_T;
1974 break;
1975 case 100:
1976 ifmr->ifm_active |= IFM_100_TX;
1977 break;
1978 case 1000:
1979 ifmr->ifm_active |= IFM_1000_T;
1980 break;
1981 }
1982
1983 if (p->link_config.duplex)
1984 ifmr->ifm_active |= IFM_FDX;
1985 else
1986 ifmr->ifm_active |= IFM_HDX;
1987}
1988
1989static void
1990cxgb_async_intr(void *data)
1991{
1992 adapter_t *sc = data;
1993
1994 if (cxgb_debug)
1995 device_printf(sc->dev, "cxgb_async_intr\n");
1996 /*
1997 * May need to sleep - defer to taskqueue
1998 */
1999 taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2000}
2001
2002static void
2003cxgb_ext_intr_handler(void *arg, int count)
2004{
2005 adapter_t *sc = (adapter_t *)arg;
2006
2007 if (cxgb_debug)
2008 printf("cxgb_ext_intr_handler\n");
2009
2010 t3_phy_intr_handler(sc);
2011
2012 /* Now reenable external interrupts */
2013 ADAPTER_LOCK(sc);
2014 if (sc->slow_intr_mask) {
2015 sc->slow_intr_mask |= F_T3DBG;
2016 t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
2017 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
2018 }
2019 ADAPTER_UNLOCK(sc);
2020}
2021
2022static void
2023check_link_status(adapter_t *sc)
2024{
2025 int i;
2026
2027 for (i = 0; i < (sc)->params.nports; ++i) {
2028 struct port_info *p = &sc->port[i];
2029
2030 if (!(p->port_type->caps & SUPPORTED_IRQ))
2031 t3_link_changed(sc, i);
2032 p->ifp->if_baudrate = p->link_config.speed * 1000000;
2033 }
2034}
2035
2036static void
2037check_t3b2_mac(struct adapter *adapter)
2038{
2039 int i;
2040
2041 for_each_port(adapter, i) {
2042 struct port_info *p = &adapter->port[i];
2043 struct ifnet *ifp = p->ifp;
2044 int status;
2045
2046 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2047 continue;
2048
2049 status = 0;
2050 PORT_LOCK(p);
2051 if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
2052 status = t3b2_mac_watchdog_task(&p->mac);
2053 if (status == 1)
2054 p->mac.stats.num_toggled++;
2055 else if (status == 2) {
2056 struct cmac *mac = &p->mac;
2057
2058 t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
2059 + ETHER_VLAN_ENCAP_LEN);
2060 t3_mac_set_address(mac, 0, p->hw_addr);
2061 cxgb_set_rxmode(p);
2062 t3_link_start(&p->phy, mac, &p->link_config);
2063 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2064 t3_port_intr_enable(adapter, p->port_id);
2065 p->mac.stats.num_resets++;
2066 }
2067 PORT_UNLOCK(p);
2068 }
2069}
2070
2071static void
2072cxgb_tick(void *arg)
2073{
2074 adapter_t *sc = (adapter_t *)arg;
2075
2076 taskqueue_enqueue(sc->tq, &sc->tick_task);
2077
2078 if (sc->open_device_map != 0)
2079 callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
2080 cxgb_tick, sc);
2081}
2082
2083static void
2084cxgb_tick_handler(void *arg, int count)
2085{
2086 adapter_t *sc = (adapter_t *)arg;
2087 const struct adapter_params *p = &sc->params;
2088
2089 ADAPTER_LOCK(sc);
2090 if (p->linkpoll_period)
2091 check_link_status(sc);
2092
2093 /*
2094	 * adapter lock can currently only be acquired after the
2095 * port lock
2096 */
2097 ADAPTER_UNLOCK(sc);
2098
2099 if (p->rev == T3_REV_B2 && p->nports < 4)
2100 check_t3b2_mac(sc);
2101}
2102
2103static void
2104touch_bars(device_t dev)
2105{
2106 /*
2107 * Don't enable yet
2108 */
2109#if !defined(__LP64__) && 0
2110 u32 v;
2111
2112 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2113 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2114 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2115 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2116 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2117 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2118#endif
2119}
2120
2121static int
2122set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2123{
2124 uint8_t *buf;
2125 int err = 0;
2126 u32 aligned_offset, aligned_len, *p;
2127 struct adapter *adapter = pi->adapter;
2128
2129
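	/*
	 * The SEEPROM is written in 32-bit words.  Round the region out to
	 * word boundaries and, when the caller's range is unaligned, read
	 * the first and last words back first so the surrounding bytes are
	 * rewritten unchanged.
	 */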
2130 aligned_offset = offset & ~3;
2131 aligned_len = (len + (offset & 3) + 3) & ~3;
2132
2133 if (aligned_offset != offset || aligned_len != len) {
2134 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2135 if (!buf)
2136 return (ENOMEM);
2137 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2138 if (!err && aligned_len > 4)
2139 err = t3_seeprom_read(adapter,
2140 aligned_offset + aligned_len - 4,
2141 (u32 *)&buf[aligned_len - 4]);
2142 if (err)
2143 goto out;
2144 memcpy(buf + (offset & 3), data, len);
2145 } else
2146 buf = (uint8_t *)(uintptr_t)data;
2147
2148 err = t3_seeprom_wp(adapter, 0);
2149 if (err)
2150 goto out;
2151
2152 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2153 err = t3_seeprom_write(adapter, aligned_offset, *p);
2154 aligned_offset += 4;
2155 }
2156
2157 if (!err)
2158 err = t3_seeprom_wp(adapter, 1);
2159out:
2160 if (buf != data)
2161 free(buf, M_DEVBUF);
2162 return err;
2163}
2164
2165
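/*
 * Range check helper for the ioctls below: a negative value means the
 * parameter was not supplied and is accepted as-is.
 */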
2166static int
2167in_range(int val, int lo, int hi)
2168{
2169 return val < 0 || (val <= hi && val >= lo);
2170}
2171
2172static int
2173cxgb_extension_open(struct cdev *dev, int flags, int fmp, d_thread_t *td)
2174{
2175 return (0);
2176}
2177
2178static int
2179cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
2180{
2181 return (0);
2182}
2183
2184static int
2185cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2186 int fflag, struct thread *td)
2187{
2188 int mmd, error = 0;
2189 struct port_info *pi = dev->si_drv1;
2190 adapter_t *sc = pi->adapter;
2191
2192#ifdef PRIV_SUPPORTED
2193 if (priv_check(td, PRIV_DRIVER)) {
2194 if (cxgb_debug)
2195 printf("user does not have access to privileged ioctls\n");
2196 return (EPERM);
2197 }
2198#else
2199 if (suser(td)) {
2200 if (cxgb_debug)
2201 printf("user does not have access to privileged ioctls\n");
2202 return (EPERM);
2203 }
2204#endif
2205
2206 switch (cmd) {
2207 case SIOCGMIIREG: {
2208 uint32_t val;
2209 struct cphy *phy = &pi->phy;
2210 struct mii_data *mid = (struct mii_data *)data;
2211
2212 if (!phy->mdio_read)
2213 return (EOPNOTSUPP);
2214 if (is_10G(sc)) {
2215 mmd = mid->phy_id >> 8;
2216 if (!mmd)
2217 mmd = MDIO_DEV_PCS;
2218 else if (mmd > MDIO_DEV_XGXS)
2219 return (EINVAL);
2220
2221 error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2222 mid->reg_num, &val);
2223 } else
2224 error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2225 mid->reg_num & 0x1f, &val);
2226 if (error == 0)
2227 mid->val_out = val;
2228 break;
2229 }
2230 case SIOCSMIIREG: {
2231 struct cphy *phy = &pi->phy;
2232 struct mii_data *mid = (struct mii_data *)data;
2233
2234 if (!phy->mdio_write)
2235 return (EOPNOTSUPP);
2236 if (is_10G(sc)) {
2237 mmd = mid->phy_id >> 8;
2238 if (!mmd)
2239 mmd = MDIO_DEV_PCS;
2240 else if (mmd > MDIO_DEV_XGXS)
2241 return (EINVAL);
2242
2243 error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2244 mmd, mid->reg_num, mid->val_in);
2245 } else
2246 error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2247 mid->reg_num & 0x1f,
2248 mid->val_in);
2249 break;
2250 }
2251 case CHELSIO_SETREG: {
2252 struct ch_reg *edata = (struct ch_reg *)data;
2253 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2254 return (EFAULT);
2255 t3_write_reg(sc, edata->addr, edata->val);
2256 break;
2257 }
2258 case CHELSIO_GETREG: {
2259 struct ch_reg *edata = (struct ch_reg *)data;
2260 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2261 return (EFAULT);
2262 edata->val = t3_read_reg(sc, edata->addr);
2263 break;
2264 }
2265 case CHELSIO_GET_SGE_CONTEXT: {
2266 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2267 mtx_lock(&sc->sge.reg_lock);
2268 switch (ecntxt->cntxt_type) {
2269 case CNTXT_TYPE_EGRESS:
2270 error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2271 ecntxt->data);
2272 break;
2273 case CNTXT_TYPE_FL:
2274 error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
2275 ecntxt->data);
2276 break;
2277 case CNTXT_TYPE_RSP:
2278 error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2279 ecntxt->data);
2280 break;
2281 case CNTXT_TYPE_CQ:
2282 error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
2283 ecntxt->data);
2284 break;
2285 default:
2286 error = EINVAL;
2287 break;
2288 }
2289 mtx_unlock(&sc->sge.reg_lock);
2290 break;
2291 }
2292 case CHELSIO_GET_SGE_DESC: {
2293 struct ch_desc *edesc = (struct ch_desc *)data;
2294 int ret;
2295 if (edesc->queue_num >= SGE_QSETS * 6)
2296 return (EINVAL);
2297 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2298 edesc->queue_num % 6, edesc->idx, edesc->data);
2299 if (ret < 0)
2300 return (EINVAL);
2301 edesc->size = ret;
2302 break;
2303 }
2304 case CHELSIO_SET_QSET_PARAMS: {
2305 struct qset_params *q;
2306 struct ch_qset_params *t = (struct ch_qset_params *)data;
2307
2308 if (t->qset_idx >= SGE_QSETS)
2309 return (EINVAL);
2310 if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
2311 !in_range(t->cong_thres, 0, 255) ||
2312 !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
2313 MAX_TXQ_ENTRIES) ||
2314 !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
2315 MAX_TXQ_ENTRIES) ||
2316 !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2317 MAX_CTRL_TXQ_ENTRIES) ||
2318 !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
2319 !in_range(t->fl_size[1], MIN_FL_ENTRIES,
2320 MAX_RX_JUMBO_BUFFERS) ||
2321 !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
2322 return (EINVAL);
2323 if ((sc->flags & FULL_INIT_DONE) &&
2324 (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
2325 t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
2326 t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
2327 t->polling >= 0 || t->cong_thres >= 0))
2328 return (EBUSY);
2329
2330 q = &sc->params.sge.qset[t->qset_idx];
2331
2332 if (t->rspq_size >= 0)
2333 q->rspq_size = t->rspq_size;
2334 if (t->fl_size[0] >= 0)
2335 q->fl_size = t->fl_size[0];
2336 if (t->fl_size[1] >= 0)
2337 q->jumbo_size = t->fl_size[1];
2338 if (t->txq_size[0] >= 0)
2339 q->txq_size[0] = t->txq_size[0];
2340 if (t->txq_size[1] >= 0)
2341 q->txq_size[1] = t->txq_size[1];
2342 if (t->txq_size[2] >= 0)
2343 q->txq_size[2] = t->txq_size[2];
2344 if (t->cong_thres >= 0)
2345 q->cong_thres = t->cong_thres;
2346 if (t->intr_lat >= 0) {
2347 struct sge_qset *qs = &sc->sge.qs[t->qset_idx];
2348
2349 q->coalesce_nsecs = t->intr_lat*1000;
2350 t3_update_qset_coalesce(qs, q);
2351 }
2352 break;
2353 }
2354 case CHELSIO_GET_QSET_PARAMS: {
2355 struct qset_params *q;
2356 struct ch_qset_params *t = (struct ch_qset_params *)data;
2357
2358 if (t->qset_idx >= SGE_QSETS)
2359 return (EINVAL);
2360
2361 q = &(sc)->params.sge.qset[t->qset_idx];
2362 t->rspq_size = q->rspq_size;
2363 t->txq_size[0] = q->txq_size[0];
2364 t->txq_size[1] = q->txq_size[1];
2365 t->txq_size[2] = q->txq_size[2];
2366 t->fl_size[0] = q->fl_size;
2367 t->fl_size[1] = q->jumbo_size;
2368 t->polling = q->polling;
2369 t->intr_lat = q->coalesce_nsecs / 1000;
2370 t->cong_thres = q->cong_thres;
2371 break;
2372 }
2373 case CHELSIO_SET_QSET_NUM: {
2374 struct ch_reg *edata = (struct ch_reg *)data;
2375 unsigned int port_idx = pi->port_id;
2376
2377 if (sc->flags & FULL_INIT_DONE)
2378 return (EBUSY);
2379 if (edata->val < 1 ||
2380 (edata->val > 1 && !(sc->flags & USING_MSIX)))
2381 return (EINVAL);
2382 if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
2383 return (EINVAL);
2384 sc->port[port_idx].nqsets = edata->val;
2385 sc->port[0].first_qset = 0;
2386 /*
2387 * XXX hardcode ourselves to 2 ports just like LEEENUX
2388 */
2389 sc->port[1].first_qset = sc->port[0].nqsets;
2390 break;
2391 }
2392 case CHELSIO_GET_QSET_NUM: {
2393 struct ch_reg *edata = (struct ch_reg *)data;
2394 edata->val = pi->nqsets;
2395 break;
2396 }
2397#ifdef notyet
2398 case CHELSIO_LOAD_FW:
2399 case CHELSIO_GET_PM:
2400 case CHELSIO_SET_PM:
2401 return (EOPNOTSUPP);
2402 break;
2403#endif
2404 case CHELSIO_SETMTUTAB: {
2405 struct ch_mtus *m = (struct ch_mtus *)data;
2406 int i;
2407
2408 if (!is_offload(sc))
2409 return (EOPNOTSUPP);
2410 if (offload_running(sc))
2411 return (EBUSY);
2412 if (m->nmtus != NMTUS)
2413 return (EINVAL);
2414 if (m->mtus[0] < 81) /* accommodate SACK */
2415 return (EINVAL);
2416
2417 /*
2418 * MTUs must be in ascending order
2419 */
2420 for (i = 1; i < NMTUS; ++i)
2421 if (m->mtus[i] < m->mtus[i - 1])
2422 return (EINVAL);
2423
2424 memcpy(sc->params.mtus, m->mtus,
2425 sizeof(sc->params.mtus));
2426 break;
2427 }
2428 case CHELSIO_GETMTUTAB: {
2429 struct ch_mtus *m = (struct ch_mtus *)data;
2430
2431 if (!is_offload(sc))
2432 return (EOPNOTSUPP);
2433
2434 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2435 m->nmtus = NMTUS;
2436 break;
2437 }
2438 case CHELSIO_DEVUP:
2439 if (!is_offload(sc))
2440 return (EOPNOTSUPP);
2441 return offload_open(pi);
2442 break;
2443 case CHELSIO_GET_MEM: {
2444 struct ch_mem_range *t = (struct ch_mem_range *)data;
2445 struct mc7 *mem;
2446 uint8_t *useraddr;
2447 u64 buf[32];
2448
2449 if (!is_offload(sc))
2450 return (EOPNOTSUPP);
2451 if (!(sc->flags & FULL_INIT_DONE))
2452 return (EIO); /* need the memory controllers */
2453 if ((t->addr & 0x7) || (t->len & 0x7))
2454 return (EINVAL);
2455 if (t->mem_id == MEM_CM)
2456 mem = &sc->cm;
2457 else if (t->mem_id == MEM_PMRX)
2458 mem = &sc->pmrx;
2459 else if (t->mem_id == MEM_PMTX)
2460 mem = &sc->pmtx;
2461 else
2462 return (EINVAL);
2463
2464 /*
2465 * Version scheme:
2466 * bits 0..9: chip version
2467 * bits 10..15: chip revision
2468 */
2469 t->version = 3 | (sc->params.rev << 10);
2470
2471 /*
2472 * Read 256 bytes at a time as len can be large and we don't
2473 * want to use huge intermediate buffers.
2474 */
2475 useraddr = (uint8_t *)(t + 1); /* advance to start of buffer */
2476 while (t->len) {
2477 unsigned int chunk = min(t->len, sizeof(buf));
2478
2479 error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
2480 if (error)
2481 return (-error);
2482 if (copyout(buf, useraddr, chunk))
2483 return (EFAULT);
2484 useraddr += chunk;
2485 t->addr += chunk;
2486 t->len -= chunk;
2487 }
2488 break;
2489 }
2490 case CHELSIO_READ_TCAM_WORD: {
2491 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2492
2493 if (!is_offload(sc))
2494 return (EOPNOTSUPP);
2495 if (!(sc->flags & FULL_INIT_DONE))
2496 return (EIO); /* need MC5 */
2497 return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2498 break;
2499 }
2500 case CHELSIO_SET_TRACE_FILTER: {
2501 struct ch_trace *t = (struct ch_trace *)data;
2502 const struct trace_params *tp;
2503
2504 tp = (const struct trace_params *)&t->sip;
2505 if (t->config_tx)
2506 t3_config_trace_filter(sc, tp, 0, t->invert_match,
2507 t->trace_tx);
2508 if (t->config_rx)
2509 t3_config_trace_filter(sc, tp, 1, t->invert_match,
2510 t->trace_rx);
2511 break;
2512 }
2513 case CHELSIO_SET_PKTSCHED: {
2514 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2515 if (sc->open_device_map == 0)
2516 return (EAGAIN);
2517 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2518 p->binding);
2519 break;
2520 }
2521 case CHELSIO_IFCONF_GETREGS: {
2522 struct ifconf_regs *regs = (struct ifconf_regs *)data;
2523 int reglen = cxgb_get_regs_len();
2524 uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);
2525 		if (buf == NULL)
2526 			return (ENOMEM);
2527 		if (regs->len > reglen)
2528 			regs->len = reglen;
2529 else if (regs->len < reglen) {
2530 error = E2BIG;
2531 goto done;
2532 }
2533 cxgb_get_regs(sc, regs, buf);
2534 error = copyout(buf, regs->data, reglen);
2535
2536 done:
2537 free(buf, M_DEVBUF);
2538
2539 break;
2540 }
2541 case CHELSIO_SET_HW_SCHED: {
2542 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2543 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2544
2545 if ((sc->flags & FULL_INIT_DONE) == 0)
2546 return (EAGAIN); /* need TP to be initialized */
2547 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2548 !in_range(t->channel, 0, 1) ||
2549 !in_range(t->kbps, 0, 10000000) ||
2550 !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2551 !in_range(t->flow_ipg, 0,
2552 dack_ticks_to_usec(sc, 0x7ff)))
2553 return (EINVAL);
2554
2555 if (t->kbps >= 0) {
2556 error = t3_config_sched(sc, t->kbps, t->sched);
2557 if (error < 0)
2558 return (-error);
2559 }
2560 if (t->class_ipg >= 0)
2561 t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2562 if (t->flow_ipg >= 0) {
2563 t->flow_ipg *= 1000; /* us -> ns */
2564 t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2565 }
2566 if (t->mode >= 0) {
2567 int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2568
2569 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2570 bit, t->mode ? bit : 0);
2571 }
2572 if (t->channel >= 0)
2573 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2574 1 << t->sched, t->channel << t->sched);
2575 break;
2576 }
2577 default:
2578 return (EOPNOTSUPP);
2579 break;
2580 }
2581
2582 return (error);
2583}
2584
2585static __inline void
2586reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
2587 unsigned int end)
2588{
2589 	uint32_t *p = (uint32_t *)(buf + start);
2590
2591 for ( ; start <= end; start += sizeof(uint32_t))
2592 *p++ = t3_read_reg(ap, start);
2593}
2594
2595#define T3_REGMAP_SIZE (3 * 1024)
2596static int
2597cxgb_get_regs_len(void)
2598{
2599 return T3_REGMAP_SIZE;
2600}
2601#undef T3_REGMAP_SIZE
2602
2603static void
2604cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
2605{
2606
2607 /*
2608 * Version scheme:
2609 * bits 0..9: chip version
2610 * bits 10..15: chip revision
2611 * bit 31: set for PCIe cards
2612 */
2613 regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
2614
2615 /*
2616 * We skip the MAC statistics registers because they are clear-on-read.
2617 * Also reading multi-register stats would need to synchronize with the
2618 * periodic mac stats accumulation. Hard to justify the complexity.
2619 */
2620 memset(buf, 0, REGDUMP_SIZE);
2621 reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
2622 reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
2623 reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
2624 reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
2625 reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
2626 reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
2627 XGM_REG(A_XGM_SERDES_STAT3, 1));
2628 reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
2629 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
2630}
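
A short illustrative sketch of how the regs->version word filled in by cxgb_get_regs() above can be decoded. The bit layout (bits 0..9 chip version, bits 10..15 chip revision, bit 31 set for PCIe cards) is taken from the comment in that function; the helper name and the userland context are made up for this example and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: decode the version word stored in regs->version by
 * cxgb_get_regs().  Field layout follows the comment in that function;
 * the helper name is hypothetical.
 */
static void
decode_regdump_version(uint32_t version)
{
	printf("chip version %u, revision %u, PCIe: %s\n",
	    version & 0x3ff,		/* bits 0..9 */
	    (version >> 10) & 0x3f,	/* bits 10..15 */
	    (version >> 31) ? "yes" : "no");
}

int
main(void)
{
	/* e.g. a T3 (version 3), revision 2 part on a PCIe card */
	decode_regdump_version(3 | (2 << 10) | (1U << 31));
	return (0);
}
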
1884 if ((err = t3_encap(p, &m, &free)) != 0)
1885 break;
1886 BPF_MTAP(ifp, m);
1887 if (free)
1888 m_freem(m);
1889 }
1890 txq->flags &= ~TXQ_TRANSMITTING;
1891 mtx_unlock(&txq->lock);
1892
1893 if (__predict_false(err)) {
1894 if (err == ENOMEM) {
1895 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1896 IFQ_LOCK(&ifp->if_snd);
1897 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1898 IFQ_UNLOCK(&ifp->if_snd);
1899 }
1900 }
1901 if (err == 0 && m == NULL)
1902 err = ENOBUFS;
1903 else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
1904 (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
1905 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1906 err = ENOSPC;
1907 }
1908 return (err);
1909}
1910
1911static void
1912cxgb_start_proc(void *arg, int ncount)
1913{
1914 struct ifnet *ifp = arg;
1915 struct port_info *pi = ifp->if_softc;
1916 struct sge_qset *qs;
1917 struct sge_txq *txq;
1918 int error;
1919
1920 qs = &pi->adapter->sge.qs[pi->first_qset];
1921 txq = &qs->txq[TXQ_ETH];
1922
1923 do {
1924 if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
1925 taskqueue_enqueue(pi->tq, &txq->qreclaim_task);
1926
1927 error = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1928 } while (error == 0);
1929}
1930
1931static void
1932cxgb_start(struct ifnet *ifp)
1933{
1934 struct port_info *pi = ifp->if_softc;
1935 struct sge_qset *qs;
1936 struct sge_txq *txq;
1937 int err;
1938
1939 qs = &pi->adapter->sge.qs[pi->first_qset];
1940 txq = &qs->txq[TXQ_ETH];
1941
1942 if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
1943 taskqueue_enqueue(pi->tq,
1944 &txq->qreclaim_task);
1945
1946 err = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1947
1948 if (err == 0)
1949 taskqueue_enqueue(pi->tq, &pi->start_task);
1950}
1951
1952
1953static int
1954cxgb_media_change(struct ifnet *ifp)
1955{
1956 if_printf(ifp, "media change not supported\n");
1957 return (ENXIO);
1958}
1959
1960static void
1961cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1962{
1963 struct port_info *p = ifp->if_softc;
1964
1965 ifmr->ifm_status = IFM_AVALID;
1966 ifmr->ifm_active = IFM_ETHER;
1967
1968 if (!p->link_config.link_ok)
1969 return;
1970
1971 ifmr->ifm_status |= IFM_ACTIVE;
1972
1973 switch (p->link_config.speed) {
1974 case 10:
1975 ifmr->ifm_active |= IFM_10_T;
1976 break;
1977 case 100:
1978 ifmr->ifm_active |= IFM_100_TX;
1979 break;
1980 case 1000:
1981 ifmr->ifm_active |= IFM_1000_T;
1982 break;
1983 }
1984
1985 if (p->link_config.duplex)
1986 ifmr->ifm_active |= IFM_FDX;
1987 else
1988 ifmr->ifm_active |= IFM_HDX;
1989}
1990
1991static void
1992cxgb_async_intr(void *data)
1993{
1994 adapter_t *sc = data;
1995
1996 if (cxgb_debug)
1997 device_printf(sc->dev, "cxgb_async_intr\n");
1998 /*
1999 * May need to sleep - defer to taskqueue
2000 */
2001 taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2002}
2003
2004static void
2005cxgb_ext_intr_handler(void *arg, int count)
2006{
2007 adapter_t *sc = (adapter_t *)arg;
2008
2009 if (cxgb_debug)
2010 printf("cxgb_ext_intr_handler\n");
2011
2012 t3_phy_intr_handler(sc);
2013
2014 /* Now reenable external interrupts */
2015 ADAPTER_LOCK(sc);
2016 if (sc->slow_intr_mask) {
2017 sc->slow_intr_mask |= F_T3DBG;
2018 t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
2019 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
2020 }
2021 ADAPTER_UNLOCK(sc);
2022}
2023
2024static void
2025check_link_status(adapter_t *sc)
2026{
2027 int i;
2028
2029 for (i = 0; i < (sc)->params.nports; ++i) {
2030 struct port_info *p = &sc->port[i];
2031
2032 if (!(p->port_type->caps & SUPPORTED_IRQ))
2033 t3_link_changed(sc, i);
2034 p->ifp->if_baudrate = p->link_config.speed * 1000000;
2035 }
2036}
2037
2038static void
2039check_t3b2_mac(struct adapter *adapter)
2040{
2041 int i;
2042
2043 for_each_port(adapter, i) {
2044 struct port_info *p = &adapter->port[i];
2045 struct ifnet *ifp = p->ifp;
2046 int status;
2047
2048 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2049 continue;
2050
2051 status = 0;
2052 PORT_LOCK(p);
2053 if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
2054 status = t3b2_mac_watchdog_task(&p->mac);
2055 if (status == 1)
2056 p->mac.stats.num_toggled++;
2057 else if (status == 2) {
2058 struct cmac *mac = &p->mac;
2059
2060 t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
2061 + ETHER_VLAN_ENCAP_LEN);
2062 t3_mac_set_address(mac, 0, p->hw_addr);
2063 cxgb_set_rxmode(p);
2064 t3_link_start(&p->phy, mac, &p->link_config);
2065 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2066 t3_port_intr_enable(adapter, p->port_id);
2067 p->mac.stats.num_resets++;
2068 }
2069 PORT_UNLOCK(p);
2070 }
2071}
2072
2073static void
2074cxgb_tick(void *arg)
2075{
2076 adapter_t *sc = (adapter_t *)arg;
2077
2078 taskqueue_enqueue(sc->tq, &sc->tick_task);
2079
2080 if (sc->open_device_map != 0)
2081 callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
2082 cxgb_tick, sc);
2083}
2084
2085static void
2086cxgb_tick_handler(void *arg, int count)
2087{
2088 adapter_t *sc = (adapter_t *)arg;
2089 const struct adapter_params *p = &sc->params;
2090
2091 ADAPTER_LOCK(sc);
2092 if (p->linkpoll_period)
2093 check_link_status(sc);
2094
2095 /*
2096 	 * adapter lock can currently only be acquired after the
2097 * port lock
2098 */
2099 ADAPTER_UNLOCK(sc);
2100
2101 if (p->rev == T3_REV_B2 && p->nports < 4)
2102 check_t3b2_mac(sc);
2103}
2104
2105static void
2106touch_bars(device_t dev)
2107{
2108 /*
2109 * Don't enable yet
2110 */
2111#if !defined(__LP64__) && 0
2112 u32 v;
2113
2114 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2115 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2116 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2117 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2118 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2119 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2120#endif
2121}
2122
2123static int
2124set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2125{
2126 uint8_t *buf;
2127 int err = 0;
2128 u32 aligned_offset, aligned_len, *p;
2129 struct adapter *adapter = pi->adapter;
2130
2131
2132 aligned_offset = offset & ~3;
2133 aligned_len = (len + (offset & 3) + 3) & ~3;
2134
2135 if (aligned_offset != offset || aligned_len != len) {
2136 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2137 if (!buf)
2138 return (ENOMEM);
2139 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2140 if (!err && aligned_len > 4)
2141 err = t3_seeprom_read(adapter,
2142 aligned_offset + aligned_len - 4,
2143 (u32 *)&buf[aligned_len - 4]);
2144 if (err)
2145 goto out;
2146 memcpy(buf + (offset & 3), data, len);
2147 } else
2148 buf = (uint8_t *)(uintptr_t)data;
2149
2150 err = t3_seeprom_wp(adapter, 0);
2151 if (err)
2152 goto out;
2153
2154 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2155 err = t3_seeprom_write(adapter, aligned_offset, *p);
2156 aligned_offset += 4;
2157 }
2158
2159 if (!err)
2160 err = t3_seeprom_wp(adapter, 1);
2161out:
2162 if (buf != data)
2163 free(buf, M_DEVBUF);
2164 return err;
2165}
2166
2167
2168static int
2169in_range(int val, int lo, int hi)
2170{
2171 return val < 0 || (val <= hi && val >= lo);
2172}
2173
2174static int
2175cxgb_extension_open(struct cdev *dev, int flags, int fmp, d_thread_t *td)
2176{
2177 return (0);
2178}
2179
2180static int
2181cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
2182{
2183 return (0);
2184}
2185
2186static int
2187cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2188 int fflag, struct thread *td)
2189{
2190 int mmd, error = 0;
2191 struct port_info *pi = dev->si_drv1;
2192 adapter_t *sc = pi->adapter;
2193
2194#ifdef PRIV_SUPPORTED
2195 if (priv_check(td, PRIV_DRIVER)) {
2196 if (cxgb_debug)
2197 printf("user does not have access to privileged ioctls\n");
2198 return (EPERM);
2199 }
2200#else
2201 if (suser(td)) {
2202 if (cxgb_debug)
2203 printf("user does not have access to privileged ioctls\n");
2204 return (EPERM);
2205 }
2206#endif
2207
2208 switch (cmd) {
2209 case SIOCGMIIREG: {
2210 uint32_t val;
2211 struct cphy *phy = &pi->phy;
2212 struct mii_data *mid = (struct mii_data *)data;
2213
2214 if (!phy->mdio_read)
2215 return (EOPNOTSUPP);
2216 if (is_10G(sc)) {
2217 mmd = mid->phy_id >> 8;
2218 if (!mmd)
2219 mmd = MDIO_DEV_PCS;
2220 else if (mmd > MDIO_DEV_XGXS)
2221 return (EINVAL);
2222
2223 error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2224 mid->reg_num, &val);
2225 } else
2226 error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2227 mid->reg_num & 0x1f, &val);
2228 if (error == 0)
2229 mid->val_out = val;
2230 break;
2231 }
2232 case SIOCSMIIREG: {
2233 struct cphy *phy = &pi->phy;
2234 struct mii_data *mid = (struct mii_data *)data;
2235
2236 if (!phy->mdio_write)
2237 return (EOPNOTSUPP);
2238 if (is_10G(sc)) {
2239 mmd = mid->phy_id >> 8;
2240 if (!mmd)
2241 mmd = MDIO_DEV_PCS;
2242 else if (mmd > MDIO_DEV_XGXS)
2243 return (EINVAL);
2244
2245 error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2246 mmd, mid->reg_num, mid->val_in);
2247 } else
2248 error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2249 mid->reg_num & 0x1f,
2250 mid->val_in);
2251 break;
2252 }
2253 case CHELSIO_SETREG: {
2254 struct ch_reg *edata = (struct ch_reg *)data;
2255 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2256 return (EFAULT);
2257 t3_write_reg(sc, edata->addr, edata->val);
2258 break;
2259 }
2260 case CHELSIO_GETREG: {
2261 struct ch_reg *edata = (struct ch_reg *)data;
2262 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2263 return (EFAULT);
2264 edata->val = t3_read_reg(sc, edata->addr);
2265 break;
2266 }
2267 case CHELSIO_GET_SGE_CONTEXT: {
2268 struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2269 mtx_lock(&sc->sge.reg_lock);
2270 switch (ecntxt->cntxt_type) {
2271 case CNTXT_TYPE_EGRESS:
2272 error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2273 ecntxt->data);
2274 break;
2275 case CNTXT_TYPE_FL:
2276 error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
2277 ecntxt->data);
2278 break;
2279 case CNTXT_TYPE_RSP:
2280 error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2281 ecntxt->data);
2282 break;
2283 case CNTXT_TYPE_CQ:
2284 error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
2285 ecntxt->data);
2286 break;
2287 default:
2288 error = EINVAL;
2289 break;
2290 }
2291 mtx_unlock(&sc->sge.reg_lock);
2292 break;
2293 }
2294 case CHELSIO_GET_SGE_DESC: {
2295 struct ch_desc *edesc = (struct ch_desc *)data;
2296 int ret;
2297 if (edesc->queue_num >= SGE_QSETS * 6)
2298 return (EINVAL);
2299 ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2300 edesc->queue_num % 6, edesc->idx, edesc->data);
2301 if (ret < 0)
2302 return (EINVAL);
2303 edesc->size = ret;
2304 break;
2305 }
2306 case CHELSIO_SET_QSET_PARAMS: {
2307 struct qset_params *q;
2308 struct ch_qset_params *t = (struct ch_qset_params *)data;
2309
2310 if (t->qset_idx >= SGE_QSETS)
2311 return (EINVAL);
2312 if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
2313 !in_range(t->cong_thres, 0, 255) ||
2314 !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
2315 MAX_TXQ_ENTRIES) ||
2316 !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
2317 MAX_TXQ_ENTRIES) ||
2318 !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2319 MAX_CTRL_TXQ_ENTRIES) ||
2320 !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
2321 !in_range(t->fl_size[1], MIN_FL_ENTRIES,
2322 MAX_RX_JUMBO_BUFFERS) ||
2323 !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
2324 return (EINVAL);
2325 if ((sc->flags & FULL_INIT_DONE) &&
2326 (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
2327 t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
2328 t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
2329 t->polling >= 0 || t->cong_thres >= 0))
2330 return (EBUSY);
2331
2332 q = &sc->params.sge.qset[t->qset_idx];
2333
2334 if (t->rspq_size >= 0)
2335 q->rspq_size = t->rspq_size;
2336 if (t->fl_size[0] >= 0)
2337 q->fl_size = t->fl_size[0];
2338 if (t->fl_size[1] >= 0)
2339 q->jumbo_size = t->fl_size[1];
2340 if (t->txq_size[0] >= 0)
2341 q->txq_size[0] = t->txq_size[0];
2342 if (t->txq_size[1] >= 0)
2343 q->txq_size[1] = t->txq_size[1];
2344 if (t->txq_size[2] >= 0)
2345 q->txq_size[2] = t->txq_size[2];
2346 if (t->cong_thres >= 0)
2347 q->cong_thres = t->cong_thres;
2348 if (t->intr_lat >= 0) {
2349 struct sge_qset *qs = &sc->sge.qs[t->qset_idx];
2350
2351 q->coalesce_nsecs = t->intr_lat*1000;
2352 t3_update_qset_coalesce(qs, q);
2353 }
2354 break;
2355 }
2356 case CHELSIO_GET_QSET_PARAMS: {
2357 struct qset_params *q;
2358 struct ch_qset_params *t = (struct ch_qset_params *)data;
2359
2360 if (t->qset_idx >= SGE_QSETS)
2361 return (EINVAL);
2362
2363 q = &(sc)->params.sge.qset[t->qset_idx];
2364 t->rspq_size = q->rspq_size;
2365 t->txq_size[0] = q->txq_size[0];
2366 t->txq_size[1] = q->txq_size[1];
2367 t->txq_size[2] = q->txq_size[2];
2368 t->fl_size[0] = q->fl_size;
2369 t->fl_size[1] = q->jumbo_size;
2370 t->polling = q->polling;
2371 t->intr_lat = q->coalesce_nsecs / 1000;
2372 t->cong_thres = q->cong_thres;
2373 break;
2374 }
2375 case CHELSIO_SET_QSET_NUM: {
2376 struct ch_reg *edata = (struct ch_reg *)data;
2377 unsigned int port_idx = pi->port_id;
2378
2379 if (sc->flags & FULL_INIT_DONE)
2380 return (EBUSY);
2381 if (edata->val < 1 ||
2382 (edata->val > 1 && !(sc->flags & USING_MSIX)))
2383 return (EINVAL);
2384 if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
2385 return (EINVAL);
2386 sc->port[port_idx].nqsets = edata->val;
2387 sc->port[0].first_qset = 0;
2388 /*
2389 * XXX hardcode ourselves to 2 ports just like LEEENUX
2390 */
2391 sc->port[1].first_qset = sc->port[0].nqsets;
2392 break;
2393 }
2394 case CHELSIO_GET_QSET_NUM: {
2395 struct ch_reg *edata = (struct ch_reg *)data;
2396 edata->val = pi->nqsets;
2397 break;
2398 }
2399#ifdef notyet
2400 case CHELSIO_LOAD_FW:
2401 case CHELSIO_GET_PM:
2402 case CHELSIO_SET_PM:
2403 return (EOPNOTSUPP);
2404 break;
2405#endif
2406 case CHELSIO_SETMTUTAB: {
2407 struct ch_mtus *m = (struct ch_mtus *)data;
2408 int i;
2409
2410 if (!is_offload(sc))
2411 return (EOPNOTSUPP);
2412 if (offload_running(sc))
2413 return (EBUSY);
2414 if (m->nmtus != NMTUS)
2415 return (EINVAL);
2416 if (m->mtus[0] < 81) /* accommodate SACK */
2417 return (EINVAL);
2418
2419 /*
2420 * MTUs must be in ascending order
2421 */
2422 for (i = 1; i < NMTUS; ++i)
2423 if (m->mtus[i] < m->mtus[i - 1])
2424 return (EINVAL);
2425
2426 memcpy(sc->params.mtus, m->mtus,
2427 sizeof(sc->params.mtus));
2428 break;
2429 }
2430 case CHELSIO_GETMTUTAB: {
2431 struct ch_mtus *m = (struct ch_mtus *)data;
2432
2433 if (!is_offload(sc))
2434 return (EOPNOTSUPP);
2435
2436 memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2437 m->nmtus = NMTUS;
2438 break;
2439 }
2440 case CHELSIO_DEVUP:
2441 if (!is_offload(sc))
2442 return (EOPNOTSUPP);
2443 return offload_open(pi);
2444 break;
2445 case CHELSIO_GET_MEM: {
2446 struct ch_mem_range *t = (struct ch_mem_range *)data;
2447 struct mc7 *mem;
2448 uint8_t *useraddr;
2449 u64 buf[32];
2450
2451 if (!is_offload(sc))
2452 return (EOPNOTSUPP);
2453 if (!(sc->flags & FULL_INIT_DONE))
2454 return (EIO); /* need the memory controllers */
2455 if ((t->addr & 0x7) || (t->len & 0x7))
2456 return (EINVAL);
2457 if (t->mem_id == MEM_CM)
2458 mem = &sc->cm;
2459 else if (t->mem_id == MEM_PMRX)
2460 mem = &sc->pmrx;
2461 else if (t->mem_id == MEM_PMTX)
2462 mem = &sc->pmtx;
2463 else
2464 return (EINVAL);
2465
2466 /*
2467 * Version scheme:
2468 * bits 0..9: chip version
2469 * bits 10..15: chip revision
2470 */
2471 t->version = 3 | (sc->params.rev << 10);
2472
2473 /*
2474 * Read 256 bytes at a time as len can be large and we don't
2475 * want to use huge intermediate buffers.
2476 */
2477 useraddr = (uint8_t *)(t + 1); /* advance to start of buffer */
2478 while (t->len) {
2479 unsigned int chunk = min(t->len, sizeof(buf));
2480
2481 error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
2482 if (error)
2483 return (-error);
2484 if (copyout(buf, useraddr, chunk))
2485 return (EFAULT);
2486 useraddr += chunk;
2487 t->addr += chunk;
2488 t->len -= chunk;
2489 }
2490 break;
2491 }
2492 case CHELSIO_READ_TCAM_WORD: {
2493 struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2494
2495 if (!is_offload(sc))
2496 return (EOPNOTSUPP);
2497 if (!(sc->flags & FULL_INIT_DONE))
2498 return (EIO); /* need MC5 */
2499 return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2500 break;
2501 }
2502 case CHELSIO_SET_TRACE_FILTER: {
2503 struct ch_trace *t = (struct ch_trace *)data;
2504 const struct trace_params *tp;
2505
2506 tp = (const struct trace_params *)&t->sip;
2507 if (t->config_tx)
2508 t3_config_trace_filter(sc, tp, 0, t->invert_match,
2509 t->trace_tx);
2510 if (t->config_rx)
2511 t3_config_trace_filter(sc, tp, 1, t->invert_match,
2512 t->trace_rx);
2513 break;
2514 }
2515 case CHELSIO_SET_PKTSCHED: {
2516 struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2517 if (sc->open_device_map == 0)
2518 return (EAGAIN);
2519 send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2520 p->binding);
2521 break;
2522 }
2523 case CHELSIO_IFCONF_GETREGS: {
2524 struct ifconf_regs *regs = (struct ifconf_regs *)data;
2525 int reglen = cxgb_get_regs_len();
2526 uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);
2527 		if (buf == NULL)
2528 			return (ENOMEM);
2529 		if (regs->len > reglen)
2530 			regs->len = reglen;
2531 else if (regs->len < reglen) {
2532 error = E2BIG;
2533 goto done;
2534 }
2535 cxgb_get_regs(sc, regs, buf);
2536 error = copyout(buf, regs->data, reglen);
2537
2538 done:
2539 free(buf, M_DEVBUF);
2540
2541 break;
2542 }
2543 case CHELSIO_SET_HW_SCHED: {
2544 struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2545 unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2546
2547 if ((sc->flags & FULL_INIT_DONE) == 0)
2548 return (EAGAIN); /* need TP to be initialized */
2549 if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2550 !in_range(t->channel, 0, 1) ||
2551 !in_range(t->kbps, 0, 10000000) ||
2552 !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2553 !in_range(t->flow_ipg, 0,
2554 dack_ticks_to_usec(sc, 0x7ff)))
2555 return (EINVAL);
2556
2557 if (t->kbps >= 0) {
2558 error = t3_config_sched(sc, t->kbps, t->sched);
2559 if (error < 0)
2560 return (-error);
2561 }
2562 if (t->class_ipg >= 0)
2563 t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2564 if (t->flow_ipg >= 0) {
2565 t->flow_ipg *= 1000; /* us -> ns */
2566 t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2567 }
2568 if (t->mode >= 0) {
2569 int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2570
2571 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2572 bit, t->mode ? bit : 0);
2573 }
2574 if (t->channel >= 0)
2575 t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2576 1 << t->sched, t->channel << t->sched);
2577 break;
2578 }
2579 default:
2580 return (EOPNOTSUPP);
2581 break;
2582 }
2583
2584 return (error);
2585}
2586
2587static __inline void
2588reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
2589 unsigned int end)
2590{
2591 	uint32_t *p = (uint32_t *)(buf + start);
2592
2593 for ( ; start <= end; start += sizeof(uint32_t))
2594 *p++ = t3_read_reg(ap, start);
2595}
2596
2597#define T3_REGMAP_SIZE (3 * 1024)
2598static int
2599cxgb_get_regs_len(void)
2600{
2601 return T3_REGMAP_SIZE;
2602}
2603#undef T3_REGMAP_SIZE
2604
2605static void
2606cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
2607{
2608
2609 /*
2610 * Version scheme:
2611 * bits 0..9: chip version
2612 * bits 10..15: chip revision
2613 * bit 31: set for PCIe cards
2614 */
2615 regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
2616
2617 /*
2618 * We skip the MAC statistics registers because they are clear-on-read.
2619 * Also reading multi-register stats would need to synchronize with the
2620 * periodic mac stats accumulation. Hard to justify the complexity.
2621 */
2622 memset(buf, 0, REGDUMP_SIZE);
2623 reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
2624 reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
2625 reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
2626 reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
2627 reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
2628 reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
2629 XGM_REG(A_XGM_SERDES_STAT3, 1));
2630 reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
2631 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
2632}
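
Several of the "set" ioctls above (CHELSIO_SET_QSET_PARAMS, CHELSIO_SET_HW_SCHED) treat a negative field value as "leave unchanged": in_range() deliberately accepts any negative value, and each assignment in the handler is guarded by a ">= 0" test. A minimal standalone sketch of that convention follows; the helper body mirrors the in_range() defined above, and the test values are made up.

#include <stdio.h>

/* Same validation helper as defined above in this file. */
static int
in_range(int val, int lo, int hi)
{
	return (val < 0 || (val <= hi && val >= lo));
}

int
main(void)
{
	/*
	 * -1 passes validation (the field is simply not applied later),
	 * an in-range value passes and would be applied, and an
	 * out-of-range value fails, causing the ioctl to return EINVAL.
	 */
	printf("%d %d %d\n",
	    in_range(-1, 32, 65536),
	    in_range(4096, 32, 65536),
	    in_range(70000, 32, 65536));
	return (0);
}
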