Diff view of sys/dev/cxgb/cxgb_adapter.h between FreeBSD revisions 205950 (deleted lines) and 206109 (added lines), full/compact rendering.
1/**************************************************************************
2
3Copyright (c) 2007-2009, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
1/**************************************************************************
2
3Copyright (c) 2007-2009, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28$FreeBSD: head/sys/dev/cxgb/cxgb_adapter.h 205950 2010-03-31 00:27:49Z np $
28$FreeBSD: head/sys/dev/cxgb/cxgb_adapter.h 206109 2010-04-02 17:50:52Z np $
29
30***************************************************************************/
31
32
33#ifndef _CXGB_ADAPTER_H_
34#define _CXGB_ADAPTER_H_
35
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/rman.h>
39#include <sys/mbuf.h>
40#include <sys/socket.h>
41#include <sys/sockio.h>
42#include <sys/condvar.h>
43#include <sys/buf_ring.h>
44
45#include <net/ethernet.h>
46#include <net/if.h>
47#include <net/if_media.h>
48#include <net/if_dl.h>
49#include <netinet/tcp_lro.h>
50
51#include <machine/bus.h>
52#include <machine/resource.h>
53
54#include <sys/bus_dma.h>
55#include <dev/pci/pcireg.h>
56#include <dev/pci/pcivar.h>
57
58#include <cxgb_osdep.h>
59#include <t3cdev.h>
60#include <sys/mbufq.h>
61
62struct adapter;
63struct sge_qset;
64extern int cxgb_debug;
65
66#ifdef DEBUG_LOCKING
67#define MTX_INIT(lock, lockname, class, flags) \
68 do { \
69 printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
70 mtx_init((lock), lockname, class, flags); \
71 } while (0)
72
73#define MTX_DESTROY(lock) \
74 do { \
75 printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
76 mtx_destroy((lock)); \
77 } while (0)
78
79#else
80#define MTX_INIT mtx_init
81#define MTX_DESTROY mtx_destroy
82#endif
83
84enum {
85 LF_NO = 0,
86 LF_MAYBE,
87 LF_YES
88};
89
90struct port_info {
91 struct adapter *adapter;
92 struct ifnet *ifp;
93 int if_flags;
94 int flags;
95 const struct port_type_info *port_type;
96 struct cphy phy;
97 struct cmac mac;
98 struct link_config link_config;
99 struct ifmedia media;
100 struct mtx lock;
101 uint32_t port_id;
102 uint32_t tx_chan;
103 uint32_t txpkt_intf;
104 uint32_t first_qset;
105 uint32_t nqsets;
106 int link_fault;
107
108 uint8_t hw_addr[ETHER_ADDR_LEN];
109 struct task timer_reclaim_task;
110 struct cdev *port_cdev;
111
112#define PORT_LOCK_NAME_LEN 32
113#define PORT_NAME_LEN 32
114 char lockbuf[PORT_LOCK_NAME_LEN];
115 char namebuf[PORT_NAME_LEN];
116} __aligned(L1_CACHE_BYTES);
117
118enum {
119 /* adapter flags */
120 FULL_INIT_DONE = (1 << 0),
121 USING_MSI = (1 << 1),
122 USING_MSIX = (1 << 2),
123 QUEUES_BOUND = (1 << 3),
124 FW_UPTODATE = (1 << 4),
125 TPS_UPTODATE = (1 << 5),
126 CXGB_SHUTDOWN = (1 << 6),
127 CXGB_OFLD_INIT = (1 << 7),
128 TP_PARITY_INIT = (1 << 8),
129 CXGB_BUSY = (1 << 9),
130
131 /* port flags */
132 DOOMED = (1 << 0),
133};
134#define IS_DOOMED(p) (p->flags & DOOMED)
135#define SET_DOOMED(p) do {p->flags |= DOOMED;} while (0)
136#define IS_BUSY(sc) (sc->flags & CXGB_BUSY)
137#define SET_BUSY(sc) do {sc->flags |= CXGB_BUSY;} while (0)
138#define CLR_BUSY(sc) do {sc->flags &= ~CXGB_BUSY;} while (0)
139
140#define FL_Q_SIZE 4096
141#define JUMBO_Q_SIZE 1024
29
30***************************************************************************/
31
32
33#ifndef _CXGB_ADAPTER_H_
34#define _CXGB_ADAPTER_H_
35
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/rman.h>
39#include <sys/mbuf.h>
40#include <sys/socket.h>
41#include <sys/sockio.h>
42#include <sys/condvar.h>
43#include <sys/buf_ring.h>
44
45#include <net/ethernet.h>
46#include <net/if.h>
47#include <net/if_media.h>
48#include <net/if_dl.h>
49#include <netinet/tcp_lro.h>
50
51#include <machine/bus.h>
52#include <machine/resource.h>
53
54#include <sys/bus_dma.h>
55#include <dev/pci/pcireg.h>
56#include <dev/pci/pcivar.h>
57
58#include <cxgb_osdep.h>
59#include <t3cdev.h>
60#include <sys/mbufq.h>
61
62struct adapter;
63struct sge_qset;
64extern int cxgb_debug;
65
#ifdef DEBUG_LOCKING
/*
 * Debug build: log every mutex creation and destruction with the source
 * location before deferring to the stock mtx(9) routines.
 */
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags); \
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock)); \
	} while (0)

#else
/* Production build: plain mutex primitives, no logging. */
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#endif
83
/* Link-fault state: none, suspected, or confirmed. */
enum {
	LF_NO = 0,
	LF_MAYBE,
	LF_YES
};
89
90struct port_info {
91 struct adapter *adapter;
92 struct ifnet *ifp;
93 int if_flags;
94 int flags;
95 const struct port_type_info *port_type;
96 struct cphy phy;
97 struct cmac mac;
98 struct link_config link_config;
99 struct ifmedia media;
100 struct mtx lock;
101 uint32_t port_id;
102 uint32_t tx_chan;
103 uint32_t txpkt_intf;
104 uint32_t first_qset;
105 uint32_t nqsets;
106 int link_fault;
107
108 uint8_t hw_addr[ETHER_ADDR_LEN];
109 struct task timer_reclaim_task;
110 struct cdev *port_cdev;
111
112#define PORT_LOCK_NAME_LEN 32
113#define PORT_NAME_LEN 32
114 char lockbuf[PORT_LOCK_NAME_LEN];
115 char namebuf[PORT_NAME_LEN];
116} __aligned(L1_CACHE_BYTES);
117
/* Flag bits for adapter->flags and port_info->flags. */
enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE	= (1 << 4),
	TPS_UPTODATE	= (1 << 5),
	CXGB_SHUTDOWN	= (1 << 6),
	CXGB_OFLD_INIT	= (1 << 7),
	TP_PARITY_INIT	= (1 << 8),
	CXGB_BUSY	= (1 << 9),

	/* port flags */
	DOOMED		= (1 << 0),
};
/* Convenience accessors for the flag bits above. */
#define IS_DOOMED(p)	(p->flags & DOOMED)
#define SET_DOOMED(p)	do {p->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	(sc->flags & CXGB_BUSY)
#define SET_BUSY(sc)	do {sc->flags |= CXGB_BUSY;} while (0)
#define CLR_BUSY(sc)	do {sc->flags &= ~CXGB_BUSY;} while (0)
139
140#define FL_Q_SIZE 4096
141#define JUMBO_Q_SIZE 1024
142#define RSPQ_Q_SIZE 1024
142#define RSPQ_Q_SIZE 2048
143#define TX_ETH_Q_SIZE 1024
144#define TX_OFLD_Q_SIZE 1024
145#define TX_CTRL_Q_SIZE 256
146
147enum { TXQ_ETH = 0,
148 TXQ_OFLD = 1,
149 TXQ_CTRL = 2, };
150
151
152/*
153 * work request size in bytes
154 */
155#define WR_LEN (WR_FLITS * 8)
156#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))
157
158struct lro_state {
159 unsigned short enabled;
160 struct lro_ctrl ctrl;
161};
162
163#define RX_BUNDLE_SIZE 8
164
165struct rsp_desc;
166
167struct sge_rspq {
168 uint32_t credits;
169 uint32_t size;
170 uint32_t cidx;
171 uint32_t gen;
172 uint32_t polling;
173 uint32_t holdoff_tmr;
174 uint32_t next_holdoff;
175 uint32_t imm_data;
176 uint32_t async_notif;
177 uint32_t cntxt_id;
178 uint32_t offload_pkts;
179 uint32_t offload_bundles;
180 uint32_t pure_rsps;
181 uint32_t unhandled_irqs;
143#define TX_ETH_Q_SIZE 1024
144#define TX_OFLD_Q_SIZE 1024
145#define TX_CTRL_Q_SIZE 256
146
/* Indices of the three Tx queues within a qset. */
enum {
	TXQ_ETH = 0,
	TXQ_OFLD = 1,
	TXQ_CTRL = 2,
};
150
151
152/*
153 * work request size in bytes
154 */
155#define WR_LEN (WR_FLITS * 8)
156#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))
157
158struct lro_state {
159 unsigned short enabled;
160 struct lro_ctrl ctrl;
161};
162
163#define RX_BUNDLE_SIZE 8
164
165struct rsp_desc;
166
167struct sge_rspq {
168 uint32_t credits;
169 uint32_t size;
170 uint32_t cidx;
171 uint32_t gen;
172 uint32_t polling;
173 uint32_t holdoff_tmr;
174 uint32_t next_holdoff;
175 uint32_t imm_data;
176 uint32_t async_notif;
177 uint32_t cntxt_id;
178 uint32_t offload_pkts;
179 uint32_t offload_bundles;
180 uint32_t pure_rsps;
181 uint32_t unhandled_irqs;
182 uint32_t starved;
182
183 bus_addr_t phys_addr;
184 bus_dma_tag_t desc_tag;
185 bus_dmamap_t desc_map;
186
187 struct t3_mbuf_hdr rspq_mh;
188 struct rsp_desc *desc;
189 struct mtx lock;
190#define RSPQ_NAME_LEN 32
191 char lockbuf[RSPQ_NAME_LEN];
192 uint32_t rspq_dump_start;
193 uint32_t rspq_dump_count;
194};
195
196struct rx_desc;
197struct rx_sw_desc;
198
199struct sge_fl {
200 uint32_t buf_size;
201 uint32_t credits;
202 uint32_t size;
203 uint32_t cidx;
204 uint32_t pidx;
205 uint32_t gen;
206 bus_addr_t phys_addr;
207 uint32_t cntxt_id;
208 uint32_t empty;
209 bus_dma_tag_t desc_tag;
210 bus_dmamap_t desc_map;
211 bus_dma_tag_t entry_tag;
212 uma_zone_t zone;
213 struct rx_desc *desc;
214 struct rx_sw_desc *sdesc;
215 int type;
216};
217
218struct tx_desc;
219struct tx_sw_desc;
220
221#define TXQ_TRANSMITTING 0x1
222
223struct sge_txq {
224 uint64_t flags;
225 uint32_t in_use;
226 uint32_t size;
227 uint32_t processed;
228 uint32_t cleaned;
229 uint32_t stop_thres;
230 uint32_t cidx;
231 uint32_t pidx;
232 uint32_t gen;
233 uint32_t unacked;
234 struct tx_desc *desc;
235 struct tx_sw_desc *sdesc;
236 uint32_t token;
237 bus_addr_t phys_addr;
238 struct task qresume_task;
239 struct task qreclaim_task;
240 uint32_t cntxt_id;
241 uint64_t stops;
242 uint64_t restarts;
243 bus_dma_tag_t desc_tag;
244 bus_dmamap_t desc_map;
245 bus_dma_tag_t entry_tag;
246 struct mbuf_head sendq;
247
248 struct buf_ring *txq_mr;
249 struct ifaltq *txq_ifq;
250 struct callout txq_timer;
251 struct callout txq_watchdog;
252 uint64_t txq_coalesced;
253 uint32_t txq_skipped;
254 uint32_t txq_enqueued;
255 uint32_t txq_dump_start;
256 uint32_t txq_dump_count;
257 uint64_t txq_direct_packets;
258 uint64_t txq_direct_bytes;
259 uint64_t txq_frees;
260 struct sg_ent txq_sgl[TX_MAX_SEGS / 2 + 1];
261};
262
263
264enum {
265 SGE_PSTAT_TSO, /* # of TSO requests */
266 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
267 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
268 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
269 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
270};
271
272#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)
273
274#define QS_EXITING 0x1
275#define QS_RUNNING 0x2
276#define QS_BOUND 0x4
277#define QS_FLUSHING 0x8
278#define QS_TIMEOUT 0x10
279
280struct sge_qset {
281 struct sge_rspq rspq;
282 struct sge_fl fl[SGE_RXQ_PER_SET];
283 struct lro_state lro;
284 struct sge_txq txq[SGE_TXQ_PER_SET];
285 uint32_t txq_stopped; /* which Tx queues are stopped */
286 uint64_t port_stats[SGE_PSTAT_MAX];
287 struct port_info *port;
288 int idx; /* qset # */
289 int qs_flags;
290 int coalescing;
291 struct cv qs_cv;
292 struct mtx lock;
293#define QS_NAME_LEN 32
294 char namebuf[QS_NAME_LEN];
295};
296
297struct sge {
298 struct sge_qset qs[SGE_QSETS];
299 struct mtx reg_lock;
300};
301
302struct filter_info;
303
304struct adapter {
305 device_t dev;
306 int flags;
307 TAILQ_ENTRY(adapter) adapter_entry;
308
309 /* PCI register resources */
310 int regs_rid;
311 struct resource *regs_res;
312 int udbs_rid;
313 struct resource *udbs_res;
314 bus_space_handle_t bh;
315 bus_space_tag_t bt;
316 bus_size_t mmio_len;
317 uint32_t link_width;
318
319 /* DMA resources */
320 bus_dma_tag_t parent_dmat;
321 bus_dma_tag_t rx_dmat;
322 bus_dma_tag_t rx_jumbo_dmat;
323 bus_dma_tag_t tx_dmat;
324
325 /* Interrupt resources */
326 struct resource *irq_res;
327 int irq_rid;
328 void *intr_tag;
329
330 uint32_t msix_regs_rid;
331 struct resource *msix_regs_res;
332
333 struct resource *msix_irq_res[SGE_QSETS];
334 int msix_irq_rid[SGE_QSETS];
335 void *msix_intr_tag[SGE_QSETS];
336 uint8_t rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
337 uint8_t rrss_map[SGE_QSETS]; /* revers RSS map table */
338 uint16_t rspq_map[RSS_TABLE_SIZE]; /* maps 7-bit cookie to qidx */
339 union {
340 uint8_t fill[SGE_QSETS];
341 uint64_t coalesce;
342 } u;
343
344#define tunq_fill u.fill
345#define tunq_coalesce u.coalesce
346
347 struct filter_info *filters;
348
349 /* Tasks */
350 struct task ext_intr_task;
351 struct task slow_intr_task;
352 struct task tick_task;
353 struct taskqueue *tq;
354 struct callout cxgb_tick_ch;
355 struct callout sge_timer_ch;
356
357 /* Register lock for use by the hardware layer */
358 struct mtx mdio_lock;
359 struct mtx elmer_lock;
360
361 /* Bookkeeping for the hardware layer */
362 struct adapter_params params;
363 unsigned int slow_intr_mask;
364 unsigned long irq_stats[IRQ_NUM_STATS];
365
366 struct sge sge;
367 struct mc7 pmrx;
368 struct mc7 pmtx;
369 struct mc7 cm;
370 struct mc5 mc5;
371
372 struct port_info port[MAX_NPORTS];
373 device_t portdev[MAX_NPORTS];
374 struct t3cdev tdev;
375 char fw_version[64];
376 char port_types[MAX_NPORTS + 1];
377 uint32_t open_device_map;
378 uint32_t registered_device_map;
379 struct mtx lock;
380 driver_intr_t *cxgb_intr;
381 int msi_count;
382
383#define ADAPTER_LOCK_NAME_LEN 32
384 char lockbuf[ADAPTER_LOCK_NAME_LEN];
385 char reglockbuf[ADAPTER_LOCK_NAME_LEN];
386 char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
387 char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
388};
389
390struct t3_rx_mode {
391
392 uint32_t idx;
393 struct port_info *port;
394};
395
396#define MDIO_LOCK(adapter) mtx_lock(&(adapter)->mdio_lock)
397#define MDIO_UNLOCK(adapter) mtx_unlock(&(adapter)->mdio_lock)
398#define ELMR_LOCK(adapter) mtx_lock(&(adapter)->elmer_lock)
399#define ELMR_UNLOCK(adapter) mtx_unlock(&(adapter)->elmer_lock)
400
401
402#define PORT_LOCK(port) mtx_lock(&(port)->lock);
403#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock);
404#define PORT_LOCK_INIT(port, name) mtx_init(&(port)->lock, name, 0, MTX_DEF)
405#define PORT_LOCK_DEINIT(port) mtx_destroy(&(port)->lock)
406#define PORT_LOCK_ASSERT_NOTOWNED(port) mtx_assert(&(port)->lock, MA_NOTOWNED)
407#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)
408
409#define ADAPTER_LOCK(adap) mtx_lock(&(adap)->lock);
410#define ADAPTER_UNLOCK(adap) mtx_unlock(&(adap)->lock);
411#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
412#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
413#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
414#define ADAPTER_LOCK_ASSERT_OWNED(adap) mtx_assert(&(adap)->lock, MA_OWNED)
415
416
417static __inline uint32_t
418t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
419{
420 return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
421}
422
423static __inline void
424t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
425{
426 bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
427}
428
429static __inline void
430t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
431{
432 *val = pci_read_config(adapter->dev, reg, 4);
433}
434
435static __inline void
436t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
437{
438 pci_write_config(adapter->dev, reg, val, 4);
439}
440
441static __inline void
442t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
443{
444 *val = pci_read_config(adapter->dev, reg, 2);
445}
446
447static __inline void
448t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
449{
450 pci_write_config(adapter->dev, reg, val, 2);
451}
452
453static __inline uint8_t *
454t3_get_next_mcaddr(struct t3_rx_mode *rm)
455{
456 uint8_t *macaddr = NULL;
457 struct ifnet *ifp = rm->port->ifp;
458 struct ifmultiaddr *ifma;
459 int i = 0;
460
461 if_maddr_rlock(ifp);
462 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
463 if (ifma->ifma_addr->sa_family != AF_LINK)
464 continue;
465 if (i == rm->idx) {
466 macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
467 break;
468 }
469 i++;
470 }
471 if_maddr_runlock(ifp);
472
473 rm->idx++;
474 return (macaddr);
475}
476
477static __inline void
478t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
479{
480 rm->idx = 0;
481 rm->port = port;
482}
483
484static __inline struct port_info *
485adap2pinfo(struct adapter *adap, int idx)
486{
487 return &adap->port[idx];
488}
489
490int t3_os_find_pci_capability(adapter_t *adapter, int cap);
491int t3_os_pci_save_state(struct adapter *adapter);
492int t3_os_pci_restore_state(struct adapter *adapter);
493void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
494 int speed, int duplex, int fc, int mac_was_reset);
495void t3_os_phymod_changed(struct adapter *adap, int port_id);
496void t3_sge_err_intr_handler(adapter_t *adapter);
497int t3_offload_tx(struct t3cdev *, struct mbuf *);
498void t3_os_ext_intr_handler(adapter_t *adapter);
499void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
500int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);
501
502
503int t3_sge_alloc(struct adapter *);
504int t3_sge_free(struct adapter *);
505int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
506 int, struct port_info *);
507void t3_free_sge_resources(adapter_t *);
508void t3_sge_start(adapter_t *);
509void t3_sge_stop(adapter_t *);
510void t3b_intr(void *data);
511void t3_intr_msi(void *data);
512void t3_intr_msix(void *data);
513
514int t3_sge_init_adapter(adapter_t *);
515int t3_sge_reset_adapter(adapter_t *);
516int t3_sge_init_port(struct port_info *);
517void t3_free_tx_desc(struct sge_qset *qs, int n, int qid);
518
519void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
520
521void t3_add_attach_sysctls(adapter_t *sc);
522void t3_add_configured_sysctls(adapter_t *sc);
523int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
524 unsigned char *data);
525void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
526
527#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
528 (hz * (a)->params.linkpoll_period) / 10 : \
529 (a)->params.stats_update_period * hz)
530
531/*
532 * XXX figure out how we can return this to being private to sge
533 */
534#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))
535
536#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
537
538static __inline struct sge_qset *
539fl_to_qset(struct sge_fl *q, int qidx)
540{
541 return container_of(q, struct sge_qset, fl[qidx]);
542}
543
544static __inline struct sge_qset *
545rspq_to_qset(struct sge_rspq *q)
546{
547 return container_of(q, struct sge_qset, rspq);
548}
549
550static __inline struct sge_qset *
551txq_to_qset(struct sge_txq *q, int qidx)
552{
553 return container_of(q, struct sge_qset, txq[qidx]);
554}
555
556static __inline struct adapter *
557tdev2adap(struct t3cdev *d)
558{
559 return container_of(d, struct adapter, tdev);
560}
561
562#undef container_of
563
564#define OFFLOAD_DEVMAP_BIT 15
565static inline int offload_running(adapter_t *adapter)
566{
567 return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
568}
569
570void cxgb_tx_watchdog(void *arg);
571int cxgb_transmit(struct ifnet *ifp, struct mbuf *m);
572void cxgb_qflush(struct ifnet *ifp);
573void cxgb_start(struct ifnet *ifp);
574#endif
183
184 bus_addr_t phys_addr;
185 bus_dma_tag_t desc_tag;
186 bus_dmamap_t desc_map;
187
188 struct t3_mbuf_hdr rspq_mh;
189 struct rsp_desc *desc;
190 struct mtx lock;
191#define RSPQ_NAME_LEN 32
192 char lockbuf[RSPQ_NAME_LEN];
193 uint32_t rspq_dump_start;
194 uint32_t rspq_dump_count;
195};
196
197struct rx_desc;
198struct rx_sw_desc;
199
200struct sge_fl {
201 uint32_t buf_size;
202 uint32_t credits;
203 uint32_t size;
204 uint32_t cidx;
205 uint32_t pidx;
206 uint32_t gen;
207 bus_addr_t phys_addr;
208 uint32_t cntxt_id;
209 uint32_t empty;
210 bus_dma_tag_t desc_tag;
211 bus_dmamap_t desc_map;
212 bus_dma_tag_t entry_tag;
213 uma_zone_t zone;
214 struct rx_desc *desc;
215 struct rx_sw_desc *sdesc;
216 int type;
217};
218
219struct tx_desc;
220struct tx_sw_desc;
221
222#define TXQ_TRANSMITTING 0x1
223
224struct sge_txq {
225 uint64_t flags;
226 uint32_t in_use;
227 uint32_t size;
228 uint32_t processed;
229 uint32_t cleaned;
230 uint32_t stop_thres;
231 uint32_t cidx;
232 uint32_t pidx;
233 uint32_t gen;
234 uint32_t unacked;
235 struct tx_desc *desc;
236 struct tx_sw_desc *sdesc;
237 uint32_t token;
238 bus_addr_t phys_addr;
239 struct task qresume_task;
240 struct task qreclaim_task;
241 uint32_t cntxt_id;
242 uint64_t stops;
243 uint64_t restarts;
244 bus_dma_tag_t desc_tag;
245 bus_dmamap_t desc_map;
246 bus_dma_tag_t entry_tag;
247 struct mbuf_head sendq;
248
249 struct buf_ring *txq_mr;
250 struct ifaltq *txq_ifq;
251 struct callout txq_timer;
252 struct callout txq_watchdog;
253 uint64_t txq_coalesced;
254 uint32_t txq_skipped;
255 uint32_t txq_enqueued;
256 uint32_t txq_dump_start;
257 uint32_t txq_dump_count;
258 uint64_t txq_direct_packets;
259 uint64_t txq_direct_bytes;
260 uint64_t txq_frees;
261 struct sg_ent txq_sgl[TX_MAX_SEGS / 2 + 1];
262};
263
264
/* Indices into sge_qset.port_stats[]. */
enum {
	SGE_PSTAT_TSO,			/* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,		/* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,		/* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,		/* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,		/* # of VLAN tag insertions */
};

#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)
274
275#define QS_EXITING 0x1
276#define QS_RUNNING 0x2
277#define QS_BOUND 0x4
278#define QS_FLUSHING 0x8
279#define QS_TIMEOUT 0x10
280
281struct sge_qset {
282 struct sge_rspq rspq;
283 struct sge_fl fl[SGE_RXQ_PER_SET];
284 struct lro_state lro;
285 struct sge_txq txq[SGE_TXQ_PER_SET];
286 uint32_t txq_stopped; /* which Tx queues are stopped */
287 uint64_t port_stats[SGE_PSTAT_MAX];
288 struct port_info *port;
289 int idx; /* qset # */
290 int qs_flags;
291 int coalescing;
292 struct cv qs_cv;
293 struct mtx lock;
294#define QS_NAME_LEN 32
295 char namebuf[QS_NAME_LEN];
296};
297
298struct sge {
299 struct sge_qset qs[SGE_QSETS];
300 struct mtx reg_lock;
301};
302
303struct filter_info;
304
305struct adapter {
306 device_t dev;
307 int flags;
308 TAILQ_ENTRY(adapter) adapter_entry;
309
310 /* PCI register resources */
311 int regs_rid;
312 struct resource *regs_res;
313 int udbs_rid;
314 struct resource *udbs_res;
315 bus_space_handle_t bh;
316 bus_space_tag_t bt;
317 bus_size_t mmio_len;
318 uint32_t link_width;
319
320 /* DMA resources */
321 bus_dma_tag_t parent_dmat;
322 bus_dma_tag_t rx_dmat;
323 bus_dma_tag_t rx_jumbo_dmat;
324 bus_dma_tag_t tx_dmat;
325
326 /* Interrupt resources */
327 struct resource *irq_res;
328 int irq_rid;
329 void *intr_tag;
330
331 uint32_t msix_regs_rid;
332 struct resource *msix_regs_res;
333
334 struct resource *msix_irq_res[SGE_QSETS];
335 int msix_irq_rid[SGE_QSETS];
336 void *msix_intr_tag[SGE_QSETS];
337 uint8_t rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
338 uint8_t rrss_map[SGE_QSETS]; /* revers RSS map table */
339 uint16_t rspq_map[RSS_TABLE_SIZE]; /* maps 7-bit cookie to qidx */
340 union {
341 uint8_t fill[SGE_QSETS];
342 uint64_t coalesce;
343 } u;
344
345#define tunq_fill u.fill
346#define tunq_coalesce u.coalesce
347
348 struct filter_info *filters;
349
350 /* Tasks */
351 struct task ext_intr_task;
352 struct task slow_intr_task;
353 struct task tick_task;
354 struct taskqueue *tq;
355 struct callout cxgb_tick_ch;
356 struct callout sge_timer_ch;
357
358 /* Register lock for use by the hardware layer */
359 struct mtx mdio_lock;
360 struct mtx elmer_lock;
361
362 /* Bookkeeping for the hardware layer */
363 struct adapter_params params;
364 unsigned int slow_intr_mask;
365 unsigned long irq_stats[IRQ_NUM_STATS];
366
367 struct sge sge;
368 struct mc7 pmrx;
369 struct mc7 pmtx;
370 struct mc7 cm;
371 struct mc5 mc5;
372
373 struct port_info port[MAX_NPORTS];
374 device_t portdev[MAX_NPORTS];
375 struct t3cdev tdev;
376 char fw_version[64];
377 char port_types[MAX_NPORTS + 1];
378 uint32_t open_device_map;
379 uint32_t registered_device_map;
380 struct mtx lock;
381 driver_intr_t *cxgb_intr;
382 int msi_count;
383
384#define ADAPTER_LOCK_NAME_LEN 32
385 char lockbuf[ADAPTER_LOCK_NAME_LEN];
386 char reglockbuf[ADAPTER_LOCK_NAME_LEN];
387 char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
388 char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
389};
390
391struct t3_rx_mode {
392
393 uint32_t idx;
394 struct port_info *port;
395};
396
397#define MDIO_LOCK(adapter) mtx_lock(&(adapter)->mdio_lock)
398#define MDIO_UNLOCK(adapter) mtx_unlock(&(adapter)->mdio_lock)
399#define ELMR_LOCK(adapter) mtx_lock(&(adapter)->elmer_lock)
400#define ELMR_UNLOCK(adapter) mtx_unlock(&(adapter)->elmer_lock)
401
402
403#define PORT_LOCK(port) mtx_lock(&(port)->lock);
404#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock);
405#define PORT_LOCK_INIT(port, name) mtx_init(&(port)->lock, name, 0, MTX_DEF)
406#define PORT_LOCK_DEINIT(port) mtx_destroy(&(port)->lock)
407#define PORT_LOCK_ASSERT_NOTOWNED(port) mtx_assert(&(port)->lock, MA_NOTOWNED)
408#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)
409
410#define ADAPTER_LOCK(adap) mtx_lock(&(adap)->lock);
411#define ADAPTER_UNLOCK(adap) mtx_unlock(&(adap)->lock);
412#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
413#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
414#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
415#define ADAPTER_LOCK_ASSERT_OWNED(adap) mtx_assert(&(adap)->lock, MA_OWNED)
416
417
418static __inline uint32_t
419t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
420{
421 return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
422}
423
424static __inline void
425t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
426{
427 bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
428}
429
430static __inline void
431t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
432{
433 *val = pci_read_config(adapter->dev, reg, 4);
434}
435
436static __inline void
437t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
438{
439 pci_write_config(adapter->dev, reg, val, 4);
440}
441
442static __inline void
443t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
444{
445 *val = pci_read_config(adapter->dev, reg, 2);
446}
447
448static __inline void
449t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
450{
451 pci_write_config(adapter->dev, reg, val, 2);
452}
453
454static __inline uint8_t *
455t3_get_next_mcaddr(struct t3_rx_mode *rm)
456{
457 uint8_t *macaddr = NULL;
458 struct ifnet *ifp = rm->port->ifp;
459 struct ifmultiaddr *ifma;
460 int i = 0;
461
462 if_maddr_rlock(ifp);
463 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
464 if (ifma->ifma_addr->sa_family != AF_LINK)
465 continue;
466 if (i == rm->idx) {
467 macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
468 break;
469 }
470 i++;
471 }
472 if_maddr_runlock(ifp);
473
474 rm->idx++;
475 return (macaddr);
476}
477
478static __inline void
479t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
480{
481 rm->idx = 0;
482 rm->port = port;
483}
484
485static __inline struct port_info *
486adap2pinfo(struct adapter *adap, int idx)
487{
488 return &adap->port[idx];
489}
490
491int t3_os_find_pci_capability(adapter_t *adapter, int cap);
492int t3_os_pci_save_state(struct adapter *adapter);
493int t3_os_pci_restore_state(struct adapter *adapter);
494void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
495 int speed, int duplex, int fc, int mac_was_reset);
496void t3_os_phymod_changed(struct adapter *adap, int port_id);
497void t3_sge_err_intr_handler(adapter_t *adapter);
498int t3_offload_tx(struct t3cdev *, struct mbuf *);
499void t3_os_ext_intr_handler(adapter_t *adapter);
500void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
501int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);
502
503
504int t3_sge_alloc(struct adapter *);
505int t3_sge_free(struct adapter *);
506int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
507 int, struct port_info *);
508void t3_free_sge_resources(adapter_t *);
509void t3_sge_start(adapter_t *);
510void t3_sge_stop(adapter_t *);
511void t3b_intr(void *data);
512void t3_intr_msi(void *data);
513void t3_intr_msix(void *data);
514
515int t3_sge_init_adapter(adapter_t *);
516int t3_sge_reset_adapter(adapter_t *);
517int t3_sge_init_port(struct port_info *);
518void t3_free_tx_desc(struct sge_qset *qs, int n, int qid);
519
520void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
521
522void t3_add_attach_sysctls(adapter_t *sc);
523void t3_add_configured_sysctls(adapter_t *sc);
524int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
525 unsigned char *data);
526void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
527
528#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
529 (hz * (a)->params.linkpoll_period) / 10 : \
530 (a)->params.stats_update_period * hz)
531
532/*
533 * XXX figure out how we can return this to being private to sge
534 */
535#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))
536
537#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
538
539static __inline struct sge_qset *
540fl_to_qset(struct sge_fl *q, int qidx)
541{
542 return container_of(q, struct sge_qset, fl[qidx]);
543}
544
545static __inline struct sge_qset *
546rspq_to_qset(struct sge_rspq *q)
547{
548 return container_of(q, struct sge_qset, rspq);
549}
550
551static __inline struct sge_qset *
552txq_to_qset(struct sge_txq *q, int qidx)
553{
554 return container_of(q, struct sge_qset, txq[qidx]);
555}
556
557static __inline struct adapter *
558tdev2adap(struct t3cdev *d)
559{
560 return container_of(d, struct adapter, tdev);
561}
562
563#undef container_of
564
565#define OFFLOAD_DEVMAP_BIT 15
566static inline int offload_running(adapter_t *adapter)
567{
568 return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
569}
570
571void cxgb_tx_watchdog(void *arg);
572int cxgb_transmit(struct ifnet *ifp, struct mbuf *m);
573void cxgb_qflush(struct ifnet *ifp);
574void cxgb_start(struct ifnet *ifp);
575#endif