cxgb_adapter.h (206109) cxgb_adapter.h (207688)
1/**************************************************************************
2
3Copyright (c) 2007-2009, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28$FreeBSD: head/sys/dev/cxgb/cxgb_adapter.h 206109 2010-04-02 17:50:52Z np $
28$FreeBSD: head/sys/dev/cxgb/cxgb_adapter.h 207688 2010-05-05 22:52:06Z np $
29
30***************************************************************************/
31
32
33#ifndef _CXGB_ADAPTER_H_
34#define _CXGB_ADAPTER_H_
35
36#include <sys/lock.h>
37#include <sys/mutex.h>
38#include <sys/rman.h>
39#include <sys/mbuf.h>
40#include <sys/socket.h>
41#include <sys/sockio.h>
42#include <sys/condvar.h>
43#include <sys/buf_ring.h>
44
45#include <net/ethernet.h>
46#include <net/if.h>
47#include <net/if_media.h>
48#include <net/if_dl.h>
49#include <netinet/tcp_lro.h>
50
51#include <machine/bus.h>
52#include <machine/resource.h>
53
54#include <sys/bus_dma.h>
55#include <dev/pci/pcireg.h>
56#include <dev/pci/pcivar.h>
57
58#include <cxgb_osdep.h>
59#include <t3cdev.h>
60#include <sys/mbufq.h>
61
62struct adapter;
63struct sge_qset;
64extern int cxgb_debug;
65
66#ifdef DEBUG_LOCKING
67#define MTX_INIT(lock, lockname, class, flags) \
68 do { \
69 printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
70 mtx_init((lock), lockname, class, flags); \
71 } while (0)
72
73#define MTX_DESTROY(lock) \
74 do { \
75 printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
76 mtx_destroy((lock)); \
77 } while (0)
78
79#else
80#define MTX_INIT mtx_init
81#define MTX_DESTROY mtx_destroy
82#endif
83
84enum {
85 LF_NO = 0,
86 LF_MAYBE,
87 LF_YES
88};
89
90struct port_info {
91 struct adapter *adapter;
92 struct ifnet *ifp;
93 int if_flags;
94 int flags;
95 const struct port_type_info *port_type;
96 struct cphy phy;
97 struct cmac mac;
98 struct link_config link_config;
99 struct ifmedia media;
100 struct mtx lock;
101 uint32_t port_id;
102 uint32_t tx_chan;
103 uint32_t txpkt_intf;
104 uint32_t first_qset;
105 uint32_t nqsets;
106 int link_fault;
107
108 uint8_t hw_addr[ETHER_ADDR_LEN];
109 struct task timer_reclaim_task;
110 struct cdev *port_cdev;
111
112#define PORT_LOCK_NAME_LEN 32
113#define PORT_NAME_LEN 32
114 char lockbuf[PORT_LOCK_NAME_LEN];
115 char namebuf[PORT_NAME_LEN];
116} __aligned(L1_CACHE_BYTES);
117
118enum {
119 /* adapter flags */
120 FULL_INIT_DONE = (1 << 0),
121 USING_MSI = (1 << 1),
122 USING_MSIX = (1 << 2),
123 QUEUES_BOUND = (1 << 3),
124 FW_UPTODATE = (1 << 4),
125 TPS_UPTODATE = (1 << 5),
126 CXGB_SHUTDOWN = (1 << 6),
127 CXGB_OFLD_INIT = (1 << 7),
128 TP_PARITY_INIT = (1 << 8),
129 CXGB_BUSY = (1 << 9),
130
131 /* port flags */
132 DOOMED = (1 << 0),
133};
134#define IS_DOOMED(p) (p->flags & DOOMED)
135#define SET_DOOMED(p) do {p->flags |= DOOMED;} while (0)
136#define IS_BUSY(sc) (sc->flags & CXGB_BUSY)
137#define SET_BUSY(sc) do {sc->flags |= CXGB_BUSY;} while (0)
138#define CLR_BUSY(sc) do {sc->flags &= ~CXGB_BUSY;} while (0)
139
140#define FL_Q_SIZE 4096
141#define JUMBO_Q_SIZE 1024
142#define RSPQ_Q_SIZE 2048
143#define TX_ETH_Q_SIZE 1024
144#define TX_OFLD_Q_SIZE 1024
145#define TX_CTRL_Q_SIZE 256
146
147enum { TXQ_ETH = 0,
148 TXQ_OFLD = 1,
149 TXQ_CTRL = 2, };
150
151
152/*
153 * work request size in bytes
154 */
155#define WR_LEN (WR_FLITS * 8)
156#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))
157
158struct lro_state {
159 unsigned short enabled;
160 struct lro_ctrl ctrl;
161};
162
163#define RX_BUNDLE_SIZE 8
164
165struct rsp_desc;
166
167struct sge_rspq {
168 uint32_t credits;
169 uint32_t size;
170 uint32_t cidx;
171 uint32_t gen;
172 uint32_t polling;
173 uint32_t holdoff_tmr;
174 uint32_t next_holdoff;
175 uint32_t imm_data;
176 uint32_t async_notif;
177 uint32_t cntxt_id;
178 uint32_t offload_pkts;
179 uint32_t offload_bundles;
180 uint32_t pure_rsps;
181 uint32_t unhandled_irqs;
182 uint32_t starved;
183
184 bus_addr_t phys_addr;
185 bus_dma_tag_t desc_tag;
186 bus_dmamap_t desc_map;
187
188 struct t3_mbuf_hdr rspq_mh;
189 struct rsp_desc *desc;
190 struct mtx lock;
191#define RSPQ_NAME_LEN 32
192 char lockbuf[RSPQ_NAME_LEN];
193 uint32_t rspq_dump_start;
194 uint32_t rspq_dump_count;
195};
196
197struct rx_desc;
198struct rx_sw_desc;
199
200struct sge_fl {
201 uint32_t buf_size;
202 uint32_t credits;
203 uint32_t size;
204 uint32_t cidx;
205 uint32_t pidx;
206 uint32_t gen;
207 uint32_t db_pending;
208 bus_addr_t phys_addr;
209 uint32_t cntxt_id;
210 uint32_t empty;
211 bus_dma_tag_t desc_tag;
212 bus_dmamap_t desc_map;
213 bus_dma_tag_t entry_tag;
214 uma_zone_t zone;
215 struct rx_desc *desc;
216 struct rx_sw_desc *sdesc;
217 int type;
218};
219
220struct tx_desc;
221struct tx_sw_desc;
222
223#define TXQ_TRANSMITTING 0x1
224
225struct sge_txq {
226 uint64_t flags;
227 uint32_t in_use;
228 uint32_t size;
229 uint32_t processed;
230 uint32_t cleaned;
231 uint32_t stop_thres;
232 uint32_t cidx;
233 uint32_t pidx;
234 uint32_t gen;
235 uint32_t unacked;
236 uint32_t db_pending;
237 struct tx_desc *desc;
238 struct tx_sw_desc *sdesc;
239 uint32_t token;
240 bus_addr_t phys_addr;
241 struct task qresume_task;
242 struct task qreclaim_task;
243 uint32_t cntxt_id;
244 uint64_t stops;
245 uint64_t restarts;
246 bus_dma_tag_t desc_tag;
247 bus_dmamap_t desc_map;
248 bus_dma_tag_t entry_tag;
249 struct mbuf_head sendq;
250
251 struct buf_ring *txq_mr;
252 struct ifaltq *txq_ifq;
253 struct callout txq_timer;
254 struct callout txq_watchdog;
255 uint64_t txq_coalesced;
256 uint32_t txq_skipped;
257 uint32_t txq_enqueued;
258 uint32_t txq_dump_start;
259 uint32_t txq_dump_count;
260 uint64_t txq_direct_packets;
261 uint64_t txq_direct_bytes;
262 uint64_t txq_frees;
263 struct sg_ent txq_sgl[TX_MAX_SEGS / 2 + 1];
264};
265
266
267enum {
268 SGE_PSTAT_TSO, /* # of TSO requests */
269 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
270 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
271 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
272 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
273};
274
275#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)
276
277#define QS_EXITING 0x1
278#define QS_RUNNING 0x2
279#define QS_BOUND 0x4
280#define QS_FLUSHING 0x8
281#define QS_TIMEOUT 0x10
282
283struct sge_qset {
284 struct sge_rspq rspq;
285 struct sge_fl fl[SGE_RXQ_PER_SET];
286 struct lro_state lro;
287 struct sge_txq txq[SGE_TXQ_PER_SET];
288 uint32_t txq_stopped; /* which Tx queues are stopped */
289 uint64_t port_stats[SGE_PSTAT_MAX];
290 struct port_info *port;
291 int idx; /* qset # */
292 int qs_flags;
293 int coalescing;
294 struct cv qs_cv;
295 struct mtx lock;
296#define QS_NAME_LEN 32
297 char namebuf[QS_NAME_LEN];
298};
299
300struct sge {
301 struct sge_qset qs[SGE_QSETS];
302 struct mtx reg_lock;
303};
304
305struct filter_info;
306
307struct adapter {
308 device_t dev;
309 int flags;
310 TAILQ_ENTRY(adapter) adapter_entry;
311
312 /* PCI register resources */
313 int regs_rid;
314 struct resource *regs_res;
315 int udbs_rid;
316 struct resource *udbs_res;
317 bus_space_handle_t bh;
318 bus_space_tag_t bt;
319 bus_size_t mmio_len;
320 uint32_t link_width;
321
322 /* DMA resources */
323 bus_dma_tag_t parent_dmat;
324 bus_dma_tag_t rx_dmat;
325 bus_dma_tag_t rx_jumbo_dmat;
326 bus_dma_tag_t tx_dmat;
327
328 /* Interrupt resources */
329 struct resource *irq_res;
330 int irq_rid;
331 void *intr_tag;
332
333 uint32_t msix_regs_rid;
334 struct resource *msix_regs_res;
335
336 struct resource *msix_irq_res[SGE_QSETS];
337 int msix_irq_rid[SGE_QSETS];
338 void *msix_intr_tag[SGE_QSETS];
339 uint8_t rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
 340 uint8_t rrss_map[SGE_QSETS]; /* reverse RSS map table */
341 uint16_t rspq_map[RSS_TABLE_SIZE]; /* maps 7-bit cookie to qidx */
342 union {
343 uint8_t fill[SGE_QSETS];
344 uint64_t coalesce;
345 } u;
346
347#define tunq_fill u.fill
348#define tunq_coalesce u.coalesce
349
350 struct filter_info *filters;
351
352 /* Tasks */
353 struct task ext_intr_task;
354 struct task slow_intr_task;
355 struct task tick_task;
356 struct taskqueue *tq;
357 struct callout cxgb_tick_ch;
358 struct callout sge_timer_ch;
359
360 /* Register lock for use by the hardware layer */
361 struct mtx mdio_lock;
362 struct mtx elmer_lock;
363
364 /* Bookkeeping for the hardware layer */
365 struct adapter_params params;
366 unsigned int slow_intr_mask;
367 unsigned long irq_stats[IRQ_NUM_STATS];
368
369 struct sge sge;
370 struct mc7 pmrx;
371 struct mc7 pmtx;
372 struct mc7 cm;
373 struct mc5 mc5;
374
375 struct port_info port[MAX_NPORTS];
376 device_t portdev[MAX_NPORTS];
377 struct t3cdev tdev;
378 char fw_version[64];
379 char port_types[MAX_NPORTS + 1];
380 uint32_t open_device_map;
381 uint32_t registered_device_map;
382 struct mtx lock;
383 driver_intr_t *cxgb_intr;
384 int msi_count;
385
386#define ADAPTER_LOCK_NAME_LEN 32
387 char lockbuf[ADAPTER_LOCK_NAME_LEN];
388 char reglockbuf[ADAPTER_LOCK_NAME_LEN];
389 char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
390 char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
391};
392
393struct t3_rx_mode {
394
395 uint32_t idx;
396 struct port_info *port;
397};
398
399#define MDIO_LOCK(adapter) mtx_lock(&(adapter)->mdio_lock)
400#define MDIO_UNLOCK(adapter) mtx_unlock(&(adapter)->mdio_lock)
401#define ELMR_LOCK(adapter) mtx_lock(&(adapter)->elmer_lock)
402#define ELMR_UNLOCK(adapter) mtx_unlock(&(adapter)->elmer_lock)
403
404
405#define PORT_LOCK(port) mtx_lock(&(port)->lock);
406#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock);
407#define PORT_LOCK_INIT(port, name) mtx_init(&(port)->lock, name, 0, MTX_DEF)
408#define PORT_LOCK_DEINIT(port) mtx_destroy(&(port)->lock)
409#define PORT_LOCK_ASSERT_NOTOWNED(port) mtx_assert(&(port)->lock, MA_NOTOWNED)
410#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)
411
412#define ADAPTER_LOCK(adap) mtx_lock(&(adap)->lock);
413#define ADAPTER_UNLOCK(adap) mtx_unlock(&(adap)->lock);
414#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
415#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
416#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
417#define ADAPTER_LOCK_ASSERT_OWNED(adap) mtx_assert(&(adap)->lock, MA_OWNED)
418
419
420static __inline uint32_t
421t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
422{
423 return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
424}
425
426static __inline void
427t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
428{
429 bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
430}
431
432static __inline void
433t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
434{
435 *val = pci_read_config(adapter->dev, reg, 4);
436}
437
438static __inline void
439t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
440{
441 pci_write_config(adapter->dev, reg, val, 4);
442}
443
444static __inline void
445t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
446{
447 *val = pci_read_config(adapter->dev, reg, 2);
448}
449
450static __inline void
451t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
452{
453 pci_write_config(adapter->dev, reg, val, 2);
454}
455
456static __inline uint8_t *
457t3_get_next_mcaddr(struct t3_rx_mode *rm)
458{
459 uint8_t *macaddr = NULL;
460 struct ifnet *ifp = rm->port->ifp;
461 struct ifmultiaddr *ifma;
462 int i = 0;
463
464 if_maddr_rlock(ifp);
465 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
466 if (ifma->ifma_addr->sa_family != AF_LINK)
467 continue;
468 if (i == rm->idx) {
469 macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
470 break;
471 }
472 i++;
473 }
474 if_maddr_runlock(ifp);
475
476 rm->idx++;
477 return (macaddr);
478}
479
480static __inline void
481t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
482{
483 rm->idx = 0;
484 rm->port = port;
485}
486
487static __inline struct port_info *
488adap2pinfo(struct adapter *adap, int idx)
489{
490 return &adap->port[idx];
491}
492
493int t3_os_find_pci_capability(adapter_t *adapter, int cap);
494int t3_os_pci_save_state(struct adapter *adapter);
495int t3_os_pci_restore_state(struct adapter *adapter);
496void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
497 int speed, int duplex, int fc, int mac_was_reset);
498void t3_os_phymod_changed(struct adapter *adap, int port_id);
499void t3_sge_err_intr_handler(adapter_t *adapter);
500int t3_offload_tx(struct t3cdev *, struct mbuf *);
501void t3_os_ext_intr_handler(adapter_t *adapter);
502void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
503int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);
504
505
506int t3_sge_alloc(struct adapter *);
507int t3_sge_free(struct adapter *);
508int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
509 int, struct port_info *);
510void t3_free_sge_resources(adapter_t *);
511void t3_sge_start(adapter_t *);
512void t3_sge_stop(adapter_t *);
513void t3b_intr(void *data);
514void t3_intr_msi(void *data);
515void t3_intr_msix(void *data);
516
517int t3_sge_init_adapter(adapter_t *);
518int t3_sge_reset_adapter(adapter_t *);
519int t3_sge_init_port(struct port_info *);
520void t3_free_tx_desc(struct sge_qset *qs, int n, int qid);
521
522void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
523
524void t3_add_attach_sysctls(adapter_t *sc);
525void t3_add_configured_sysctls(adapter_t *sc);
526int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
527 unsigned char *data);
528void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
529
530#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
531 (hz * (a)->params.linkpoll_period) / 10 : \
532 (a)->params.stats_update_period * hz)
533
534/*
535 * XXX figure out how we can return this to being private to sge
536 */
537#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))
538
539#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
540
541static __inline struct sge_qset *
542fl_to_qset(struct sge_fl *q, int qidx)
543{
544 return container_of(q, struct sge_qset, fl[qidx]);
545}
546
547static __inline struct sge_qset *
548rspq_to_qset(struct sge_rspq *q)
549{
550 return container_of(q, struct sge_qset, rspq);
551}
552
553static __inline struct sge_qset *
554txq_to_qset(struct sge_txq *q, int qidx)
555{
556 return container_of(q, struct sge_qset, txq[qidx]);
557}
558
559static __inline struct adapter *
560tdev2adap(struct t3cdev *d)
561{
562 return container_of(d, struct adapter, tdev);
563}
564
565#undef container_of
566
567#define OFFLOAD_DEVMAP_BIT 15
568static inline int offload_running(adapter_t *adapter)
569{
570 return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
571}
572
573void cxgb_tx_watchdog(void *arg);
574int cxgb_transmit(struct ifnet *ifp, struct mbuf *m);
575void cxgb_qflush(struct ifnet *ifp);
576void cxgb_start(struct ifnet *ifp);
577#endif
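
The functional change between r206109 and r207688 is the new db_pending field added to struct sge_fl and struct sge_txq. The sketch below is a hypothetical illustration of the general pattern behind such a counter (deferring the hardware doorbell write and notifying the chip once for everything that accumulated); it is not code from this driver, and every name in it (toy_txq, toy_txq_enqueue, toy_ring_doorbell) is made up for the example.

/*
 * Hypothetical illustration only -- not part of cxgb_adapter.h.  Shows how a
 * "db_pending"-style counter can coalesce doorbell writes: descriptors are
 * queued without touching hardware, then one write covers all of them.
 */
#include <stdio.h>

struct toy_txq {
	unsigned int pidx;		/* producer index of the descriptor ring */
	unsigned int db_pending;	/* descriptors queued since the last doorbell */
};

/* Post one descriptor; the doorbell write is deferred. */
static void
toy_txq_enqueue(struct toy_txq *q)
{
	q->pidx++;
	q->db_pending++;
}

/* Flush the deferred notification with a single "register write". */
static void
toy_ring_doorbell(struct toy_txq *q)
{
	if (q->db_pending == 0)
		return;
	/* A real driver would write q->pidx to a doorbell register here. */
	printf("doorbell: pidx=%u (%u descriptors coalesced)\n",
	    q->pidx, q->db_pending);
	q->db_pending = 0;
}

int
main(void)
{
	struct toy_txq q = { 0, 0 };
	int i;

	for (i = 0; i < 4; i++)
		toy_txq_enqueue(&q);
	toy_ring_doorbell(&q);		/* one write covers all four descriptors */
	return (0);
}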