/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


$FreeBSD: head/sys/dev/cxgb/cxgb_adapter.h 176472 2008-02-23 01:06:17Z kmacy $

***************************************************************************/

#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>


#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <ulp/toecore/cxgb_toedev.h>
#include <sys/mbufq.h>
#else
#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/t3cdev.h>
#include <dev/cxgb/sys/mbufq.h>
#include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
        do { \
                printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
                mtx_init((lock), lockname, class, flags); \
        } while (0)

#define MTX_DESTROY(lock) \
        do { \
                printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
                mtx_destroy((lock)); \
        } while (0)

#define SX_INIT(lock, lockname) \
        do { \
                printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
                sx_init((lock), lockname); \
        } while (0)

#define SX_DESTROY(lock) \
        do { \
                printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
                sx_destroy((lock)); \
        } while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
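
/*
 * Illustrative usage (a sketch, not part of the original header): with
 * DEBUG_LOCKING defined, the wrappers above trace lock setup/teardown,
 * so a call such as
 *
 *        MTX_INIT(&sc->mdio_lock, "cxgb mdio", NULL, MTX_DEF);
 *
 * prints "initializing cxgb mdio at <file>:<line>" before calling
 * mtx_init().  Without DEBUG_LOCKING the wrappers collapse to the bare
 * mtx_init()/sx_init()/mtx_destroy()/sx_destroy() calls.  The lock name
 * "cxgb mdio" is only an example.
 */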

struct port_info {
        struct adapter *adapter;
        struct ifnet *ifp;
        int if_flags;
        const struct port_type_info *port_type;
        struct cphy phy;
        struct cmac mac;
        struct link_config link_config;
        struct ifmedia media;
#ifdef USE_SX
        struct sx lock;
#else
        struct mtx lock;
#endif
        uint8_t port_id;
        uint8_t tx_chan;
        uint8_t txpkt_intf;
        uint8_t first_qset;
        uint32_t nqsets;

        uint8_t hw_addr[ETHER_ADDR_LEN];
        struct taskqueue *tq;
        struct task start_task;
        struct task timer_reclaim_task;
        struct cdev *port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define TASKQ_NAME_LEN 32
#define PORT_NAME_LEN 32
        char lockbuf[PORT_LOCK_NAME_LEN];
        char taskqbuf[TASKQ_NAME_LEN];
        char namebuf[PORT_NAME_LEN];
};

enum {                          /* adapter flags */
        FULL_INIT_DONE = (1 << 0),
        USING_MSI      = (1 << 1),
        USING_MSIX     = (1 << 2),
        QUEUES_BOUND   = (1 << 3),
        FW_UPTODATE    = (1 << 4),
        TPS_UPTODATE   = (1 << 5),
        CXGB_SHUTDOWN  = (1 << 6),
        CXGB_OFLD_INIT = (1 << 7),
        TP_PARITY_INIT = (1 << 8),
};

#define FL_Q_SIZE       4096
#define JUMBO_Q_SIZE    1024
#define RSPQ_Q_SIZE     1024
#define TX_ETH_Q_SIZE   1024

enum { TXQ_ETH  = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };


/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt))
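
/*
 * Worked example (values assumed purely for illustration): a work
 * request is built from WR_FLITS 8-byte flits, so if WR_FLITS were 16,
 * WR_LEN would be 16 * 8 = 128 bytes and PIO_LEN would be
 * 128 - sizeof(struct cpl_tx_pkt), i.e. the room left in the work
 * request, after the CPL header, for packet data written inline.
 */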


/* careful, the following are set on priv_flags and must not collide with
 * IFF_ flags!
 */
enum {
        LRO_ACTIVE = (1 << 8),
};

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

struct t3_lro_session {
        struct mbuf *head;
        struct mbuf *tail;
        uint32_t seq;
        uint16_t ip_len;
        uint16_t mss;
        uint16_t vtag;
        uint8_t npkts;
};

struct lro_state {
        unsigned short enabled;
        unsigned short active_idx;
        unsigned int nactive;
        struct t3_lro_session sess[MAX_LRO_SES];
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
        uint32_t credits;
        uint32_t size;
        uint32_t cidx;
        uint32_t gen;
        uint32_t polling;
        uint32_t holdoff_tmr;
        uint32_t next_holdoff;
        uint32_t imm_data;
        uint32_t async_notif;
        uint32_t cntxt_id;
        uint32_t offload_pkts;
        uint32_t offload_bundles;
        uint32_t pure_rsps;
        uint32_t unhandled_irqs;

        bus_addr_t phys_addr;
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;

        struct t3_mbuf_hdr rspq_mh;
        struct rsp_desc *desc;
        struct mtx lock;
#define RSPQ_NAME_LEN 32
        char lockbuf[RSPQ_NAME_LEN];
        uint32_t rspq_dump_start;
        uint32_t rspq_dump_count;
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
        uint32_t buf_size;
        uint32_t credits;
        uint32_t size;
        uint32_t cidx;
        uint32_t pidx;
        uint32_t gen;
        bus_addr_t phys_addr;
        uint32_t cntxt_id;
        uint64_t empty;
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;
        bus_dma_tag_t entry_tag;
        uma_zone_t zone;
        struct rx_desc *desc;
        struct rx_sw_desc *sdesc;
        int type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING 0x1

struct sge_txq {
        uint64_t flags;
        uint32_t in_use;
        uint32_t size;
        uint32_t processed;
        uint32_t cleaned;
        uint32_t stop_thres;
        uint32_t cidx;
        uint32_t pidx;
        uint32_t gen;
        uint32_t unacked;
        struct tx_desc *desc;
        struct tx_sw_desc *sdesc;
        uint32_t token;
        bus_addr_t phys_addr;
        struct task qresume_task;
        struct task qreclaim_task;
        struct port_info *port;
        uint32_t cntxt_id;
        uint64_t stops;
        uint64_t restarts;
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;
        bus_dma_tag_t entry_tag;
        struct mbuf_head sendq;
        /*
         * cleanq should really be a buf_ring to avoid extra
         * mbuf touches
         */
        struct mbuf_head cleanq;
        struct buf_ring txq_mr;
        struct mbuf *immpkt;
        uint32_t txq_drops;
        uint32_t txq_skipped;
        uint32_t txq_coalesced;
        uint32_t txq_enqueued;
        uint32_t txq_dump_start;
        uint32_t txq_dump_count;
        unsigned long txq_frees;
        struct mtx lock;
        struct sg_ent txq_sgl[TX_MAX_SEGS / 2 + 1];
#define TXQ_NAME_LEN 32
        char lockbuf[TXQ_NAME_LEN];
};
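
/*
 * Interpretive sketch of the ring accounting implied by the fields above
 * (not text from the driver): the producer index 'pidx' and consumer
 * index 'cidx' chase each other around a ring of 'size' descriptors, and
 * 'gen' is the generation bit that flips each time 'pidx' wraps so the
 * hardware can distinguish new entries from stale ones.  A caller could
 * estimate free slots roughly as
 *
 *        avail = q->size - q->in_use;
 *
 * and would stop the queue once 'avail' falls below 'stop_thres'.
 */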


enum {
        SGE_PSTAT_TSO,              /* # of TSO requests */
        SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
        SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
        SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
        SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
        SGE_PSTATS_LRO_QUEUED,      /* # of LRO appended packets */
        SGE_PSTATS_LRO_FLUSHED,     /* # of LRO flushed packets */
        SGE_PSTATS_LRO_X_STREAMS,   /* # of exceeded LRO contexts */
};

#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)

#define QS_EXITING 0x1
#define QS_RUNNING 0x2
#define QS_BOUND   0x4

struct sge_qset {
        struct sge_rspq rspq;
        struct sge_fl fl[SGE_RXQ_PER_SET];
        struct lro_state lro;
        struct sge_txq txq[SGE_TXQ_PER_SET];
        uint32_t txq_stopped;       /* which Tx queues are stopped */
        uint64_t port_stats[SGE_PSTAT_MAX];
        struct port_info *port;
        int idx;                    /* qset # */
        int qs_cpuid;
        int qs_flags;
        struct cv qs_cv;
        struct mtx qs_mtx;
#define QS_NAME_LEN 32
        char namebuf[QS_NAME_LEN];
};

struct sge {
        struct sge_qset qs[SGE_QSETS];
        struct mtx reg_lock;
};

struct filter_info;

struct adapter {
        device_t dev;
        int flags;
        TAILQ_ENTRY(adapter) adapter_entry;

        /* PCI register resources */
        int regs_rid;
        struct resource *regs_res;
        int udbs_rid;
        struct resource *udbs_res;
        bus_space_handle_t bh;
        bus_space_tag_t bt;
        bus_size_t mmio_len;
        uint32_t link_width;

        /* DMA resources */
        bus_dma_tag_t parent_dmat;
        bus_dma_tag_t rx_dmat;
        bus_dma_tag_t rx_jumbo_dmat;
        bus_dma_tag_t tx_dmat;

        /* Interrupt resources */
        struct resource *irq_res;
        int irq_rid;
        void *intr_tag;

        uint32_t msix_regs_rid;
        struct resource *msix_regs_res;

        struct resource *msix_irq_res[SGE_QSETS];
        int msix_irq_rid[SGE_QSETS];
        void *msix_intr_tag[SGE_QSETS];
        uint8_t rxpkt_map[8];               /* maps RX_PKT interface values to port ids */
        uint8_t rrss_map[SGE_QSETS];        /* reverse RSS map table */
        uint16_t rspq_map[RSS_TABLE_SIZE];  /* maps 7-bit cookie to qidx */
        union {
                uint8_t fill[SGE_QSETS];
                uint64_t coalesce;
        } u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

        struct filter_info *filters;

        /* Tasks */
        struct task ext_intr_task;
        struct task slow_intr_task;
        struct task tick_task;
        struct task process_responses_task;
        struct taskqueue *tq;
        struct callout cxgb_tick_ch;
        struct callout sge_timer_ch;

        /* Register lock for use by the hardware layer */
        struct mtx mdio_lock;
        struct mtx elmer_lock;

        /* Bookkeeping for the hardware layer */
        struct adapter_params params;
        unsigned int slow_intr_mask;
        unsigned long irq_stats[IRQ_NUM_STATS];

        struct sge sge;
        struct mc7 pmrx;
        struct mc7 pmtx;
        struct mc7 cm;
        struct mc5 mc5;

        struct port_info port[MAX_NPORTS];
        device_t portdev[MAX_NPORTS];
        struct t3cdev tdev;
        char fw_version[64];
        uint32_t open_device_map;
        uint32_t registered_device_map;
#ifdef USE_SX
        struct sx lock;
#else
        struct mtx lock;
#endif
        driver_intr_t *cxgb_intr;
        int msi_count;

#define ADAPTER_LOCK_NAME_LEN 32
        char lockbuf[ADAPTER_LOCK_NAME_LEN];
        char reglockbuf[ADAPTER_LOCK_NAME_LEN];
        char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
        char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
        uint32_t idx;
        struct port_info *port;
};


#define MDIO_LOCK(adapter)      mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)    mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)      mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)    mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)                 sx_xlock(&(port)->lock);
#define PORT_UNLOCK(port)               sx_xunlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)      SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)          SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port)    sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)              sx_xlock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)            sx_xunlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)   SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)       SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap)      sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)                 mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)               mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)      mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)          mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port)    mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)              mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)            mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)   mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap)       mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap)      mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
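
/*
 * Illustrative usage only (not from the driver sources): the macros above
 * are used in matched pairs around per-port or per-adapter state changes,
 * with the underlying primitive (sx lock or mutex) selected by USE_SX.
 * The body shown here is hypothetical.
 *
 *        PORT_LOCK_INIT(p, p->lockbuf);
 *        ...
 *        PORT_LOCK(p);
 *        p->if_flags = p->ifp->if_flags;
 *        PORT_UNLOCK(p);
 *        ...
 *        PORT_LOCK_DEINIT(p);
 */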


static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
        return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
        bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}
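
/*
 * Illustrative sketch (not part of the original header): a typical
 * read-modify-write of a device register through the accessors above.
 * A_EXAMPLE_CTRL and F_EXAMPLE_ENABLE are hypothetical stand-ins for the
 * real register and field macros from the hardware headers.
 *
 *        uint32_t v;
 *
 *        v = t3_read_reg(sc, A_EXAMPLE_CTRL);
 *        v |= F_EXAMPLE_ENABLE;
 *        t3_write_reg(sc, A_EXAMPLE_CTRL, v);
 */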

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
        *val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
        pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
        *val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
        pci_write_config(adapter->dev, reg, val, 2);
}
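
/*
 * Example (for illustration only; PCIR_COMMAND and PCIM_CMD_BUSMASTEREN
 * come from <dev/pci/pcireg.h>, included above): re-asserting bus
 * mastering in the PCI command register via the 16-bit wrappers.
 *
 *        uint16_t cmd;
 *
 *        t3_os_pci_read_config_2(sc, PCIR_COMMAND, &cmd);
 *        cmd |= PCIM_CMD_BUSMASTEREN;
 *        t3_os_pci_write_config_2(sc, PCIR_COMMAND, cmd);
 */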

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
        uint8_t *macaddr = NULL;
        struct ifnet *ifp = rm->port->ifp;
        struct ifmultiaddr *ifma;
        int i = 0;

        IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                if (i == rm->idx) {
                        macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                        break;
                }
                i++;
        }
        IF_ADDR_UNLOCK(ifp);

        rm->idx++;
        return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
        rm->idx = 0;
        rm->port = port;
}
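
/*
 * Illustrative caller (a sketch, not in the original header; 'p' is a
 * hypothetical struct port_info pointer): the pair above acts as a
 * cursor over the interface's link-level multicast list, returning one
 * address per call until NULL.
 *
 *        struct t3_rx_mode rm;
 *        uint8_t *ea;
 *
 *        t3_init_rx_mode(&rm, p);
 *        while ((ea = t3_get_next_mcaddr(&rm)) != NULL)
 *                (program 'ea' into the MAC's multicast filter)
 *
 * Note that each call rescans the list from its head, so walking n
 * addresses this way costs O(n^2) list traversals.
 */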

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
        return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
    int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))
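
/*
 * Worked example (numbers chosen only for illustration): with
 * q->processed == 1000, q->cleaned == 980 and TX_MAX_DESC == 4,
 * desc_reclaimable(q) yields 1000 - 980 - 4 = 16 descriptors that have
 * completed but not yet been reclaimed, keeping a TX_MAX_DESC cushion in
 * reserve.  The cast to int lets the result go negative when fewer than
 * TX_MAX_DESC descriptors are outstanding.
 */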

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
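
/*
 * For example (a mechanical expansion of the macro above, shown for
 * clarity): given a pointer 'rq' to the 'rspq' member embedded in a
 * struct sge_qset,
 *
 *        container_of(rq, struct sge_qset, rspq)
 *
 * subtracts offsetof(struct sge_qset, rspq) bytes from 'rq' and casts the
 * result back to struct sge_qset *, which is exactly what rspq_to_qset()
 * below does.
 */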

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
        return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
        return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
        return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
        return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
        return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);

int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
#endif