Deleted Added
full compact
adapter.h (219289) adapter.h (219290)
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: head/sys/dev/cxgbe/adapter.h 219289 2011-03-05 03:27:14Z np $
27 * $FreeBSD: head/sys/dev/cxgbe/adapter.h 219290 2011-03-05 03:42:03Z np $
28 *
29 */
30
31#ifndef __T4_ADAPTER_H__
32#define __T4_ADAPTER_H__
33
34#include <sys/bus.h>
35#include <sys/rman.h>
36#include <sys/types.h>
37#include <sys/malloc.h>
38#include <dev/pci/pcivar.h>
39#include <dev/pci/pcireg.h>
40#include <machine/bus.h>
41#include <sys/socket.h>
42#include <sys/sysctl.h>
43#include <net/ethernet.h>
44#include <net/if.h>
45#include <net/if_media.h>
46#include <netinet/tcp_lro.h>
47
48#include "offload.h"
49#include "common/t4fw_interface.h"
50
51#define T4_FWNAME "t4fw"
52
53MALLOC_DECLARE(M_CXGBE);
54#define CXGBE_UNIMPLEMENTED(s) \
55 panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
56
57#if defined(__i386__) || defined(__amd64__)
58static __inline void
59prefetch(void *x)
60{
61 __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
62}
63#else
64#define prefetch(x)
65#endif
66
67#ifdef __amd64__
68/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
69static __inline uint64_t
70t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
71 bus_size_t offset)
72{
73 KASSERT(tag == X86_BUS_SPACE_MEM,
74 ("%s: can only handle mem space", __func__));
75
76 return (*(volatile uint64_t *)(handle + offset));
77}
78
79static __inline void
80t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
81 bus_size_t offset, uint64_t value)
82{
83 KASSERT(tag == X86_BUS_SPACE_MEM,
84 ("%s: can only handle mem space", __func__));
85
86 *(volatile uint64_t *)(bsh + offset) = value;
87}
88#else
89static __inline uint64_t
90t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
91 bus_size_t offset)
92{
93 return (uint64_t)bus_space_read_4(tag, handle, offset) +
94 ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
95}
96
97static __inline void
98t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
99 bus_size_t offset, uint64_t value)
100{
101 bus_space_write_4(tag, bsh, offset, value);
102 bus_space_write_4(tag, bsh, offset + 4, value >> 32);
103}
104#endif
105
106struct adapter;
107typedef struct adapter adapter_t;
108
109enum {
110 FW_IQ_QSIZE = 256,
111 FW_IQ_ESIZE = 64, /* At least 64 mandated by the firmware spec */
112
113 RX_IQ_QSIZE = 1024,
114 RX_IQ_ESIZE = 64, /* At least 64 so CPL_RX_PKT will fit */
115
116 RX_FL_ESIZE = 64, /* 8 64bit addresses */
117
118 FL_BUF_SIZES = 4,
119
120 TX_EQ_QSIZE = 1024,
121 TX_EQ_ESIZE = 64,
122 TX_SGL_SEGS = 36,
123 TX_WR_FLITS = SGE_MAX_WR_LEN / 8
124};
125
126enum {
127 /* adapter flags */
128 FULL_INIT_DONE = (1 << 0),
129 FW_OK = (1 << 1),
130 INTR_FWD = (1 << 2),
131
132 CXGBE_BUSY = (1 << 9),
133
134 /* port flags */
135 DOOMED = (1 << 0),
136 VI_ENABLED = (1 << 1),
137};
138
139#define IS_DOOMED(pi) (pi->flags & DOOMED)
140#define SET_DOOMED(pi) do {pi->flags |= DOOMED;} while (0)
141#define IS_BUSY(sc) (sc->flags & CXGBE_BUSY)
142#define SET_BUSY(sc) do {sc->flags |= CXGBE_BUSY;} while (0)
143#define CLR_BUSY(sc) do {sc->flags &= ~CXGBE_BUSY;} while (0)
144
145struct port_info {
146 device_t dev;
147 struct adapter *adapter;
148
149 struct ifnet *ifp;
150 struct ifmedia media;
151
152 struct mtx pi_lock;
153 char lockname[16];
154 unsigned long flags;
155 int if_flags;
156
157 uint16_t viid;
158 int16_t xact_addr_filt;/* index of exact MAC address filter */
159 uint16_t rss_size; /* size of VI's RSS table slice */
160 uint8_t lport; /* associated offload logical port */
161 int8_t mdio_addr;
162 uint8_t port_type;
163 uint8_t mod_type;
164 uint8_t port_id;
165 uint8_t tx_chan;
166
167 /* These need to be int as they are used in sysctl */
168 int ntxq; /* # of tx queues */
169 int first_txq; /* index of first tx queue */
170 int nrxq; /* # of rx queues */
171 int first_rxq; /* index of first rx queue */
172 int tmr_idx;
173 int pktc_idx;
174 int qsize_rxq;
175 int qsize_txq;
176
177 struct link_config link_cfg;
178 struct port_stats stats;
179
180 struct taskqueue *tq;
181 struct callout tick;
182 struct sysctl_ctx_list ctx; /* lives from ifconfig up to down */
183 struct sysctl_oid *oid_rxq;
184 struct sysctl_oid *oid_txq;
185
186 uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
187};
188
189struct fl_sdesc {
190 struct mbuf *m;
191 bus_dmamap_t map;
192 caddr_t cl;
193 uint8_t tag_idx; /* the sc->fl_tag this map comes from */
194#ifdef INVARIANTS
195 __be64 ba_tag;
196#endif
197};
198
199struct tx_desc {
200 __be64 flit[8];
201};
202
203struct tx_map {
204 struct mbuf *m;
205 bus_dmamap_t map;
206};
207
208struct tx_sdesc {
209 uint8_t desc_used; /* # of hardware descriptors used by the WR */
210 uint8_t map_used; /* # of frames sent out in the WR */
211};
212
213typedef void (iq_intr_handler_t)(void *);
214
215enum {
216 /* iq flags */
217 IQ_ALLOCATED = (1 << 1), /* firmware resources allocated */
218 IQ_STARTED = (1 << 2), /* started */
219};
220
221/*
222 * Ingress Queue: T4 is producer, driver is consumer.
223 */
224struct sge_iq {
225 bus_dma_tag_t desc_tag;
226 bus_dmamap_t desc_map;
28 *
29 */
30
31#ifndef __T4_ADAPTER_H__
32#define __T4_ADAPTER_H__
33
34#include <sys/bus.h>
35#include <sys/rman.h>
36#include <sys/types.h>
37#include <sys/malloc.h>
38#include <dev/pci/pcivar.h>
39#include <dev/pci/pcireg.h>
40#include <machine/bus.h>
41#include <sys/socket.h>
42#include <sys/sysctl.h>
43#include <net/ethernet.h>
44#include <net/if.h>
45#include <net/if_media.h>
46#include <netinet/tcp_lro.h>
47
48#include "offload.h"
49#include "common/t4fw_interface.h"
50
51#define T4_FWNAME "t4fw"
52
53MALLOC_DECLARE(M_CXGBE);
54#define CXGBE_UNIMPLEMENTED(s) \
55 panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
56
57#if defined(__i386__) || defined(__amd64__)
58static __inline void
59prefetch(void *x)
60{
61 __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
62}
63#else
64#define prefetch(x)
65#endif
66
67#ifdef __amd64__
68/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
69static __inline uint64_t
70t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
71 bus_size_t offset)
72{
73 KASSERT(tag == X86_BUS_SPACE_MEM,
74 ("%s: can only handle mem space", __func__));
75
76 return (*(volatile uint64_t *)(handle + offset));
77}
78
79static __inline void
80t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
81 bus_size_t offset, uint64_t value)
82{
83 KASSERT(tag == X86_BUS_SPACE_MEM,
84 ("%s: can only handle mem space", __func__));
85
86 *(volatile uint64_t *)(bsh + offset) = value;
87}
88#else
89static __inline uint64_t
90t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
91 bus_size_t offset)
92{
93 return (uint64_t)bus_space_read_4(tag, handle, offset) +
94 ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
95}
96
97static __inline void
98t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
99 bus_size_t offset, uint64_t value)
100{
101 bus_space_write_4(tag, bsh, offset, value);
102 bus_space_write_4(tag, bsh, offset + 4, value >> 32);
103}
104#endif
105
106struct adapter;
107typedef struct adapter adapter_t;
108
109enum {
110 FW_IQ_QSIZE = 256,
111 FW_IQ_ESIZE = 64, /* At least 64 mandated by the firmware spec */
112
113 RX_IQ_QSIZE = 1024,
114 RX_IQ_ESIZE = 64, /* At least 64 so CPL_RX_PKT will fit */
115
116 RX_FL_ESIZE = 64, /* 8 64bit addresses */
117
118 FL_BUF_SIZES = 4,
119
120 TX_EQ_QSIZE = 1024,
121 TX_EQ_ESIZE = 64,
122 TX_SGL_SEGS = 36,
123 TX_WR_FLITS = SGE_MAX_WR_LEN / 8
124};
125
126enum {
127 /* adapter flags */
128 FULL_INIT_DONE = (1 << 0),
129 FW_OK = (1 << 1),
130 INTR_FWD = (1 << 2),
131
132 CXGBE_BUSY = (1 << 9),
133
134 /* port flags */
135 DOOMED = (1 << 0),
136 VI_ENABLED = (1 << 1),
137};
138
139#define IS_DOOMED(pi) (pi->flags & DOOMED)
140#define SET_DOOMED(pi) do {pi->flags |= DOOMED;} while (0)
141#define IS_BUSY(sc) (sc->flags & CXGBE_BUSY)
142#define SET_BUSY(sc) do {sc->flags |= CXGBE_BUSY;} while (0)
143#define CLR_BUSY(sc) do {sc->flags &= ~CXGBE_BUSY;} while (0)
144
145struct port_info {
146 device_t dev;
147 struct adapter *adapter;
148
149 struct ifnet *ifp;
150 struct ifmedia media;
151
152 struct mtx pi_lock;
153 char lockname[16];
154 unsigned long flags;
155 int if_flags;
156
157 uint16_t viid;
158 int16_t xact_addr_filt;/* index of exact MAC address filter */
159 uint16_t rss_size; /* size of VI's RSS table slice */
160 uint8_t lport; /* associated offload logical port */
161 int8_t mdio_addr;
162 uint8_t port_type;
163 uint8_t mod_type;
164 uint8_t port_id;
165 uint8_t tx_chan;
166
167 /* These need to be int as they are used in sysctl */
168 int ntxq; /* # of tx queues */
169 int first_txq; /* index of first tx queue */
170 int nrxq; /* # of rx queues */
171 int first_rxq; /* index of first rx queue */
172 int tmr_idx;
173 int pktc_idx;
174 int qsize_rxq;
175 int qsize_txq;
176
177 struct link_config link_cfg;
178 struct port_stats stats;
179
180 struct taskqueue *tq;
181 struct callout tick;
182 struct sysctl_ctx_list ctx; /* lives from ifconfig up to down */
183 struct sysctl_oid *oid_rxq;
184 struct sysctl_oid *oid_txq;
185
186 uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
187};
188
189struct fl_sdesc {
190 struct mbuf *m;
191 bus_dmamap_t map;
192 caddr_t cl;
193 uint8_t tag_idx; /* the sc->fl_tag this map comes from */
194#ifdef INVARIANTS
195 __be64 ba_tag;
196#endif
197};
198
199struct tx_desc {
200 __be64 flit[8];
201};
202
203struct tx_map {
204 struct mbuf *m;
205 bus_dmamap_t map;
206};
207
208struct tx_sdesc {
209 uint8_t desc_used; /* # of hardware descriptors used by the WR */
210 uint8_t map_used; /* # of frames sent out in the WR */
211};
212
213typedef void (iq_intr_handler_t)(void *);
214
215enum {
216 /* iq flags */
217 IQ_ALLOCATED = (1 << 1), /* firmware resources allocated */
218 IQ_STARTED = (1 << 2), /* started */
219};
220
221/*
222 * Ingress Queue: T4 is producer, driver is consumer.
223 */
224struct sge_iq {
225 bus_dma_tag_t desc_tag;
226 bus_dmamap_t desc_map;
227 struct mtx iq_lock;
227 bus_addr_t ba; /* bus address of descriptor ring */
228 char lockname[16];
228 char lockname[16];
229 unsigned int flags;
230 struct adapter *adapter;
229 uint32_t flags;
230 uint16_t abs_id; /* absolute SGE id for the iq */
231 int8_t intr_pktc_idx; /* packet count threshold index */
232 int8_t pad0;
233 iq_intr_handler_t *handler;
234 __be64 *desc; /* KVA of descriptor ring */
231
235
232 __be64 *desc; /* KVA of descriptor ring */
233 bus_addr_t ba; /* bus address of descriptor ring */
236 struct mtx iq_lock;
237 struct adapter *adapter;
234 const __be64 *cdesc; /* current descriptor */
235 uint8_t gen; /* generation bit */
236 uint8_t intr_params; /* interrupt holdoff parameters */
238 const __be64 *cdesc; /* current descriptor */
239 uint8_t gen; /* generation bit */
240 uint8_t intr_params; /* interrupt holdoff parameters */
237 int8_t intr_pktc_idx; /* packet count threshold index */
238 uint8_t intr_next; /* holdoff for next interrupt */
239 uint8_t esize; /* size (bytes) of each entry in the queue */
240 uint16_t qsize; /* size (# of entries) of the queue */
241 uint16_t cidx; /* consumer index */
242 uint16_t cntxt_id; /* SGE context id for the iq */
241 uint8_t intr_next; /* holdoff for next interrupt */
242 uint8_t esize; /* size (bytes) of each entry in the queue */
243 uint16_t qsize; /* size (# of entries) of the queue */
244 uint16_t cidx; /* consumer index */
245 uint16_t cntxt_id; /* SGE context id for the iq */
243 uint16_t abs_id; /* absolute SGE id for the iq */
244 iq_intr_handler_t *handler;
245};
246
247enum {
248 /* eq flags */
249 EQ_ALLOCATED = (1 << 1), /* firmware resources allocated */
250 EQ_STARTED = (1 << 2), /* started */
251 EQ_STALLED = (1 << 3), /* currently stalled */
252};
253
254/*
255 * Egress Queue: driver is producer, T4 is consumer.
256 *
257 * Note: A free list is an egress queue (driver produces the buffers and T4
258 * consumes them) but it's special enough to have its own struct (see sge_fl).
259 */
260struct sge_eq {
261 bus_dma_tag_t tx_tag; /* tag for transmit buffers */
262 bus_dma_tag_t desc_tag;
263 bus_dmamap_t desc_map;
264 char lockname[16];
265 unsigned int flags;
266 struct mtx eq_lock;
267
268 struct tx_desc *desc; /* KVA of descriptor ring */
269 bus_addr_t ba; /* bus address of descriptor ring */
270 struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
271 struct buf_ring *br; /* tx buffer ring */
272 struct sge_qstat *spg; /* status page, for convenience */
273 uint16_t cap; /* max # of desc, for convenience */
274 uint16_t avail; /* available descriptors, for convenience */
275 uint16_t qsize; /* size (# of entries) of the queue */
276 uint16_t cidx; /* consumer idx (desc idx) */
277 uint16_t pidx; /* producer idx (desc idx) */
278 uint16_t pending; /* # of descriptors used since last doorbell */
279 uint16_t iqid; /* iq that gets egr_update for the eq */
280 uint32_t cntxt_id; /* SGE context id for the eq */
281
282 /* DMA maps used for tx */
283 struct tx_map *maps;
284 uint32_t map_total; /* # of DMA maps */
285 uint32_t map_pidx; /* next map to be used */
286 uint32_t map_cidx; /* reclaimed up to this index */
287 uint32_t map_avail; /* # of available maps */
288} __aligned(CACHE_LINE_SIZE);
289
290struct sge_fl {
291 bus_dma_tag_t desc_tag;
292 bus_dmamap_t desc_map;
293 bus_dma_tag_t tag[FL_BUF_SIZES];
294 uint8_t tag_idx;
295 struct mtx fl_lock;
296 char lockname[16];
297
298 __be64 *desc; /* KVA of descriptor ring, ptr to addresses */
299 bus_addr_t ba; /* bus address of descriptor ring */
300 struct fl_sdesc *sdesc; /* KVA of software descriptor ring */
301 uint32_t cap; /* max # of buffers, for convenience */
302 uint16_t qsize; /* size (# of entries) of the queue */
303 uint16_t cntxt_id; /* SGE context id for the freelist */
304 uint32_t cidx; /* consumer idx (buffer idx, NOT hw desc idx) */
305 uint32_t pidx; /* producer idx (buffer idx, NOT hw desc idx) */
306 uint32_t needed; /* # of buffers needed to fill up fl. */
307 uint32_t pending; /* # of bufs allocated since last doorbell */
308 unsigned int dmamap_failed;
309};
310
311/* txq: SGE egress queue + miscellaneous items */
312struct sge_txq {
313 struct sge_eq eq; /* MUST be first */
314 struct mbuf *m; /* held up due to temporary resource shortage */
315 struct task resume_tx;
316
317 struct ifnet *ifp; /* the interface this txq belongs to */
318
319 /* stats for common events first */
320
321 uint64_t txcsum; /* # of times hardware assisted with checksum */
322 uint64_t tso_wrs; /* # of IPv4 TSO work requests */
323 uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
324 uint64_t imm_wrs; /* # of work requests with immediate data */
325 uint64_t sgl_wrs; /* # of work requests with direct SGL */
326 uint64_t txpkt_wrs; /* # of txpkt work requests (not coalesced) */
327 uint64_t txpkts_wrs; /* # of coalesced tx work requests */
328 uint64_t txpkts_pkts; /* # of frames in coalesced tx work requests */
329
330 /* stats for not-that-common events */
331
332 uint32_t no_dmamap; /* no DMA map to load the mbuf */
333 uint32_t no_desc; /* out of hardware descriptors */
334 uint32_t egr_update; /* # of SGE_EGR_UPDATE notifications for txq */
335};
336
337enum {
338 RXQ_LRO_ENABLED = (1 << 0)
339};
340/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
341struct sge_rxq {
342 struct sge_iq iq; /* MUST be first */
343 struct sge_fl fl;
344
246};
247
248enum {
249 /* eq flags */
250 EQ_ALLOCATED = (1 << 1), /* firmware resources allocated */
251 EQ_STARTED = (1 << 2), /* started */
252 EQ_STALLED = (1 << 3), /* currently stalled */
253};
254
255/*
256 * Egress Queue: driver is producer, T4 is consumer.
257 *
258 * Note: A free list is an egress queue (driver produces the buffers and T4
259 * consumes them) but it's special enough to have its own struct (see sge_fl).
260 */
261struct sge_eq {
262 bus_dma_tag_t tx_tag; /* tag for transmit buffers */
263 bus_dma_tag_t desc_tag;
264 bus_dmamap_t desc_map;
265 char lockname[16];
266 unsigned int flags;
267 struct mtx eq_lock;
268
269 struct tx_desc *desc; /* KVA of descriptor ring */
270 bus_addr_t ba; /* bus address of descriptor ring */
271 struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
272 struct buf_ring *br; /* tx buffer ring */
273 struct sge_qstat *spg; /* status page, for convenience */
274 uint16_t cap; /* max # of desc, for convenience */
275 uint16_t avail; /* available descriptors, for convenience */
276 uint16_t qsize; /* size (# of entries) of the queue */
277 uint16_t cidx; /* consumer idx (desc idx) */
278 uint16_t pidx; /* producer idx (desc idx) */
279 uint16_t pending; /* # of descriptors used since last doorbell */
280 uint16_t iqid; /* iq that gets egr_update for the eq */
281 uint32_t cntxt_id; /* SGE context id for the eq */
282
283 /* DMA maps used for tx */
284 struct tx_map *maps;
285 uint32_t map_total; /* # of DMA maps */
286 uint32_t map_pidx; /* next map to be used */
287 uint32_t map_cidx; /* reclaimed up to this index */
288 uint32_t map_avail; /* # of available maps */
289} __aligned(CACHE_LINE_SIZE);
290
291struct sge_fl {
292 bus_dma_tag_t desc_tag;
293 bus_dmamap_t desc_map;
294 bus_dma_tag_t tag[FL_BUF_SIZES];
295 uint8_t tag_idx;
296 struct mtx fl_lock;
297 char lockname[16];
298
299 __be64 *desc; /* KVA of descriptor ring, ptr to addresses */
300 bus_addr_t ba; /* bus address of descriptor ring */
301 struct fl_sdesc *sdesc; /* KVA of software descriptor ring */
302 uint32_t cap; /* max # of buffers, for convenience */
303 uint16_t qsize; /* size (# of entries) of the queue */
304 uint16_t cntxt_id; /* SGE context id for the freelist */
305 uint32_t cidx; /* consumer idx (buffer idx, NOT hw desc idx) */
306 uint32_t pidx; /* producer idx (buffer idx, NOT hw desc idx) */
307 uint32_t needed; /* # of buffers needed to fill up fl. */
308 uint32_t pending; /* # of bufs allocated since last doorbell */
309 unsigned int dmamap_failed;
310};
311
312/* txq: SGE egress queue + miscellaneous items */
313struct sge_txq {
314 struct sge_eq eq; /* MUST be first */
315 struct mbuf *m; /* held up due to temporary resource shortage */
316 struct task resume_tx;
317
318 struct ifnet *ifp; /* the interface this txq belongs to */
319
320 /* stats for common events first */
321
322 uint64_t txcsum; /* # of times hardware assisted with checksum */
323 uint64_t tso_wrs; /* # of IPv4 TSO work requests */
324 uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
325 uint64_t imm_wrs; /* # of work requests with immediate data */
326 uint64_t sgl_wrs; /* # of work requests with direct SGL */
327 uint64_t txpkt_wrs; /* # of txpkt work requests (not coalesced) */
328 uint64_t txpkts_wrs; /* # of coalesced tx work requests */
329 uint64_t txpkts_pkts; /* # of frames in coalesced tx work requests */
330
331 /* stats for not-that-common events */
332
333 uint32_t no_dmamap; /* no DMA map to load the mbuf */
334 uint32_t no_desc; /* out of hardware descriptors */
335 uint32_t egr_update; /* # of SGE_EGR_UPDATE notifications for txq */
336};
337
338enum {
339 RXQ_LRO_ENABLED = (1 << 0)
340};
341/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
342struct sge_rxq {
343 struct sge_iq iq; /* MUST be first */
344 struct sge_fl fl;
345
345 unsigned int flags;
346 struct ifnet *ifp; /* the interface this rxq belongs to */
346 struct ifnet *ifp; /* the interface this rxq belongs to */
347 unsigned int flags;
348#ifdef INET
347 struct lro_ctrl lro; /* LRO state */
349 struct lro_ctrl lro; /* LRO state */
350#endif
348
349 /* stats for common events first */
350
351 uint64_t rxcsum; /* # of times hardware assisted with checksum */
352 uint64_t vlan_extraction;/* # of times VLAN tag was extracted */
353
354 /* stats for not-that-common events */
355
356} __aligned(CACHE_LINE_SIZE);
357
358struct sge {
359 uint16_t timer_val[SGE_NTIMERS];
360 uint8_t counter_val[SGE_NCOUNTERS];
361
362 int nrxq; /* total rx queues (all ports and the rest) */
363 int ntxq; /* total tx queues (all ports and the rest) */
364 int niq; /* total ingress queues */
365 int neq; /* total egress queues */
366
367 struct sge_iq fwq; /* Firmware event queue */
368 struct sge_iq *fiq; /* Forwarded interrupt queues (INTR_FWD) */
369 struct sge_txq *txq; /* NIC tx queues */
370 struct sge_rxq *rxq; /* NIC rx queues */
371
372 uint16_t iq_start;
373 int eq_start;
374 struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
375 struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */
376};
377
378struct adapter {
379 device_t dev;
380 struct cdev *cdev;
381
382 /* PCIe register resources */
383 int regs_rid;
384 struct resource *regs_res;
385 int msix_rid;
386 struct resource *msix_res;
387 bus_space_handle_t bh;
388 bus_space_tag_t bt;
389 bus_size_t mmio_len;
390
391 unsigned int pf;
392 unsigned int mbox;
393
394 /* Interrupt information */
395 int intr_type;
396 int intr_count;
397 struct irq {
398 struct resource *res;
399 int rid;
400 void *tag;
401 } *irq;
402
403 bus_dma_tag_t dmat; /* Parent DMA tag */
404
405 struct sge sge;
406
407 struct port_info *port[MAX_NPORTS];
408 uint8_t chan_map[NCHAN];
409
410 struct tid_info tids;
411
412 int registered_device_map;
413 int open_device_map;
414 int flags;
415
416 char fw_version[32];
417 struct adapter_params params;
418 struct t4_virt_res vres;
419
420 struct mtx sc_lock;
421 char lockname[16];
422};
423
424#define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock)
425#define ADAPTER_UNLOCK(sc) mtx_unlock(&(sc)->sc_lock)
426#define ADAPTER_LOCK_ASSERT_OWNED(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED)
427#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)
428
429#define PORT_LOCK(pi) mtx_lock(&(pi)->pi_lock)
430#define PORT_UNLOCK(pi) mtx_unlock(&(pi)->pi_lock)
431#define PORT_LOCK_ASSERT_OWNED(pi) mtx_assert(&(pi)->pi_lock, MA_OWNED)
432#define PORT_LOCK_ASSERT_NOTOWNED(pi) mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)
433
434#define IQ_LOCK(iq) mtx_lock(&(iq)->iq_lock)
435#define IQ_UNLOCK(iq) mtx_unlock(&(iq)->iq_lock)
436#define IQ_LOCK_ASSERT_OWNED(iq) mtx_assert(&(iq)->iq_lock, MA_OWNED)
437#define IQ_LOCK_ASSERT_NOTOWNED(iq) mtx_assert(&(iq)->iq_lock, MA_NOTOWNED)
438
439#define FL_LOCK(fl) mtx_lock(&(fl)->fl_lock)
440#define FL_TRYLOCK(fl) mtx_trylock(&(fl)->fl_lock)
441#define FL_UNLOCK(fl) mtx_unlock(&(fl)->fl_lock)
442#define FL_LOCK_ASSERT_OWNED(fl) mtx_assert(&(fl)->fl_lock, MA_OWNED)
443#define FL_LOCK_ASSERT_NOTOWNED(fl) mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)
444
445#define RXQ_LOCK(rxq) IQ_LOCK(&(rxq)->iq)
446#define RXQ_UNLOCK(rxq) IQ_UNLOCK(&(rxq)->iq)
447#define RXQ_LOCK_ASSERT_OWNED(rxq) IQ_LOCK_ASSERT_OWNED(&(rxq)->iq)
448#define RXQ_LOCK_ASSERT_NOTOWNED(rxq) IQ_LOCK_ASSERT_NOTOWNED(&(rxq)->iq)
449
450#define RXQ_FL_LOCK(rxq) FL_LOCK(&(rxq)->fl)
451#define RXQ_FL_UNLOCK(rxq) FL_UNLOCK(&(rxq)->fl)
452#define RXQ_FL_LOCK_ASSERT_OWNED(rxq) FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
453#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)
454
455#define EQ_LOCK(eq) mtx_lock(&(eq)->eq_lock)
456#define EQ_TRYLOCK(eq) mtx_trylock(&(eq)->eq_lock)
457#define EQ_UNLOCK(eq) mtx_unlock(&(eq)->eq_lock)
458#define EQ_LOCK_ASSERT_OWNED(eq) mtx_assert(&(eq)->eq_lock, MA_OWNED)
459#define EQ_LOCK_ASSERT_NOTOWNED(eq) mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)
460
461#define TXQ_LOCK(txq) EQ_LOCK(&(txq)->eq)
462#define TXQ_TRYLOCK(txq) EQ_TRYLOCK(&(txq)->eq)
463#define TXQ_UNLOCK(txq) EQ_UNLOCK(&(txq)->eq)
464#define TXQ_LOCK_ASSERT_OWNED(txq) EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
465#define TXQ_LOCK_ASSERT_NOTOWNED(txq) EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
466
467#define for_each_txq(pi, iter, txq) \
468 txq = &pi->adapter->sge.txq[pi->first_txq]; \
469 for (iter = 0; iter < pi->ntxq; ++iter, ++txq)
470#define for_each_rxq(pi, iter, rxq) \
471 rxq = &pi->adapter->sge.rxq[pi->first_rxq]; \
472 for (iter = 0; iter < pi->nrxq; ++iter, ++rxq)
473
474#define NFIQ(sc) ((sc)->intr_count > 1 ? (sc)->intr_count - 1 : 1)
475
476static inline uint32_t
477t4_read_reg(struct adapter *sc, uint32_t reg)
478{
479 return bus_space_read_4(sc->bt, sc->bh, reg);
480}
481
482static inline void
483t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
484{
485 bus_space_write_4(sc->bt, sc->bh, reg, val);
486}
487
488static inline uint64_t
489t4_read_reg64(struct adapter *sc, uint32_t reg)
490{
491 return t4_bus_space_read_8(sc->bt, sc->bh, reg);
492}
493
494static inline void
495t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
496{
497 t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
498}
499
500static inline void
501t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
502{
503 *val = pci_read_config(sc->dev, reg, 1);
504}
505
506static inline void
507t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
508{
509 pci_write_config(sc->dev, reg, val, 1);
510}
511
512static inline void
513t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
514{
515 *val = pci_read_config(sc->dev, reg, 2);
516}
517
518static inline void
519t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
520{
521 pci_write_config(sc->dev, reg, val, 2);
522}
523
524static inline void
525t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
526{
527 *val = pci_read_config(sc->dev, reg, 4);
528}
529
530static inline void
531t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
532{
533 pci_write_config(sc->dev, reg, val, 4);
534}
535
536static inline struct port_info *
537adap2pinfo(struct adapter *sc, int idx)
538{
539 return (sc->port[idx]);
540}
541
542static inline void
543t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
544{
545 bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
546}
547
548static inline bool is_10G_port(const struct port_info *pi)
549{
550 return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
551}
552
553/* t4_main.c */
554void cxgbe_txq_start(void *, int);
555int t4_os_find_pci_capability(struct adapter *, int);
556int t4_os_pci_save_state(struct adapter *);
557int t4_os_pci_restore_state(struct adapter *);
558void t4_os_portmod_changed(const struct adapter *, int);
559void t4_os_link_changed(struct adapter *, int, int);
560
561/* t4_sge.c */
562void t4_sge_init(struct adapter *);
563int t4_create_dma_tag(struct adapter *);
564int t4_destroy_dma_tag(struct adapter *);
565int t4_setup_adapter_iqs(struct adapter *);
566int t4_teardown_adapter_iqs(struct adapter *);
567int t4_setup_eth_queues(struct port_info *);
568int t4_teardown_eth_queues(struct port_info *);
569void t4_intr_all(void *);
570void t4_intr_fwd(void *);
571void t4_intr_err(void *);
572void t4_intr_evt(void *);
573void t4_intr_data(void *);
574int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
575void t4_update_fl_bufsize(struct ifnet *);
576
577#endif
351
352 /* stats for common events first */
353
354 uint64_t rxcsum; /* # of times hardware assisted with checksum */
355 uint64_t vlan_extraction;/* # of times VLAN tag was extracted */
356
357 /* stats for not-that-common events */
358
359} __aligned(CACHE_LINE_SIZE);
360
361struct sge {
362 uint16_t timer_val[SGE_NTIMERS];
363 uint8_t counter_val[SGE_NCOUNTERS];
364
365 int nrxq; /* total rx queues (all ports and the rest) */
366 int ntxq; /* total tx queues (all ports and the rest) */
367 int niq; /* total ingress queues */
368 int neq; /* total egress queues */
369
370 struct sge_iq fwq; /* Firmware event queue */
371 struct sge_iq *fiq; /* Forwarded interrupt queues (INTR_FWD) */
372 struct sge_txq *txq; /* NIC tx queues */
373 struct sge_rxq *rxq; /* NIC rx queues */
374
375 uint16_t iq_start;
376 int eq_start;
377 struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
378 struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */
379};
380
381struct adapter {
382 device_t dev;
383 struct cdev *cdev;
384
385 /* PCIe register resources */
386 int regs_rid;
387 struct resource *regs_res;
388 int msix_rid;
389 struct resource *msix_res;
390 bus_space_handle_t bh;
391 bus_space_tag_t bt;
392 bus_size_t mmio_len;
393
394 unsigned int pf;
395 unsigned int mbox;
396
397 /* Interrupt information */
398 int intr_type;
399 int intr_count;
400 struct irq {
401 struct resource *res;
402 int rid;
403 void *tag;
404 } *irq;
405
406 bus_dma_tag_t dmat; /* Parent DMA tag */
407
408 struct sge sge;
409
410 struct port_info *port[MAX_NPORTS];
411 uint8_t chan_map[NCHAN];
412
413 struct tid_info tids;
414
415 int registered_device_map;
416 int open_device_map;
417 int flags;
418
419 char fw_version[32];
420 struct adapter_params params;
421 struct t4_virt_res vres;
422
423 struct mtx sc_lock;
424 char lockname[16];
425};
426
/* Adapter-wide lock (sc->sc_lock). */
#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

/* Per-port lock (pi->pi_lock). */
#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

/* Ingress queue lock (iq->iq_lock). */
#define IQ_LOCK(iq)			mtx_lock(&(iq)->iq_lock)
#define IQ_UNLOCK(iq)			mtx_unlock(&(iq)->iq_lock)
#define IQ_LOCK_ASSERT_OWNED(iq)	mtx_assert(&(iq)->iq_lock, MA_OWNED)
#define IQ_LOCK_ASSERT_NOTOWNED(iq)	mtx_assert(&(iq)->iq_lock, MA_NOTOWNED)

/* Free list lock (fl->fl_lock). */
#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

/* An rx queue is locked via its embedded ingress queue's lock. */
#define RXQ_LOCK(rxq)			IQ_LOCK(&(rxq)->iq)
#define RXQ_UNLOCK(rxq)			IQ_UNLOCK(&(rxq)->iq)
#define RXQ_LOCK_ASSERT_OWNED(rxq)	IQ_LOCK_ASSERT_OWNED(&(rxq)->iq)
#define RXQ_LOCK_ASSERT_NOTOWNED(rxq)	IQ_LOCK_ASSERT_NOTOWNED(&(rxq)->iq)

/* An rx queue's free list is locked via the embedded fl's lock. */
#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

/* Egress queue lock (eq->eq_lock). */
#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

/* A tx queue is locked via its embedded egress queue's lock. */
#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
469
/*
 * Iterate over a port's tx (rx) queues: the queue pointer walks the
 * adapter's sge.txq (sge.rxq) array starting at pi->first_txq
 * (pi->first_rxq) for pi->ntxq (pi->nrxq) entries, with 'iter' as the
 * 0-based count.
 *
 * The initialization is folded into the for statement so that the macro
 * expands to a single statement.  The previous form (assignment followed
 * by a for loop) ran the loop unconditionally when used as the body of an
 * unbraced if/else, with the queue pointer possibly uninitialized.
 */
#define for_each_txq(pi, iter, txq) \
	for (iter = 0, txq = &(pi)->adapter->sge.txq[(pi)->first_txq]; \
	    iter < (pi)->ntxq; ++iter, ++txq)
#define for_each_rxq(pi, iter, rxq) \
	for (iter = 0, rxq = &(pi)->adapter->sge.rxq[(pi)->first_rxq]; \
	    iter < (pi)->nrxq; ++iter, ++rxq)

/* # of forwarded interrupt queues: one per vector beyond the first, min 1. */
#define NFIQ(sc) ((sc)->intr_count > 1 ? (sc)->intr_count - 1 : 1)
478
479static inline uint32_t
480t4_read_reg(struct adapter *sc, uint32_t reg)
481{
482 return bus_space_read_4(sc->bt, sc->bh, reg);
483}
484
/*
 * Write a 32-bit value to the adapter register at byte offset 'reg'
 * through the mapped register BAR (sc->bt/sc->bh).
 */
static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{
	bus_space_write_4(sc->bt, sc->bh, reg, val);
}
490
491static inline uint64_t
492t4_read_reg64(struct adapter *sc, uint32_t reg)
493{
494 return t4_bus_space_read_8(sc->bt, sc->bh, reg);
495}
496
/*
 * Write a 64-bit value to the adapter register at byte offset 'reg' via
 * the driver's t4_bus_space_write_8 helper.
 */
static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{
	t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
}
502
/*
 * OS facade for PCI config space access in 1/2/4-byte widths — thin
 * wrappers around pci_read_config/pci_write_config on sc->dev, presumably
 * for use by the OS-independent common code (confirm against t4_hw.c).
 */
static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{
	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{
	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{
	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{
	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{
	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{
	pci_write_config(sc->dev, reg, val, 4);
}
538
/* Return the port_info for port index 'idx' (no bounds check on idx). */
static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{
	return (sc->port[idx]);
}
544
/* Copy an Ethernet MAC address into port 'idx's hw_addr. */
static inline void
t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
{
	bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
}
550
551static inline bool is_10G_port(const struct port_info *pi)
552{
553 return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
554}
555
/* t4_main.c */
void cxgbe_txq_start(void *, int);	/* NOTE(review): signature matches a
					   task handler — confirm caller */
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int);

/* t4_sge.c */
void t4_sge_init(struct adapter *);
int t4_create_dma_tag(struct adapter *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_iqs(struct adapter *);
int t4_teardown_adapter_iqs(struct adapter *);
int t4_setup_eth_queues(struct port_info *);
int t4_teardown_eth_queues(struct port_info *);
/* Interrupt handlers (driver_intr_t-style void * argument). */
void t4_intr_all(void *);
void t4_intr_fwd(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_intr_data(void *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);
579
580#endif