t4_sge.c (221911) t4_sge.c (222085)
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_sge.c 221911 2011-05-14 19:27:15Z np $");
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_sge.c 222085 2011-05-18 22:09:04Z np $");
30
31#include "opt_inet.h"
32
33#include <sys/types.h>
34#include <sys/mbuf.h>
35#include <sys/socket.h>
36#include <sys/kernel.h>
37#include <sys/malloc.h>
38#include <sys/queue.h>
39#include <sys/taskqueue.h>
40#include <sys/sysctl.h>
41#include <net/bpf.h>
42#include <net/ethernet.h>
43#include <net/if.h>
44#include <net/if_vlan_var.h>
45#include <netinet/in.h>
46#include <netinet/ip.h>
47#include <netinet/tcp.h>
48
49#include "common/common.h"
50#include "common/t4_regs.h"
51#include "common/t4_regs_values.h"
52#include "common/t4_msg.h"
53#include "common/t4fw_interface.h"
54
55struct fl_buf_info {
56 int size;
57 int type;
58 uma_zone_t zone;
59};
60
61/* Filled up by t4_sge_modload */
62static struct fl_buf_info fl_buf_info[FL_BUF_SIZES];
63
64#define FL_BUF_SIZE(x) (fl_buf_info[x].size)
65#define FL_BUF_TYPE(x) (fl_buf_info[x].type)
66#define FL_BUF_ZONE(x) (fl_buf_info[x].zone)
67
68enum {
69 FL_PKTSHIFT = 2
70};
71
72#define FL_ALIGN min(CACHE_LINE_SIZE, 32)
73#if CACHE_LINE_SIZE > 64
74#define SPG_LEN 128
75#else
76#define SPG_LEN 64
77#endif
78
79/* Used to track coalesced tx work request */
80struct txpkts {
81 uint64_t *flitp; /* ptr to flit where next pkt should start */
82 uint8_t npkt; /* # of packets in this work request */
83 uint8_t nflits; /* # of flits used by this work request */
84 uint16_t plen; /* total payload (sum of all packets) */
85};
86
87/* A packet's SGL. This + m_pkthdr has all info needed for tx */
88struct sgl {
89 int nsegs; /* # of segments in the SGL, 0 means imm. tx */
90 int nflits; /* # of flits needed for the SGL */
91 bus_dma_segment_t seg[TX_SGL_SEGS];
92};
93
94static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
95 int, iq_intr_handler_t *, char *);
96static inline void init_fl(struct sge_fl *, int, char *);
97static inline void init_eq(struct sge_eq *, int, char *);
98static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
99 bus_addr_t *, void **);
100static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
101 void *);
102static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
103 int);
103 int, int);
104static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
105static int alloc_iq(struct sge_iq *, int);
106static int free_iq(struct sge_iq *);
107static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
108static int free_rxq(struct port_info *, struct sge_rxq *);
109static int alloc_ctrlq(struct adapter *, struct sge_ctrlq *, int);
110static int free_ctrlq(struct adapter *, struct sge_ctrlq *);
111static int alloc_txq(struct port_info *, struct sge_txq *, int);
112static int free_txq(struct port_info *, struct sge_txq *);
113static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
114static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
115static inline void iq_next(struct sge_iq *);
116static inline void ring_fl_db(struct adapter *, struct sge_fl *);
117static void refill_fl(struct adapter *, struct sge_fl *, int, int);
118static int alloc_fl_sdesc(struct sge_fl *);
119static void free_fl_sdesc(struct sge_fl *);
120static int alloc_tx_maps(struct sge_txq *);
121static void free_tx_maps(struct sge_txq *);
122static void set_fl_tag_idx(struct sge_fl *, int);
123
124static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
125static int free_pkt_sgl(struct sge_txq *, struct sgl *);
126static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
127 struct sgl *);
128static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
129 struct mbuf *, struct sgl *);
130static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
131static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
132 struct txpkts *, struct mbuf *, struct sgl *);
133static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
134static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
135static inline void ring_eq_db(struct adapter *, struct sge_eq *);
136static inline int reclaimable(struct sge_eq *);
137static int reclaim_tx_descs(struct sge_txq *, int, int);
138static void write_eqflush_wr(struct sge_eq *);
139static __be64 get_flit(bus_dma_segment_t *, int, int);
140static int handle_sge_egr_update(struct adapter *,
141 const struct cpl_sge_egr_update *);
142
143static int ctrl_tx(struct adapter *, struct sge_ctrlq *, struct mbuf *);
144static int sysctl_abs_id(SYSCTL_HANDLER_ARGS);
145
146extern void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
147
148/*
149 * Called on MOD_LOAD and fills up fl_buf_info[].
150 */
151void
152t4_sge_modload(void)
153{
154 int i;
155 int bufsize[FL_BUF_SIZES] = {
156 MCLBYTES,
157#if MJUMPAGESIZE != MCLBYTES
158 MJUMPAGESIZE,
159#endif
160 MJUM9BYTES,
161 MJUM16BYTES
162 };
163
164 for (i = 0; i < FL_BUF_SIZES; i++) {
165 FL_BUF_SIZE(i) = bufsize[i];
166 FL_BUF_TYPE(i) = m_gettype(bufsize[i]);
167 FL_BUF_ZONE(i) = m_getzone(bufsize[i]);
168 }
169}
170
171/**
172 * t4_sge_init - initialize SGE
173 * @sc: the adapter
174 *
175 * Performs SGE initialization needed every time after a chip reset.
176 * We do not initialize any of the queues here, instead the driver
177 * top-level must request them individually.
178 */
179void
180t4_sge_init(struct adapter *sc)
181{
182 struct sge *s = &sc->sge;
183 int i;
184
185 t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) |
186 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
187 F_EGRSTATUSPAGESIZE,
188 V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) |
189 V_PKTSHIFT(FL_PKTSHIFT) |
190 F_RXPKTCPLMODE |
191 V_EGRSTATUSPAGESIZE(SPG_LEN == 128));
192 t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE,
193 V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0),
194 V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
195
196 for (i = 0; i < FL_BUF_SIZES; i++) {
197 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
198 FL_BUF_SIZE(i));
199 }
200
201 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
202 V_THRESHOLD_0(s->counter_val[0]) |
203 V_THRESHOLD_1(s->counter_val[1]) |
204 V_THRESHOLD_2(s->counter_val[2]) |
205 V_THRESHOLD_3(s->counter_val[3]));
206
207 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
208 V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) |
209 V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1])));
210 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
211 V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) |
212 V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3])));
213 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
214 V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) |
215 V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5])));
216}
217
218int
219t4_create_dma_tag(struct adapter *sc)
220{
221 int rc;
222
223 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
224 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
225 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
226 NULL, &sc->dmat);
227 if (rc != 0) {
228 device_printf(sc->dev,
229 "failed to create main DMA tag: %d\n", rc);
230 }
231
232 return (rc);
233}
234
235int
236t4_destroy_dma_tag(struct adapter *sc)
237{
238 if (sc->dmat)
239 bus_dma_tag_destroy(sc->dmat);
240
241 return (0);
242}
243
244/*
245 * Allocate and initialize the firmware event queue, control queues, and the
246 * forwarded interrupt queues (if any). The adapter owns all these queues as
247 * they are not associated with any particular port.
248 *
249 * Returns errno on failure. Resources allocated up to that point may still be
250 * allocated. Caller is responsible for cleanup in case this function fails.
251 */
252int
253t4_setup_adapter_queues(struct adapter *sc)
254{
255 int i, rc;
256 struct sge_iq *iq, *fwq;
257 struct sge_ctrlq *ctrlq;
258 iq_intr_handler_t *handler;
259 char name[16];
260
261 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
262
263 if (sysctl_ctx_init(&sc->ctx) == 0) {
264 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
265 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
266
267 sc->oid_ctrlq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
268 "ctrlq", CTLFLAG_RD, NULL, "ctrl queues");
269 }
270
271 fwq = &sc->sge.fwq;
272 if (sc->flags & INTR_FWD) {
273 iq = &sc->sge.fiq[0];
274
275 /*
276 * Forwarded interrupt queues - allocate 1 if there's only 1
277 * vector available, one less than the number of vectors
278 * otherwise (the first vector is reserved for the error
279 * interrupt in that case).
280 */
281 i = sc->intr_count > 1 ? 1 : 0;
282 for (; i < sc->intr_count; i++, iq++) {
283
284 snprintf(name, sizeof(name), "%s fiq%d",
285 device_get_nameunit(sc->dev), i);
286 init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL,
287 name);
288
289 rc = alloc_iq(iq, i);
290 if (rc != 0) {
291 device_printf(sc->dev,
292 "failed to create fwd intr queue %d: %d\n",
293 i, rc);
294 return (rc);
295 }
296 }
297
298 handler = t4_evt_rx;
299 i = 0; /* forward fwq's interrupt to the first fiq */
300 } else {
301 handler = NULL;
302 i = 1; /* fwq should use vector 1 (0 is used by error) */
303 }
304
305 snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
306 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
307 rc = alloc_iq(fwq, i);
308 if (rc != 0) {
309 device_printf(sc->dev,
310 "failed to create firmware event queue: %d\n", rc);
311
312 return (rc);
313 }
314
315 /*
316 * Control queues - one per hardware channel.
317 */
318 ctrlq = &sc->sge.ctrlq[0];
319 for (i = 0; i < NCHAN; i++, ctrlq++) {
320 snprintf(name, sizeof(name), "%s ctrlq%d",
321 device_get_nameunit(sc->dev), i);
322 init_eq(&ctrlq->eq, CTRL_EQ_QSIZE, name);
323
324 rc = alloc_ctrlq(sc, ctrlq, i);
325 if (rc != 0) {
326 device_printf(sc->dev,
327 "failed to create control queue %d: %d\n", i, rc);
328 return (rc);
329 }
330 }
331
332 return (rc);
333}
334
335/*
336 * Idempotent
337 */
338int
339t4_teardown_adapter_queues(struct adapter *sc)
340{
341 int i;
342 struct sge_iq *iq;
343
344 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
345
346 /* Do this before freeing the queues */
347 if (sc->oid_ctrlq) {
348 sysctl_ctx_free(&sc->ctx);
349 sc->oid_ctrlq = NULL;
350 }
351
352 for (i = 0; i < NCHAN; i++)
353 free_ctrlq(sc, &sc->sge.ctrlq[i]);
354
355 iq = &sc->sge.fwq;
356 free_iq(iq);
357 if (sc->flags & INTR_FWD) {
358 for (i = 0; i < NFIQ(sc); i++) {
359 iq = &sc->sge.fiq[i];
360 free_iq(iq);
361 }
362 }
363
364 return (0);
365}
366
367int
368t4_setup_eth_queues(struct port_info *pi)
369{
370 int rc = 0, i, intr_idx;
371 struct sge_rxq *rxq;
372 struct sge_txq *txq;
373 char name[16];
374 struct adapter *sc = pi->adapter;
375
376 if (sysctl_ctx_init(&pi->ctx) == 0) {
377 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
378 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
379
380 pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
381 "rxq", CTLFLAG_RD, NULL, "rx queues");
382 pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
383 "txq", CTLFLAG_RD, NULL, "tx queues");
384 }
385
386 for_each_rxq(pi, i, rxq) {
387
388 snprintf(name, sizeof(name), "%s rxq%d-iq",
389 device_get_nameunit(pi->dev), i);
390 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
391 pi->qsize_rxq, RX_IQ_ESIZE,
392 sc->flags & INTR_FWD ? t4_eth_rx : NULL, name);
393
394 snprintf(name, sizeof(name), "%s rxq%d-fl",
395 device_get_nameunit(pi->dev), i);
396 init_fl(&rxq->fl, pi->qsize_rxq / 8, name);
397
398 if (sc->flags & INTR_FWD)
399 intr_idx = (pi->first_rxq + i) % NFIQ(sc);
400 else
401 intr_idx = pi->first_rxq + i + 2;
402
403 rc = alloc_rxq(pi, rxq, intr_idx, i);
404 if (rc != 0)
405 goto done;
406
407 intr_idx++;
408 }
409
410 for_each_txq(pi, i, txq) {
411
412 snprintf(name, sizeof(name), "%s txq%d",
413 device_get_nameunit(pi->dev), i);
414 init_eq(&txq->eq, pi->qsize_txq, name);
415
416 rc = alloc_txq(pi, txq, i);
417 if (rc != 0)
418 goto done;
419 }
420
421done:
422 if (rc)
423 t4_teardown_eth_queues(pi);
424
425 return (rc);
426}
427
428/*
429 * Idempotent
430 */
431int
432t4_teardown_eth_queues(struct port_info *pi)
433{
434 int i;
435 struct sge_rxq *rxq;
436 struct sge_txq *txq;
437
438 /* Do this before freeing the queues */
439 if (pi->oid_txq || pi->oid_rxq) {
440 sysctl_ctx_free(&pi->ctx);
441 pi->oid_txq = pi->oid_rxq = NULL;
442 }
443
444 for_each_txq(pi, i, txq) {
445 free_txq(pi, txq);
446 }
447
448 for_each_rxq(pi, i, rxq) {
449 free_rxq(pi, rxq);
450 }
451
452 return (0);
453}
454
455/* Deals with errors and forwarded interrupts */
456void
457t4_intr_all(void *arg)
458{
459 struct adapter *sc = arg;
460
461 t4_intr_err(arg);
462 t4_intr_fwd(&sc->sge.fiq[0]);
463}
464
465/* Deals with forwarded interrupts on the given ingress queue */
466void
467t4_intr_fwd(void *arg)
468{
469 struct sge_iq *iq = arg, *q;
470 struct adapter *sc = iq->adapter;
471 struct rsp_ctrl *ctrl;
472 int ndesc_pending = 0, ndesc_total = 0;
473 int qid;
474
475 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
476 return;
477
478 while (is_new_response(iq, &ctrl)) {
479
480 rmb();
481
482 /* Only interrupt muxing expected on this queue */
483 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR,
484 ("unexpected event on forwarded interrupt queue: %x",
485 G_RSPD_TYPE(ctrl->u.type_gen)));
486
487 qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start;
488 q = sc->sge.iqmap[qid];
489
490 q->handler(q);
491
492 ndesc_total++;
493 if (++ndesc_pending >= iq->qsize / 4) {
494 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
495 V_CIDXINC(ndesc_pending) |
496 V_INGRESSQID(iq->cntxt_id) |
497 V_SEINTARM(
498 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
499 ndesc_pending = 0;
500 }
501
502 iq_next(iq);
503 }
504
505 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndesc_pending) |
506 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
507
508 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
509}
510
511/* Deals with error interrupts */
512void
513t4_intr_err(void *arg)
514{
515 struct adapter *sc = arg;
516
517 if (sc->intr_type == INTR_INTX)
518 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
519
520 t4_slow_intr_handler(sc);
521}
522
523/* Deals with the firmware event queue */
524void
525t4_intr_evt(void *arg)
526{
527 struct sge_iq *iq = arg;
528
529 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
530 return;
531
532 t4_evt_rx(arg);
533
534 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
535}
536
537void
538t4_intr_data(void *arg)
539{
540 struct sge_iq *iq = arg;
541
542 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
543 return;
544
545 t4_eth_rx(arg);
546
547 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
548}
549
550void
551t4_evt_rx(void *arg)
552{
553 struct sge_iq *iq = arg;
554 struct adapter *sc = iq->adapter;
555 struct rsp_ctrl *ctrl;
556 const struct rss_header *rss;
557 int ndesc_pending = 0, ndesc_total = 0;
558
559 KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__));
560
561 while (is_new_response(iq, &ctrl)) {
562
563 rmb();
564
565 rss = (const void *)iq->cdesc;
566
567 /* Should only get CPL on this queue */
568 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL,
569 ("%s: unexpected type %d", __func__,
570 G_RSPD_TYPE(ctrl->u.type_gen)));
571
572 switch (rss->opcode) {
573 case CPL_FW4_MSG:
574 case CPL_FW6_MSG: {
575 const struct cpl_fw6_msg *cpl;
576
577 cpl = (const void *)(rss + 1);
578 if (cpl->type == FW6_TYPE_CMD_RPL)
579 t4_handle_fw_rpl(sc, cpl->data);
580
581 break;
582 }
583 case CPL_SGE_EGR_UPDATE:
584 handle_sge_egr_update(sc, (const void *)(rss + 1));
585 break;
586 case CPL_SET_TCB_RPL:
587 filter_rpl(sc, (const void *) (rss + 1));
588 break;
589 default:
590 device_printf(sc->dev,
591 "can't handle CPL opcode %d.", rss->opcode);
592 }
593
594 ndesc_total++;
595 if (++ndesc_pending >= iq->qsize / 4) {
596 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
597 V_CIDXINC(ndesc_pending) |
598 V_INGRESSQID(iq->cntxt_id) |
599 V_SEINTARM(
600 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
601 ndesc_pending = 0;
602 }
603 iq_next(iq);
604 }
605
606 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndesc_pending) |
607 V_INGRESSQID(iq->cntxt_id) | V_SEINTARM(iq->intr_params));
608}
609
610#ifdef T4_PKT_TIMESTAMP
611#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
612#else
613#define RX_COPY_THRESHOLD MINCLSIZE
614#endif
615
616void
617t4_eth_rx(void *arg)
618{
619 struct sge_rxq *rxq = arg;
620 struct sge_iq *iq = arg;
621 struct adapter *sc = iq->adapter;
622 struct rsp_ctrl *ctrl;
623 struct ifnet *ifp = rxq->ifp;
624 struct sge_fl *fl = &rxq->fl;
625 struct fl_sdesc *sd = &fl->sdesc[fl->cidx], *sd_next;
626 const struct rss_header *rss;
627 const struct cpl_rx_pkt *cpl;
628 uint32_t len;
629 int ndescs = 0, i;
630 struct mbuf *m0, *m;
631#ifdef INET
632 struct lro_ctrl *lro = &rxq->lro;
633 struct lro_entry *l;
634#endif
635
636 prefetch(sd->m);
637 prefetch(sd->cl);
638
639 iq->intr_next = iq->intr_params;
640 while (is_new_response(iq, &ctrl)) {
641
642 rmb();
643
644 rss = (const void *)iq->cdesc;
645 i = G_RSPD_TYPE(ctrl->u.type_gen);
646
647 if (__predict_false(i == X_RSPD_TYPE_CPL)) {
648
649 /* Can't be anything except an egress update */
650 KASSERT(rss->opcode == CPL_SGE_EGR_UPDATE,
651 ("%s: unexpected CPL %x", __func__, rss->opcode));
652
653 handle_sge_egr_update(sc, (const void *)(rss + 1));
654 goto nextdesc;
655 }
656 KASSERT(i == X_RSPD_TYPE_FLBUF && rss->opcode == CPL_RX_PKT,
657 ("%s: unexpected CPL %x rsp %d", __func__, rss->opcode, i));
658
659 sd_next = sd + 1;
660 if (__predict_false(fl->cidx + 1 == fl->cap))
661 sd_next = fl->sdesc;
662 prefetch(sd_next->m);
663 prefetch(sd_next->cl);
664
665 cpl = (const void *)(rss + 1);
666
667 m0 = sd->m;
668 sd->m = NULL; /* consumed */
669
670 len = be32toh(ctrl->pldbuflen_qid);
671 if (__predict_false((len & F_RSPD_NEWBUF) == 0))
672 panic("%s: cannot handle packed frames", __func__);
673 len = G_RSPD_LEN(len);
674
675 bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
676 BUS_DMASYNC_POSTREAD);
677
678 m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR);
679
680#ifdef T4_PKT_TIMESTAMP
681 *mtod(m0, uint64_t *) =
682 be64toh(ctrl->u.last_flit & 0xfffffffffffffff);
683 m0->m_data += 8;
684
685 /*
686 * 60 bit timestamp value is *(uint64_t *)m0->m_pktdat. Note
687 * that it is in the leading free-space (see M_LEADINGSPACE) in
688 * the mbuf. The kernel can clobber it during a pullup,
689 * m_copymdata, etc. You need to make sure that the mbuf
690 * reaches you unmolested if you care about the timestamp.
691 */
692#endif
693
694 if (len < RX_COPY_THRESHOLD) {
695 /* copy data to mbuf, buffer will be recycled */
696 bcopy(sd->cl, mtod(m0, caddr_t), len);
697 m0->m_len = len;
698 } else {
699 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
700 m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
701 sd->cl = NULL; /* consumed */
702 m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
703 }
704
705 len -= FL_PKTSHIFT;
706 m0->m_len -= FL_PKTSHIFT;
707 m0->m_data += FL_PKTSHIFT;
708
709 m0->m_pkthdr.len = len;
710 m0->m_pkthdr.rcvif = ifp;
711 m0->m_flags |= M_FLOWID;
712 m0->m_pkthdr.flowid = rss->hash_val;
713
714 if (cpl->csum_calc && !cpl->err_vec &&
715 ifp->if_capenable & IFCAP_RXCSUM) {
716 m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
717 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
718 if (cpl->ip_frag)
719 m0->m_pkthdr.csum_data = be16toh(cpl->csum);
720 else
721 m0->m_pkthdr.csum_data = 0xffff;
722 rxq->rxcsum++;
723 }
724
725 if (cpl->vlan_ex) {
726 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
727 m0->m_flags |= M_VLANTAG;
728 rxq->vlan_extraction++;
729 }
730
731 i = 1; /* # of fl sdesc used */
732 sd = sd_next;
733 if (__predict_false(++fl->cidx == fl->cap))
734 fl->cidx = 0;
735
736 len -= m0->m_len;
737 m = m0;
738 while (len) {
739 i++;
740
741 sd_next = sd + 1;
742 if (__predict_false(fl->cidx + 1 == fl->cap))
743 sd_next = fl->sdesc;
744 prefetch(sd_next->m);
745 prefetch(sd_next->cl);
746
747 m->m_next = sd->m;
748 sd->m = NULL; /* consumed */
749 m = m->m_next;
750
751 bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
752 BUS_DMASYNC_POSTREAD);
753
754 m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
755 if (len <= MLEN) {
756 bcopy(sd->cl, mtod(m, caddr_t), len);
757 m->m_len = len;
758 } else {
759 bus_dmamap_unload(fl->tag[sd->tag_idx],
760 sd->map);
761 m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
762 sd->cl = NULL; /* consumed */
763 m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
764 }
765
766 i++;
767 sd = sd_next;
768 if (__predict_false(++fl->cidx == fl->cap))
769 fl->cidx = 0;
770
771 len -= m->m_len;
772 }
773
774#ifdef INET
775 if (cpl->l2info & htobe32(F_RXF_LRO) &&
776 rxq->flags & RXQ_LRO_ENABLED &&
777 tcp_lro_rx(lro, m0, 0) == 0) {
778 /* queued for LRO */
779 } else
780#endif
781 ifp->if_input(ifp, m0);
782
783 FL_LOCK(fl);
784 fl->needed += i;
785 if (fl->needed >= 32)
786 refill_fl(sc, fl, 64, 32);
787 FL_UNLOCK(fl);
788
789nextdesc: ndescs++;
790 iq_next(iq);
791
792 if (ndescs > 32) {
793 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
794 V_CIDXINC(ndescs) |
795 V_INGRESSQID((u32)iq->cntxt_id) |
796 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
797 ndescs = 0;
798 }
799 }
800
801#ifdef INET
802 while (!SLIST_EMPTY(&lro->lro_active)) {
803 l = SLIST_FIRST(&lro->lro_active);
804 SLIST_REMOVE_HEAD(&lro->lro_active, next);
805 tcp_lro_flush(lro, l);
806 }
807#endif
808
809 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
810 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
811
812 FL_LOCK(fl);
813 if (fl->needed >= 32)
814 refill_fl(sc, fl, 128, 8);
815 FL_UNLOCK(fl);
816}
817
818int
819t4_mgmt_tx(struct adapter *sc, struct mbuf *m)
820{
821 return ctrl_tx(sc, &sc->sge.ctrlq[0], m);
822}
823
824/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
825#define TXPKTS_PKT_HDR ((\
826 sizeof(struct ulp_txpkt) + \
827 sizeof(struct ulptx_idata) + \
828 sizeof(struct cpl_tx_pkt_core) \
829 ) / 8)
830
831/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
832#define TXPKTS_WR_HDR (\
833 sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
834 TXPKTS_PKT_HDR)
835
836/* Header of a tx WR, before SGL of first packet (in flits) */
837#define TXPKT_WR_HDR ((\
838 sizeof(struct fw_eth_tx_pkt_wr) + \
839 sizeof(struct cpl_tx_pkt_core) \
840 ) / 8 )
841
842/* Header of a tx LSO WR, before SGL of first packet (in flits) */
843#define TXPKT_LSO_WR_HDR ((\
844 sizeof(struct fw_eth_tx_pkt_wr) + \
845 sizeof(struct cpl_tx_pkt_lso) + \
846 sizeof(struct cpl_tx_pkt_core) \
847 ) / 8 )
848
849int
850t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
851{
852 struct port_info *pi = (void *)ifp->if_softc;
853 struct adapter *sc = pi->adapter;
854 struct sge_eq *eq = &txq->eq;
855 struct buf_ring *br = txq->br;
856 struct mbuf *next;
857 int rc, coalescing, can_reclaim;
858 struct txpkts txpkts;
859 struct sgl sgl;
860
861 TXQ_LOCK_ASSERT_OWNED(txq);
862 KASSERT(m, ("%s: called with nothing to do.", __func__));
863
864 prefetch(&eq->desc[eq->pidx]);
865 prefetch(&txq->sdesc[eq->pidx]);
866
867 txpkts.npkt = 0;/* indicates there's nothing in txpkts */
868 coalescing = 0;
869
870 if (eq->avail < 8)
871 reclaim_tx_descs(txq, 0, 8);
872
873 for (; m; m = next ? next : drbr_dequeue(ifp, br)) {
874
875 if (eq->avail < 8)
876 break;
877
878 next = m->m_nextpkt;
879 m->m_nextpkt = NULL;
880
881 if (next || buf_ring_peek(br))
882 coalescing = 1;
883
884 rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
885 if (rc != 0) {
886 if (rc == ENOMEM) {
887
888 /* Short of resources, suspend tx */
889
890 m->m_nextpkt = next;
891 break;
892 }
893
894 /*
895 * Unrecoverable error for this packet, throw it away
896 * and move on to the next. get_pkt_sgl may already
897 * have freed m (it will be NULL in that case and the
898 * m_freem here is still safe).
899 */
900
901 m_freem(m);
902 continue;
903 }
904
905 if (coalescing &&
906 add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {
907
908 /* Successfully absorbed into txpkts */
909
910 write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
911 goto doorbell;
912 }
913
914 /*
915 * We weren't coalescing to begin with, or current frame could
916 * not be coalesced (add_to_txpkts flushes txpkts if a frame
917 * given to it can't be coalesced). Either way there should be
918 * nothing in txpkts.
919 */
920 KASSERT(txpkts.npkt == 0,
921 ("%s: txpkts not empty: %d", __func__, txpkts.npkt));
922
923 /* We're sending out individual packets now */
924 coalescing = 0;
925
926 if (eq->avail < 8)
927 reclaim_tx_descs(txq, 0, 8);
928 rc = write_txpkt_wr(pi, txq, m, &sgl);
929 if (rc != 0) {
930
931 /* Short of hardware descriptors, suspend tx */
932
933 /*
934 * This is an unlikely but expensive failure. We've
935 * done all the hard work (DMA mappings etc.) and now we
936 * can't send out the packet. What's worse, we have to
937 * spend even more time freeing up everything in sgl.
938 */
939 txq->no_desc++;
940 free_pkt_sgl(txq, &sgl);
941
942 m->m_nextpkt = next;
943 break;
944 }
945
946 ETHER_BPF_MTAP(ifp, m);
947 if (sgl.nsegs == 0)
948 m_freem(m);
949
950doorbell:
951 /* Fewer and fewer doorbells as the queue fills up */
952 if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)))
953 ring_eq_db(sc, eq);
954
955 can_reclaim = reclaimable(eq);
956 if (can_reclaim >= 32)
957 reclaim_tx_descs(txq, can_reclaim, 32);
958 }
959
960 if (txpkts.npkt > 0)
961 write_txpkts_wr(txq, &txpkts);
962
963 /*
964 * m not NULL means there was an error but we haven't thrown it away.
965 * This can happen when we're short of tx descriptors (no_desc) or maybe
966 * even DMA maps (no_dmamap). Either way, a credit flush and reclaim
967 * will get things going again.
968 *
969 * If eq->avail is already 0 we know a credit flush was requested in the
970 * WR that reduced it to 0 so we don't need another flush (we don't have
971 * any descriptor for a flush WR anyway, duh).
972 */
973 if (m && eq->avail > 0 && !(eq->flags & EQ_CRFLUSHED)) {
974 struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];
975
976 txsd->desc_used = 1;
977 txsd->credits = 0;
978 write_eqflush_wr(eq);
979 }
980 txq->m = m;
981
982 if (eq->pending)
983 ring_eq_db(sc, eq);
984
985 can_reclaim = reclaimable(eq);
986 if (can_reclaim >= 32)
987 reclaim_tx_descs(txq, can_reclaim, 128);
988
989 return (0);
990}
991
992void
993t4_update_fl_bufsize(struct ifnet *ifp)
994{
995 struct port_info *pi = ifp->if_softc;
996 struct sge_rxq *rxq;
997 struct sge_fl *fl;
998 int i;
999
1000 for_each_rxq(pi, i, rxq) {
1001 fl = &rxq->fl;
1002
1003 FL_LOCK(fl);
1004 set_fl_tag_idx(fl, ifp->if_mtu);
1005 FL_UNLOCK(fl);
1006 }
1007}
1008
1009/*
1010 * A non-NULL handler indicates this iq will not receive direct interrupts, the
1011 * handler will be invoked by a forwarded interrupt queue.
1012 */
1013static inline void
1014init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
1015 int qsize, int esize, iq_intr_handler_t *handler, char *name)
1016{
1017 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
1018 ("%s: bad tmr_idx %d", __func__, tmr_idx));
1019 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */
1020 ("%s: bad pktc_idx %d", __func__, pktc_idx));
1021
1022 iq->flags = 0;
1023 iq->adapter = sc;
1024 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) |
1025 V_QINTR_CNT_EN(pktc_idx >= 0);
1026 iq->intr_pktc_idx = pktc_idx;
1027 iq->qsize = roundup(qsize, 16); /* See FW_IQ_CMD/iqsize */
1028 iq->esize = max(esize, 16); /* See FW_IQ_CMD/iqesize */
1029 iq->handler = handler;
1030 strlcpy(iq->lockname, name, sizeof(iq->lockname));
1031}
1032
1033static inline void
1034init_fl(struct sge_fl *fl, int qsize, char *name)
1035{
1036 fl->qsize = qsize;
1037 strlcpy(fl->lockname, name, sizeof(fl->lockname));
1038}
1039
1040static inline void
1041init_eq(struct sge_eq *eq, int qsize, char *name)
1042{
1043 eq->qsize = qsize;
1044 strlcpy(eq->lockname, name, sizeof(eq->lockname));
1045}
1046
1047static int
1048alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
1049 bus_dmamap_t *map, bus_addr_t *pa, void **va)
1050{
1051 int rc;
1052
1053 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
1054 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
1055 if (rc != 0) {
1056 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
1057 goto done;
1058 }
1059
1060 rc = bus_dmamem_alloc(*tag, va,
1061 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
1062 if (rc != 0) {
1063 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
1064 goto done;
1065 }
1066
1067 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
1068 if (rc != 0) {
1069 device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
1070 goto done;
1071 }
1072done:
1073 if (rc)
1074 free_ring(sc, *tag, *map, *pa, *va);
1075
1076 return (rc);
1077}
1078
1079static int
1080free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
1081 bus_addr_t pa, void *va)
1082{
1083 if (pa)
1084 bus_dmamap_unload(tag, map);
1085 if (va)
1086 bus_dmamem_free(tag, va, map);
1087 if (tag)
1088 bus_dma_tag_destroy(tag);
1089
1090 return (0);
1091}
1092
1093/*
1094 * Allocates the ring for an ingress queue and an optional freelist. If the
1095 * freelist is specified it will be allocated and then associated with the
1096 * ingress queue.
1097 *
1098 * Returns errno on failure. Resources allocated up to that point may still be
1099 * allocated. Caller is responsible for cleanup in case this function fails.
1100 *
1101 * If the ingress queue will take interrupts directly (iq->handler == NULL) then
1102 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies
1103 * the index of the queue to which its interrupts will be forwarded.
1104 */
1105static int
1106alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
104static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
105static int alloc_iq(struct sge_iq *, int);
106static int free_iq(struct sge_iq *);
107static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
108static int free_rxq(struct port_info *, struct sge_rxq *);
109static int alloc_ctrlq(struct adapter *, struct sge_ctrlq *, int);
110static int free_ctrlq(struct adapter *, struct sge_ctrlq *);
111static int alloc_txq(struct port_info *, struct sge_txq *, int);
112static int free_txq(struct port_info *, struct sge_txq *);
113static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
114static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
115static inline void iq_next(struct sge_iq *);
116static inline void ring_fl_db(struct adapter *, struct sge_fl *);
117static void refill_fl(struct adapter *, struct sge_fl *, int, int);
118static int alloc_fl_sdesc(struct sge_fl *);
119static void free_fl_sdesc(struct sge_fl *);
120static int alloc_tx_maps(struct sge_txq *);
121static void free_tx_maps(struct sge_txq *);
122static void set_fl_tag_idx(struct sge_fl *, int);
123
124static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
125static int free_pkt_sgl(struct sge_txq *, struct sgl *);
126static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
127 struct sgl *);
128static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
129 struct mbuf *, struct sgl *);
130static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
131static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
132 struct txpkts *, struct mbuf *, struct sgl *);
133static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
134static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
135static inline void ring_eq_db(struct adapter *, struct sge_eq *);
136static inline int reclaimable(struct sge_eq *);
137static int reclaim_tx_descs(struct sge_txq *, int, int);
138static void write_eqflush_wr(struct sge_eq *);
139static __be64 get_flit(bus_dma_segment_t *, int, int);
140static int handle_sge_egr_update(struct adapter *,
141 const struct cpl_sge_egr_update *);
142
143static int ctrl_tx(struct adapter *, struct sge_ctrlq *, struct mbuf *);
144static int sysctl_abs_id(SYSCTL_HANDLER_ARGS);
145
146extern void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
147
148/*
149 * Called on MOD_LOAD and fills up fl_buf_info[].
150 */
151void
152t4_sge_modload(void)
153{
154 int i;
155 int bufsize[FL_BUF_SIZES] = {
156 MCLBYTES,
157#if MJUMPAGESIZE != MCLBYTES
158 MJUMPAGESIZE,
159#endif
160 MJUM9BYTES,
161 MJUM16BYTES
162 };
163
164 for (i = 0; i < FL_BUF_SIZES; i++) {
165 FL_BUF_SIZE(i) = bufsize[i];
166 FL_BUF_TYPE(i) = m_gettype(bufsize[i]);
167 FL_BUF_ZONE(i) = m_getzone(bufsize[i]);
168 }
169}
170
171/**
172 * t4_sge_init - initialize SGE
173 * @sc: the adapter
174 *
175 * Performs SGE initialization needed every time after a chip reset.
176 * We do not initialize any of the queues here, instead the driver
177 * top-level must request them individually.
178 */
179void
180t4_sge_init(struct adapter *sc)
181{
182 struct sge *s = &sc->sge;
183 int i;
184
185 t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) |
186 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
187 F_EGRSTATUSPAGESIZE,
188 V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) |
189 V_PKTSHIFT(FL_PKTSHIFT) |
190 F_RXPKTCPLMODE |
191 V_EGRSTATUSPAGESIZE(SPG_LEN == 128));
192 t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE,
193 V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0),
194 V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
195
196 for (i = 0; i < FL_BUF_SIZES; i++) {
197 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
198 FL_BUF_SIZE(i));
199 }
200
201 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
202 V_THRESHOLD_0(s->counter_val[0]) |
203 V_THRESHOLD_1(s->counter_val[1]) |
204 V_THRESHOLD_2(s->counter_val[2]) |
205 V_THRESHOLD_3(s->counter_val[3]));
206
207 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
208 V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) |
209 V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1])));
210 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
211 V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) |
212 V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3])));
213 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
214 V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) |
215 V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5])));
216}
217
218int
219t4_create_dma_tag(struct adapter *sc)
220{
221 int rc;
222
223 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
224 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
225 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
226 NULL, &sc->dmat);
227 if (rc != 0) {
228 device_printf(sc->dev,
229 "failed to create main DMA tag: %d\n", rc);
230 }
231
232 return (rc);
233}
234
235int
236t4_destroy_dma_tag(struct adapter *sc)
237{
238 if (sc->dmat)
239 bus_dma_tag_destroy(sc->dmat);
240
241 return (0);
242}
243
244/*
245 * Allocate and initialize the firmware event queue, control queues, and the
246 * forwarded interrupt queues (if any). The adapter owns all these queues as
247 * they are not associated with any particular port.
248 *
249 * Returns errno on failure. Resources allocated up to that point may still be
250 * allocated. Caller is responsible for cleanup in case this function fails.
251 */
252int
253t4_setup_adapter_queues(struct adapter *sc)
254{
255 int i, rc;
256 struct sge_iq *iq, *fwq;
257 struct sge_ctrlq *ctrlq;
258 iq_intr_handler_t *handler;
259 char name[16];
260
261 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
262
263 if (sysctl_ctx_init(&sc->ctx) == 0) {
264 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
265 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
266
267 sc->oid_ctrlq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
268 "ctrlq", CTLFLAG_RD, NULL, "ctrl queues");
269 }
270
271 fwq = &sc->sge.fwq;
272 if (sc->flags & INTR_FWD) {
273 iq = &sc->sge.fiq[0];
274
275 /*
276 * Forwarded interrupt queues - allocate 1 if there's only 1
277 * vector available, one less than the number of vectors
278 * otherwise (the first vector is reserved for the error
279 * interrupt in that case).
280 */
281 i = sc->intr_count > 1 ? 1 : 0;
282 for (; i < sc->intr_count; i++, iq++) {
283
284 snprintf(name, sizeof(name), "%s fiq%d",
285 device_get_nameunit(sc->dev), i);
286 init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL,
287 name);
288
289 rc = alloc_iq(iq, i);
290 if (rc != 0) {
291 device_printf(sc->dev,
292 "failed to create fwd intr queue %d: %d\n",
293 i, rc);
294 return (rc);
295 }
296 }
297
298 handler = t4_evt_rx;
299 i = 0; /* forward fwq's interrupt to the first fiq */
300 } else {
301 handler = NULL;
302 i = 1; /* fwq should use vector 1 (0 is used by error) */
303 }
304
305 snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
306 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
307 rc = alloc_iq(fwq, i);
308 if (rc != 0) {
309 device_printf(sc->dev,
310 "failed to create firmware event queue: %d\n", rc);
311
312 return (rc);
313 }
314
315 /*
316 * Control queues - one per hardware channel.
317 */
318 ctrlq = &sc->sge.ctrlq[0];
319 for (i = 0; i < NCHAN; i++, ctrlq++) {
320 snprintf(name, sizeof(name), "%s ctrlq%d",
321 device_get_nameunit(sc->dev), i);
322 init_eq(&ctrlq->eq, CTRL_EQ_QSIZE, name);
323
324 rc = alloc_ctrlq(sc, ctrlq, i);
325 if (rc != 0) {
326 device_printf(sc->dev,
327 "failed to create control queue %d: %d\n", i, rc);
328 return (rc);
329 }
330 }
331
332 return (rc);
333}
334
335/*
336 * Idempotent
337 */
338int
339t4_teardown_adapter_queues(struct adapter *sc)
340{
341 int i;
342 struct sge_iq *iq;
343
344 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
345
346 /* Do this before freeing the queues */
347 if (sc->oid_ctrlq) {
348 sysctl_ctx_free(&sc->ctx);
349 sc->oid_ctrlq = NULL;
350 }
351
352 for (i = 0; i < NCHAN; i++)
353 free_ctrlq(sc, &sc->sge.ctrlq[i]);
354
355 iq = &sc->sge.fwq;
356 free_iq(iq);
357 if (sc->flags & INTR_FWD) {
358 for (i = 0; i < NFIQ(sc); i++) {
359 iq = &sc->sge.fiq[i];
360 free_iq(iq);
361 }
362 }
363
364 return (0);
365}
366
367int
368t4_setup_eth_queues(struct port_info *pi)
369{
370 int rc = 0, i, intr_idx;
371 struct sge_rxq *rxq;
372 struct sge_txq *txq;
373 char name[16];
374 struct adapter *sc = pi->adapter;
375
376 if (sysctl_ctx_init(&pi->ctx) == 0) {
377 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
378 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
379
380 pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
381 "rxq", CTLFLAG_RD, NULL, "rx queues");
382 pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
383 "txq", CTLFLAG_RD, NULL, "tx queues");
384 }
385
386 for_each_rxq(pi, i, rxq) {
387
388 snprintf(name, sizeof(name), "%s rxq%d-iq",
389 device_get_nameunit(pi->dev), i);
390 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
391 pi->qsize_rxq, RX_IQ_ESIZE,
392 sc->flags & INTR_FWD ? t4_eth_rx : NULL, name);
393
394 snprintf(name, sizeof(name), "%s rxq%d-fl",
395 device_get_nameunit(pi->dev), i);
396 init_fl(&rxq->fl, pi->qsize_rxq / 8, name);
397
398 if (sc->flags & INTR_FWD)
399 intr_idx = (pi->first_rxq + i) % NFIQ(sc);
400 else
401 intr_idx = pi->first_rxq + i + 2;
402
403 rc = alloc_rxq(pi, rxq, intr_idx, i);
404 if (rc != 0)
405 goto done;
406
407 intr_idx++;
408 }
409
410 for_each_txq(pi, i, txq) {
411
412 snprintf(name, sizeof(name), "%s txq%d",
413 device_get_nameunit(pi->dev), i);
414 init_eq(&txq->eq, pi->qsize_txq, name);
415
416 rc = alloc_txq(pi, txq, i);
417 if (rc != 0)
418 goto done;
419 }
420
421done:
422 if (rc)
423 t4_teardown_eth_queues(pi);
424
425 return (rc);
426}
427
428/*
429 * Idempotent
430 */
431int
432t4_teardown_eth_queues(struct port_info *pi)
433{
434 int i;
435 struct sge_rxq *rxq;
436 struct sge_txq *txq;
437
438 /* Do this before freeing the queues */
439 if (pi->oid_txq || pi->oid_rxq) {
440 sysctl_ctx_free(&pi->ctx);
441 pi->oid_txq = pi->oid_rxq = NULL;
442 }
443
444 for_each_txq(pi, i, txq) {
445 free_txq(pi, txq);
446 }
447
448 for_each_rxq(pi, i, rxq) {
449 free_rxq(pi, rxq);
450 }
451
452 return (0);
453}
454
455/* Deals with errors and forwarded interrupts */
456void
457t4_intr_all(void *arg)
458{
459 struct adapter *sc = arg;
460
461 t4_intr_err(arg);
462 t4_intr_fwd(&sc->sge.fiq[0]);
463}
464
465/* Deals with forwarded interrupts on the given ingress queue */
466void
467t4_intr_fwd(void *arg)
468{
469 struct sge_iq *iq = arg, *q;
470 struct adapter *sc = iq->adapter;
471 struct rsp_ctrl *ctrl;
472 int ndesc_pending = 0, ndesc_total = 0;
473 int qid;
474
475 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
476 return;
477
478 while (is_new_response(iq, &ctrl)) {
479
480 rmb();
481
482 /* Only interrupt muxing expected on this queue */
483 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR,
484 ("unexpected event on forwarded interrupt queue: %x",
485 G_RSPD_TYPE(ctrl->u.type_gen)));
486
487 qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start;
488 q = sc->sge.iqmap[qid];
489
490 q->handler(q);
491
492 ndesc_total++;
493 if (++ndesc_pending >= iq->qsize / 4) {
494 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
495 V_CIDXINC(ndesc_pending) |
496 V_INGRESSQID(iq->cntxt_id) |
497 V_SEINTARM(
498 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
499 ndesc_pending = 0;
500 }
501
502 iq_next(iq);
503 }
504
505 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndesc_pending) |
506 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
507
508 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
509}
510
511/* Deals with error interrupts */
512void
513t4_intr_err(void *arg)
514{
515 struct adapter *sc = arg;
516
517 if (sc->intr_type == INTR_INTX)
518 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
519
520 t4_slow_intr_handler(sc);
521}
522
523/* Deals with the firmware event queue */
524void
525t4_intr_evt(void *arg)
526{
527 struct sge_iq *iq = arg;
528
529 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
530 return;
531
532 t4_evt_rx(arg);
533
534 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
535}
536
537void
538t4_intr_data(void *arg)
539{
540 struct sge_iq *iq = arg;
541
542 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
543 return;
544
545 t4_eth_rx(arg);
546
547 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
548}
549
550void
551t4_evt_rx(void *arg)
552{
553 struct sge_iq *iq = arg;
554 struct adapter *sc = iq->adapter;
555 struct rsp_ctrl *ctrl;
556 const struct rss_header *rss;
557 int ndesc_pending = 0, ndesc_total = 0;
558
559 KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__));
560
561 while (is_new_response(iq, &ctrl)) {
562
563 rmb();
564
565 rss = (const void *)iq->cdesc;
566
567 /* Should only get CPL on this queue */
568 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL,
569 ("%s: unexpected type %d", __func__,
570 G_RSPD_TYPE(ctrl->u.type_gen)));
571
572 switch (rss->opcode) {
573 case CPL_FW4_MSG:
574 case CPL_FW6_MSG: {
575 const struct cpl_fw6_msg *cpl;
576
577 cpl = (const void *)(rss + 1);
578 if (cpl->type == FW6_TYPE_CMD_RPL)
579 t4_handle_fw_rpl(sc, cpl->data);
580
581 break;
582 }
583 case CPL_SGE_EGR_UPDATE:
584 handle_sge_egr_update(sc, (const void *)(rss + 1));
585 break;
586 case CPL_SET_TCB_RPL:
587 filter_rpl(sc, (const void *) (rss + 1));
588 break;
589 default:
590 device_printf(sc->dev,
591 "can't handle CPL opcode %d.", rss->opcode);
592 }
593
594 ndesc_total++;
595 if (++ndesc_pending >= iq->qsize / 4) {
596 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
597 V_CIDXINC(ndesc_pending) |
598 V_INGRESSQID(iq->cntxt_id) |
599 V_SEINTARM(
600 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
601 ndesc_pending = 0;
602 }
603 iq_next(iq);
604 }
605
606 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndesc_pending) |
607 V_INGRESSQID(iq->cntxt_id) | V_SEINTARM(iq->intr_params));
608}
609
610#ifdef T4_PKT_TIMESTAMP
611#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
612#else
613#define RX_COPY_THRESHOLD MINCLSIZE
614#endif
615
616void
617t4_eth_rx(void *arg)
618{
619 struct sge_rxq *rxq = arg;
620 struct sge_iq *iq = arg;
621 struct adapter *sc = iq->adapter;
622 struct rsp_ctrl *ctrl;
623 struct ifnet *ifp = rxq->ifp;
624 struct sge_fl *fl = &rxq->fl;
625 struct fl_sdesc *sd = &fl->sdesc[fl->cidx], *sd_next;
626 const struct rss_header *rss;
627 const struct cpl_rx_pkt *cpl;
628 uint32_t len;
629 int ndescs = 0, i;
630 struct mbuf *m0, *m;
631#ifdef INET
632 struct lro_ctrl *lro = &rxq->lro;
633 struct lro_entry *l;
634#endif
635
636 prefetch(sd->m);
637 prefetch(sd->cl);
638
639 iq->intr_next = iq->intr_params;
640 while (is_new_response(iq, &ctrl)) {
641
642 rmb();
643
644 rss = (const void *)iq->cdesc;
645 i = G_RSPD_TYPE(ctrl->u.type_gen);
646
647 if (__predict_false(i == X_RSPD_TYPE_CPL)) {
648
649 /* Can't be anything except an egress update */
650 KASSERT(rss->opcode == CPL_SGE_EGR_UPDATE,
651 ("%s: unexpected CPL %x", __func__, rss->opcode));
652
653 handle_sge_egr_update(sc, (const void *)(rss + 1));
654 goto nextdesc;
655 }
656 KASSERT(i == X_RSPD_TYPE_FLBUF && rss->opcode == CPL_RX_PKT,
657 ("%s: unexpected CPL %x rsp %d", __func__, rss->opcode, i));
658
659 sd_next = sd + 1;
660 if (__predict_false(fl->cidx + 1 == fl->cap))
661 sd_next = fl->sdesc;
662 prefetch(sd_next->m);
663 prefetch(sd_next->cl);
664
665 cpl = (const void *)(rss + 1);
666
667 m0 = sd->m;
668 sd->m = NULL; /* consumed */
669
670 len = be32toh(ctrl->pldbuflen_qid);
671 if (__predict_false((len & F_RSPD_NEWBUF) == 0))
672 panic("%s: cannot handle packed frames", __func__);
673 len = G_RSPD_LEN(len);
674
675 bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
676 BUS_DMASYNC_POSTREAD);
677
678 m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR);
679
680#ifdef T4_PKT_TIMESTAMP
681 *mtod(m0, uint64_t *) =
682 be64toh(ctrl->u.last_flit & 0xfffffffffffffff);
683 m0->m_data += 8;
684
685 /*
686 * 60 bit timestamp value is *(uint64_t *)m0->m_pktdat. Note
687 * that it is in the leading free-space (see M_LEADINGSPACE) in
688 * the mbuf. The kernel can clobber it during a pullup,
689 * m_copymdata, etc. You need to make sure that the mbuf
690 * reaches you unmolested if you care about the timestamp.
691 */
692#endif
693
694 if (len < RX_COPY_THRESHOLD) {
695 /* copy data to mbuf, buffer will be recycled */
696 bcopy(sd->cl, mtod(m0, caddr_t), len);
697 m0->m_len = len;
698 } else {
699 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
700 m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
701 sd->cl = NULL; /* consumed */
702 m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
703 }
704
705 len -= FL_PKTSHIFT;
706 m0->m_len -= FL_PKTSHIFT;
707 m0->m_data += FL_PKTSHIFT;
708
709 m0->m_pkthdr.len = len;
710 m0->m_pkthdr.rcvif = ifp;
711 m0->m_flags |= M_FLOWID;
712 m0->m_pkthdr.flowid = rss->hash_val;
713
714 if (cpl->csum_calc && !cpl->err_vec &&
715 ifp->if_capenable & IFCAP_RXCSUM) {
716 m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
717 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
718 if (cpl->ip_frag)
719 m0->m_pkthdr.csum_data = be16toh(cpl->csum);
720 else
721 m0->m_pkthdr.csum_data = 0xffff;
722 rxq->rxcsum++;
723 }
724
725 if (cpl->vlan_ex) {
726 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
727 m0->m_flags |= M_VLANTAG;
728 rxq->vlan_extraction++;
729 }
730
731 i = 1; /* # of fl sdesc used */
732 sd = sd_next;
733 if (__predict_false(++fl->cidx == fl->cap))
734 fl->cidx = 0;
735
736 len -= m0->m_len;
737 m = m0;
738 while (len) {
739 i++;
740
741 sd_next = sd + 1;
742 if (__predict_false(fl->cidx + 1 == fl->cap))
743 sd_next = fl->sdesc;
744 prefetch(sd_next->m);
745 prefetch(sd_next->cl);
746
747 m->m_next = sd->m;
748 sd->m = NULL; /* consumed */
749 m = m->m_next;
750
751 bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
752 BUS_DMASYNC_POSTREAD);
753
754 m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
755 if (len <= MLEN) {
756 bcopy(sd->cl, mtod(m, caddr_t), len);
757 m->m_len = len;
758 } else {
759 bus_dmamap_unload(fl->tag[sd->tag_idx],
760 sd->map);
761 m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
762 sd->cl = NULL; /* consumed */
763 m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
764 }
765
766 i++;
767 sd = sd_next;
768 if (__predict_false(++fl->cidx == fl->cap))
769 fl->cidx = 0;
770
771 len -= m->m_len;
772 }
773
774#ifdef INET
775 if (cpl->l2info & htobe32(F_RXF_LRO) &&
776 rxq->flags & RXQ_LRO_ENABLED &&
777 tcp_lro_rx(lro, m0, 0) == 0) {
778 /* queued for LRO */
779 } else
780#endif
781 ifp->if_input(ifp, m0);
782
783 FL_LOCK(fl);
784 fl->needed += i;
785 if (fl->needed >= 32)
786 refill_fl(sc, fl, 64, 32);
787 FL_UNLOCK(fl);
788
789nextdesc: ndescs++;
790 iq_next(iq);
791
792 if (ndescs > 32) {
793 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
794 V_CIDXINC(ndescs) |
795 V_INGRESSQID((u32)iq->cntxt_id) |
796 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
797 ndescs = 0;
798 }
799 }
800
801#ifdef INET
802 while (!SLIST_EMPTY(&lro->lro_active)) {
803 l = SLIST_FIRST(&lro->lro_active);
804 SLIST_REMOVE_HEAD(&lro->lro_active, next);
805 tcp_lro_flush(lro, l);
806 }
807#endif
808
809 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
810 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
811
812 FL_LOCK(fl);
813 if (fl->needed >= 32)
814 refill_fl(sc, fl, 128, 8);
815 FL_UNLOCK(fl);
816}
817
818int
819t4_mgmt_tx(struct adapter *sc, struct mbuf *m)
820{
821 return ctrl_tx(sc, &sc->sge.ctrlq[0], m);
822}
823
824/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
825#define TXPKTS_PKT_HDR ((\
826 sizeof(struct ulp_txpkt) + \
827 sizeof(struct ulptx_idata) + \
828 sizeof(struct cpl_tx_pkt_core) \
829 ) / 8)
830
831/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
832#define TXPKTS_WR_HDR (\
833 sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
834 TXPKTS_PKT_HDR)
835
836/* Header of a tx WR, before SGL of first packet (in flits) */
837#define TXPKT_WR_HDR ((\
838 sizeof(struct fw_eth_tx_pkt_wr) + \
839 sizeof(struct cpl_tx_pkt_core) \
840 ) / 8 )
841
842/* Header of a tx LSO WR, before SGL of first packet (in flits) */
843#define TXPKT_LSO_WR_HDR ((\
844 sizeof(struct fw_eth_tx_pkt_wr) + \
845 sizeof(struct cpl_tx_pkt_lso) + \
846 sizeof(struct cpl_tx_pkt_core) \
847 ) / 8 )
848
849int
850t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
851{
852 struct port_info *pi = (void *)ifp->if_softc;
853 struct adapter *sc = pi->adapter;
854 struct sge_eq *eq = &txq->eq;
855 struct buf_ring *br = txq->br;
856 struct mbuf *next;
857 int rc, coalescing, can_reclaim;
858 struct txpkts txpkts;
859 struct sgl sgl;
860
861 TXQ_LOCK_ASSERT_OWNED(txq);
862 KASSERT(m, ("%s: called with nothing to do.", __func__));
863
864 prefetch(&eq->desc[eq->pidx]);
865 prefetch(&txq->sdesc[eq->pidx]);
866
867 txpkts.npkt = 0;/* indicates there's nothing in txpkts */
868 coalescing = 0;
869
870 if (eq->avail < 8)
871 reclaim_tx_descs(txq, 0, 8);
872
873 for (; m; m = next ? next : drbr_dequeue(ifp, br)) {
874
875 if (eq->avail < 8)
876 break;
877
878 next = m->m_nextpkt;
879 m->m_nextpkt = NULL;
880
881 if (next || buf_ring_peek(br))
882 coalescing = 1;
883
884 rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
885 if (rc != 0) {
886 if (rc == ENOMEM) {
887
888 /* Short of resources, suspend tx */
889
890 m->m_nextpkt = next;
891 break;
892 }
893
894 /*
895 * Unrecoverable error for this packet, throw it away
896 * and move on to the next. get_pkt_sgl may already
897 * have freed m (it will be NULL in that case and the
898 * m_freem here is still safe).
899 */
900
901 m_freem(m);
902 continue;
903 }
904
905 if (coalescing &&
906 add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {
907
908 /* Successfully absorbed into txpkts */
909
910 write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
911 goto doorbell;
912 }
913
914 /*
915 * We weren't coalescing to begin with, or current frame could
916 * not be coalesced (add_to_txpkts flushes txpkts if a frame
917 * given to it can't be coalesced). Either way there should be
918 * nothing in txpkts.
919 */
920 KASSERT(txpkts.npkt == 0,
921 ("%s: txpkts not empty: %d", __func__, txpkts.npkt));
922
923 /* We're sending out individual packets now */
924 coalescing = 0;
925
926 if (eq->avail < 8)
927 reclaim_tx_descs(txq, 0, 8);
928 rc = write_txpkt_wr(pi, txq, m, &sgl);
929 if (rc != 0) {
930
931 /* Short of hardware descriptors, suspend tx */
932
933 /*
934 * This is an unlikely but expensive failure. We've
935 * done all the hard work (DMA mappings etc.) and now we
936 * can't send out the packet. What's worse, we have to
937 * spend even more time freeing up everything in sgl.
938 */
939 txq->no_desc++;
940 free_pkt_sgl(txq, &sgl);
941
942 m->m_nextpkt = next;
943 break;
944 }
945
946 ETHER_BPF_MTAP(ifp, m);
947 if (sgl.nsegs == 0)
948 m_freem(m);
949
950doorbell:
951 /* Fewer and fewer doorbells as the queue fills up */
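		/*
		 * (e.g. with ~512 descriptors in use, fls(512) / 2 = 5, so the
		 * doorbell is rung only once 32 descriptors are pending; a mostly
		 * empty queue is kicked every few descriptors.)
		 */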
952 if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)))
953 ring_eq_db(sc, eq);
954
955 can_reclaim = reclaimable(eq);
956 if (can_reclaim >= 32)
957 reclaim_tx_descs(txq, can_reclaim, 32);
958 }
959
960 if (txpkts.npkt > 0)
961 write_txpkts_wr(txq, &txpkts);
962
963 /*
964 * m not NULL means there was an error but we haven't thrown it away.
965 * This can happen when we're short of tx descriptors (no_desc) or maybe
966 * even DMA maps (no_dmamap). Either way, a credit flush and reclaim
967 * will get things going again.
968 *
969 * If eq->avail is already 0 we know a credit flush was requested in the
970 * WR that reduced it to 0 so we don't need another flush (we don't have
971 * any descriptor for a flush WR anyway, duh).
972 */
973 if (m && eq->avail > 0 && !(eq->flags & EQ_CRFLUSHED)) {
974 struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];
975
976 txsd->desc_used = 1;
977 txsd->credits = 0;
978 write_eqflush_wr(eq);
979 }
980 txq->m = m;
981
982 if (eq->pending)
983 ring_eq_db(sc, eq);
984
985 can_reclaim = reclaimable(eq);
986 if (can_reclaim >= 32)
987 reclaim_tx_descs(txq, can_reclaim, 128);
988
989 return (0);
990}
991
992void
993t4_update_fl_bufsize(struct ifnet *ifp)
994{
995 struct port_info *pi = ifp->if_softc;
996 struct sge_rxq *rxq;
997 struct sge_fl *fl;
998 int i;
999
1000 for_each_rxq(pi, i, rxq) {
1001 fl = &rxq->fl;
1002
1003 FL_LOCK(fl);
1004 set_fl_tag_idx(fl, ifp->if_mtu);
1005 FL_UNLOCK(fl);
1006 }
1007}
1008
1009/*
1010 * A non-NULL handler indicates this iq will not receive direct interrupts; its
1011 * handler will instead be invoked by a forwarded interrupt queue.
1012 */
1013static inline void
1014init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
1015 int qsize, int esize, iq_intr_handler_t *handler, char *name)
1016{
1017 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
1018 ("%s: bad tmr_idx %d", __func__, tmr_idx));
1019 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */
1020 ("%s: bad pktc_idx %d", __func__, pktc_idx));
1021
1022 iq->flags = 0;
1023 iq->adapter = sc;
1024 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) |
1025 V_QINTR_CNT_EN(pktc_idx >= 0);
1026 iq->intr_pktc_idx = pktc_idx;
1027 iq->qsize = roundup(qsize, 16); /* See FW_IQ_CMD/iqsize */
1028 iq->esize = max(esize, 16); /* See FW_IQ_CMD/iqesize */
1029 iq->handler = handler;
1030 strlcpy(iq->lockname, name, sizeof(iq->lockname));
1031}
1032
1033static inline void
1034init_fl(struct sge_fl *fl, int qsize, char *name)
1035{
1036 fl->qsize = qsize;
1037 strlcpy(fl->lockname, name, sizeof(fl->lockname));
1038}
1039
1040static inline void
1041init_eq(struct sge_eq *eq, int qsize, char *name)
1042{
1043 eq->qsize = qsize;
1044 strlcpy(eq->lockname, name, sizeof(eq->lockname));
1045}
1046
1047static int
1048alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
1049 bus_dmamap_t *map, bus_addr_t *pa, void **va)
1050{
1051 int rc;
1052
1053 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
1054 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
1055 if (rc != 0) {
1056 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
1057 goto done;
1058 }
1059
1060 rc = bus_dmamem_alloc(*tag, va,
1061 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
1062 if (rc != 0) {
1063 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
1064 goto done;
1065 }
1066
1067 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
1068 if (rc != 0) {
1069 device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
1070 goto done;
1071 }
1072done:
1073 if (rc)
1074 free_ring(sc, *tag, *map, *pa, *va);
1075
1076 return (rc);
1077}
1078
1079static int
1080free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
1081 bus_addr_t pa, void *va)
1082{
1083 if (pa)
1084 bus_dmamap_unload(tag, map);
1085 if (va)
1086 bus_dmamem_free(tag, va, map);
1087 if (tag)
1088 bus_dma_tag_destroy(tag);
1089
1090 return (0);
1091}
1092
1093/*
1094 * Allocates the ring for an ingress queue and an optional freelist. If the
1095 * freelist is specified it will be allocated and then associated with the
1096 * ingress queue.
1097 *
1098 * Returns errno on failure. Resources allocated up to the point of failure may
1099 * still be held; the caller is responsible for cleanup if this function fails.
1100 *
1101 * If the ingress queue will take interrupts directly (iq->handler == NULL) then
1102 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies
1103 * the index of the queue to which its interrupts will be forwarded.
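 *
 * If 'cong' is non-negative the congestion-related fields of the command are
 * filled in, with cong used as the congestion channel map (FL0CNGCHMAP);
 * alloc_rxq passes 1 << pi->tx_chan and alloc_iq passes -1 to leave it off.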
1104 */
1105static int
1106alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
1107 int intr_idx)
1107 int intr_idx, int cong)
1108{
1109 int rc, i, cntxt_id;
1110 size_t len;
1111 struct fw_iq_cmd c;
1112 struct adapter *sc = iq->adapter;
1113 __be32 v = 0;
1114
1115 /* The adapter queues are nominally allocated in port[0]'s name */
1116 if (pi == NULL)
1117 pi = sc->port[0];
1118
1119 len = iq->qsize * iq->esize;
1120 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
1121 (void **)&iq->desc);
1122 if (rc != 0)
1123 return (rc);
1124
1125 bzero(&c, sizeof(c));
1126 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
1127 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
1128 V_FW_IQ_CMD_VFN(0));
1129
1130 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
1131 FW_LEN16(c));
1132
1133 /* Special handling for firmware event queue */
1134 if (iq == &sc->sge.fwq)
1135 v |= F_FW_IQ_CMD_IQASYNCH;
1136
1137 if (iq->handler) {
1138 KASSERT(intr_idx < NFIQ(sc),
1139 ("%s: invalid indirect intr_idx %d", __func__, intr_idx));
1140 v |= F_FW_IQ_CMD_IQANDST;
1141 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fiq[intr_idx].abs_id);
1142 } else {
1143 KASSERT(intr_idx < sc->intr_count,
1144 ("%s: invalid direct intr_idx %d", __func__, intr_idx));
1145 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
1146 }
1147
1148 c.type_to_iqandstindex = htobe32(v |
1149 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1150 V_FW_IQ_CMD_VIID(pi->viid) |
1151 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
1152 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
1153 F_FW_IQ_CMD_IQGTSMODE |
1154 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
1155 V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
1156 c.iqsize = htobe16(iq->qsize);
1157 c.iqaddr = htobe64(iq->ba);
1158 if (cong >= 0)
1159 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
1158
1159 if (fl) {
1160 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
1161
1162 for (i = 0; i < FL_BUF_SIZES; i++) {
1163
1164 /*
1165 * A freelist buffer must be 16 byte aligned as the SGE
1166 * uses the low 4 bits of the bus addr to figure out the
1167 * buffer size.
1168 */
1169 rc = bus_dma_tag_create(sc->dmat, 16, 0,
1170 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1171 FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
1172 NULL, NULL, &fl->tag[i]);
1173 if (rc != 0) {
1174 device_printf(sc->dev,
1175 "failed to create fl DMA tag[%d]: %d\n",
1176 i, rc);
1177 return (rc);
1178 }
1179 }
1180 len = fl->qsize * RX_FL_ESIZE;
1181 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
1182 &fl->ba, (void **)&fl->desc);
1183 if (rc)
1184 return (rc);
1185
1186 /* Allocate space for one software descriptor per buffer. */
1187 fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8;
1188 FL_LOCK(fl);
1189 set_fl_tag_idx(fl, pi->ifp->if_mtu);
1190 rc = alloc_fl_sdesc(fl);
1191 FL_UNLOCK(fl);
1192 if (rc != 0) {
1193 device_printf(sc->dev,
1194 "failed to setup fl software descriptors: %d\n",
1195 rc);
1196 return (rc);
1197 }
1198 fl->needed = fl->cap;
1199
1200 c.iqns_to_fl0congen =
1201 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE));
1203 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
1204 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
1205 F_FW_IQ_CMD_FL0PADEN);
1206 if (cong >= 0) {
1207 c.iqns_to_fl0congen |=
1208 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
1209 F_FW_IQ_CMD_FL0CONGCIF |
1210 F_FW_IQ_CMD_FL0CONGEN);
1211 }
1202 c.fl0dcaen_to_fl0cidxfthresh =
1203 htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
1204 V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
1205 c.fl0size = htobe16(fl->qsize);
1206 c.fl0addr = htobe64(fl->ba);
1207 }
1208
1209 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1210 if (rc != 0) {
1211 device_printf(sc->dev,
1212 "failed to create ingress queue: %d\n", rc);
1213 return (rc);
1214 }
1215
1216 iq->cdesc = iq->desc;
1217 iq->cidx = 0;
1218 iq->gen = 1;
1219 iq->intr_next = iq->intr_params;
1220 iq->cntxt_id = be16toh(c.iqid);
1221 iq->abs_id = be16toh(c.physiqid);
1222 iq->flags |= (IQ_ALLOCATED | IQ_STARTED);
1223
1224 cntxt_id = iq->cntxt_id - sc->sge.iq_start;
1225 KASSERT(cntxt_id < sc->sge.niq,
1226 ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
1227 cntxt_id, sc->sge.niq - 1));
1228 sc->sge.iqmap[cntxt_id] = iq;
1229
1230 if (fl) {
1231 fl->cntxt_id = be16toh(c.fl0id);
1232 fl->pidx = fl->cidx = 0;
1233
1234 cntxt_id = fl->cntxt_id - sc->sge.eq_start;
1235 KASSERT(cntxt_id < sc->sge.neq,
1236 ("%s: fl->cntxt_id (%d) more than the max (%d)", __func__,
1237 cntxt_id, sc->sge.neq - 1));
1238 sc->sge.eqmap[cntxt_id] = (void *)fl;
1239
1240 FL_LOCK(fl);
1241 refill_fl(sc, fl, -1, 8);
1242 FL_UNLOCK(fl);
1243 }
1244
1245 /* Enable IQ interrupts */
1246 atomic_store_rel_32(&iq->state, IQS_IDLE);
1247 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
1248 V_INGRESSQID(iq->cntxt_id));
1249
1250 return (0);
1251}
1252
1253/*
1254 * This can be called with the iq/fl in any state - fully allocated and
1255 * functional, partially allocated, even all-zeroed out.
1256 */
1257static int
1258free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
1259{
1260 int i, rc;
1261 struct adapter *sc = iq->adapter;
1262 device_t dev;
1263
1264 if (sc == NULL)
1265 return (0); /* nothing to do */
1266
1267 dev = pi ? pi->dev : sc->dev;
1268
1269 if (iq->flags & IQ_STARTED) {
1270 rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0,
1271 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
1272 if (rc != 0) {
1273 device_printf(dev,
1274 "failed to stop queue %p: %d\n", iq, rc);
1275 return (rc);
1276 }
1277 iq->flags &= ~IQ_STARTED;
1278
1279 /* Synchronize with the interrupt handler */
1280 while (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_DISABLED))
1281 pause("iqfree", hz / 1000);
1282 }
1283
1284 if (iq->flags & IQ_ALLOCATED) {
1285
1286 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
1287 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
1288 fl ? fl->cntxt_id : 0xffff, 0xffff);
1289 if (rc != 0) {
1290 device_printf(dev,
1291 "failed to free queue %p: %d\n", iq, rc);
1292 return (rc);
1293 }
1294 iq->flags &= ~IQ_ALLOCATED;
1295 }
1296
1297 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
1298
1299 bzero(iq, sizeof(*iq));
1300
1301 if (fl) {
1302 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
1303 fl->desc);
1304
1305 if (fl->sdesc) {
1306 FL_LOCK(fl);
1307 free_fl_sdesc(fl);
1308 FL_UNLOCK(fl);
1309 }
1310
1311 if (mtx_initialized(&fl->fl_lock))
1312 mtx_destroy(&fl->fl_lock);
1313
1314 for (i = 0; i < FL_BUF_SIZES; i++) {
1315 if (fl->tag[i])
1316 bus_dma_tag_destroy(fl->tag[i]);
1317 }
1318
1319 bzero(fl, sizeof(*fl));
1320 }
1321
1322 return (0);
1323}
1324
1325static int
1326alloc_iq(struct sge_iq *iq, int intr_idx)
1327{
1328 return alloc_iq_fl(NULL, iq, NULL, intr_idx);
1338 return alloc_iq_fl(NULL, iq, NULL, intr_idx, -1);
1329}
1330
1331static int
1332free_iq(struct sge_iq *iq)
1333{
1334 return free_iq_fl(NULL, iq, NULL);
1335}
1336
1337static int
1338alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
1339{
1340 int rc;
1341 struct sysctl_oid *oid;
1342 struct sysctl_oid_list *children;
1343 char name[16];
1344
1345 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx);
1355 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, 1 << pi->tx_chan);
1346 if (rc != 0)
1347 return (rc);
1348
1349#ifdef INET
1350 rc = tcp_lro_init(&rxq->lro);
1351 if (rc != 0)
1352 return (rc);
1353 rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
1354
1355 if (pi->ifp->if_capenable & IFCAP_LRO)
1356 rxq->flags |= RXQ_LRO_ENABLED;
1357#endif
1358 rxq->ifp = pi->ifp;
1359
1360 children = SYSCTL_CHILDREN(pi->oid_rxq);
1361
1362 snprintf(name, sizeof(name), "%d", idx);
1363 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1364 NULL, "rx queue");
1365 children = SYSCTL_CHILDREN(oid);
1366
1367 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
1368 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_abs_id, "I",
1369 "absolute id of the queue");
1370#ifdef INET
1371 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
1372 &rxq->lro.lro_queued, 0, NULL);
1373 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
1374 &rxq->lro.lro_flushed, 0, NULL);
1375#endif
1376 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
1377 &rxq->rxcsum, "# of times hardware assisted with checksum");
1378 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
1379 CTLFLAG_RD, &rxq->vlan_extraction,
1380 "# of times hardware extracted 802.1Q tag");
1381
1382 return (rc);
1383}
1384
1385static int
1386free_rxq(struct port_info *pi, struct sge_rxq *rxq)
1387{
1388 int rc;
1389
1390#ifdef INET
1391 if (rxq->lro.ifp) {
1392 tcp_lro_free(&rxq->lro);
1393 rxq->lro.ifp = NULL;
1394 }
1395#endif
1396
1397 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
1398 if (rc == 0)
1399 bzero(rxq, sizeof(*rxq));
1400
1401 return (rc);
1402}
1403
1404static int
1405alloc_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq, int idx)
1406{
1407 int rc, cntxt_id;
1408 size_t len;
1409 struct fw_eq_ctrl_cmd c;
1410 struct sge_eq *eq = &ctrlq->eq;
1411 char name[16];
1412 struct sysctl_oid *oid;
1413 struct sysctl_oid_list *children;
1414
1415 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
1416
1417 len = eq->qsize * CTRL_EQ_ESIZE;
1418 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
1419 &eq->ba, (void **)&eq->desc);
1420 if (rc)
1421 return (rc);
1422
1423 eq->cap = eq->qsize - SPG_LEN / CTRL_EQ_ESIZE;
1424 eq->spg = (void *)&eq->desc[eq->cap];
1425 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */
1426 eq->iqid = sc->sge.fwq.cntxt_id;
1427
1428 bzero(&c, sizeof(c));
1429
1430 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
1431 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
1432 V_FW_EQ_CTRL_CMD_VFN(0));
1433 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
1434 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
1435 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
1436 c.physeqid_pkd = htobe32(0);
1437 c.fetchszm_to_iqid =
1438 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1439 V_FW_EQ_CTRL_CMD_PCIECHN(idx) |
1449 V_FW_EQ_CTRL_CMD_PCIECHN(idx) | F_FW_EQ_CTRL_CMD_FETCHRO |
1440 V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
1441 c.dcaen_to_eqsize =
1442 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1443 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1444 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1445 V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
1446 c.eqaddr = htobe64(eq->ba);
1447
1448 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1449 if (rc != 0) {
1450 device_printf(sc->dev,
1451 "failed to create control queue %d: %d\n", idx, rc);
1452 return (rc);
1453 }
1454
1455 eq->pidx = eq->cidx = 0;
1456 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
1457 eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
1458
1459 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1460 KASSERT(cntxt_id < sc->sge.neq,
1461 ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1462 cntxt_id, sc->sge.neq - 1));
1463 sc->sge.eqmap[cntxt_id] = eq;
1464
1465 children = SYSCTL_CHILDREN(sc->oid_ctrlq);
1466
1467 snprintf(name, sizeof(name), "%d", idx);
1468 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1469 NULL, "ctrl queue");
1470 children = SYSCTL_CHILDREN(oid);
1471
1472 SYSCTL_ADD_UQUAD(&sc->ctx, children, OID_AUTO, "total_wrs", CTLFLAG_RD,
1473 &ctrlq->total_wrs, "total # of work requests");
1474 SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
1475 &ctrlq->no_desc, 0,
1476 "# of times ctrlq ran out of hardware descriptors");
1477 SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "too_long", CTLFLAG_RD,
1478 &ctrlq->too_long, 0, "# of oversized work requests");
1479
1480 return (rc);
1481}
1482
1483static int
1484free_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq)
1485{
1486 int rc;
1487 struct sge_eq *eq = &ctrlq->eq;
1488
1489 if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
1490 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
1491 if (rc != 0) {
1492 device_printf(sc->dev,
1493 "failed to free ctrl queue %p: %d\n", eq, rc);
1494 return (rc);
1495 }
1496 eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
1497 }
1498
1499 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
1500
1501 if (mtx_initialized(&eq->eq_lock))
1502 mtx_destroy(&eq->eq_lock);
1503
1504 bzero(ctrlq, sizeof(*ctrlq));
1505 return (0);
1506}
1507
1508static int
1509alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
1510{
1511 int rc, cntxt_id;
1512 size_t len;
1513 struct adapter *sc = pi->adapter;
1514 struct fw_eq_eth_cmd c;
1515 struct sge_eq *eq = &txq->eq;
1516 char name[16];
1517 struct sysctl_oid *oid;
1518 struct sysctl_oid_list *children;
1519
1520 txq->ifp = pi->ifp;
1521 TASK_INIT(&txq->resume_tx, 0, cxgbe_txq_start, txq);
1522
1523 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
1524
1525 len = eq->qsize * TX_EQ_ESIZE;
1526 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
1527 &eq->ba, (void **)&eq->desc);
1528 if (rc)
1529 return (rc);
1530
1531 eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE;
1532 eq->spg = (void *)&eq->desc[eq->cap];
1533 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */
1534 txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
1535 M_ZERO | M_WAITOK);
1536 txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
1537 eq->iqid = sc->sge.rxq[pi->first_rxq].iq.cntxt_id;
1538
1539 rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
1540 BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
1541 BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
1542 if (rc != 0) {
1543 device_printf(sc->dev,
1544 "failed to create tx DMA tag: %d\n", rc);
1545 return (rc);
1546 }
1547
1548 rc = alloc_tx_maps(txq);
1549 if (rc != 0) {
1550 device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
1551 return (rc);
1552 }
1553
1554 bzero(&c, sizeof(c));
1555
1556 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
1557 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
1558 V_FW_EQ_ETH_CMD_VFN(0));
1559 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
1560 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
1561 c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
1562 c.fetchszm_to_iqid =
1563 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1564 V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
1574 V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
1565 V_FW_EQ_ETH_CMD_IQID(eq->iqid));
1566 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1567 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1568 V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1569 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
1570 c.eqaddr = htobe64(eq->ba);
1571
1572 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1573 if (rc != 0) {
1574 device_printf(pi->dev,
1575 "failed to create egress queue: %d\n", rc);
1576 return (rc);
1577 }
1578
1579 eq->pidx = eq->cidx = 0;
1580 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
1581 eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
1582
1583 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1584 KASSERT(cntxt_id < sc->sge.neq,
1585 ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1586 cntxt_id, sc->sge.neq - 1));
1587 sc->sge.eqmap[cntxt_id] = eq;
1588
1589 children = SYSCTL_CHILDREN(pi->oid_txq);
1590
1591 snprintf(name, sizeof(name), "%d", idx);
1592 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1593 NULL, "tx queue");
1594 children = SYSCTL_CHILDREN(oid);
1595
1596 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
1597 &txq->txcsum, "# of times hardware assisted with checksum");
1598 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
1599 CTLFLAG_RD, &txq->vlan_insertion,
1600 "# of times hardware inserted 802.1Q tag");
1601 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
1602 &txq->tso_wrs, "# of IPv4 TSO work requests");
1603 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
1604 &txq->imm_wrs, "# of work requests with immediate data");
1605 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
1606 &txq->sgl_wrs, "# of work requests with direct SGL");
1607 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
1608 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
1609 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
1610 &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
1611 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
1612 &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
1613
1614 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
1615 &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
1616 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
1617 &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
1618 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
1619 &txq->egr_update, 0, "egress update notifications from the SGE");
1620
1621 return (rc);
1622}
1623
1624static int
1625free_txq(struct port_info *pi, struct sge_txq *txq)
1626{
1627 int rc;
1628 struct adapter *sc = pi->adapter;
1629 struct sge_eq *eq = &txq->eq;
1630
1631 if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
1632
1633 /*
1634 * Wait for the response to a credit flush if there's one
1635 * pending. Clearing the flag tells handle_sge_egr_update or
1636 * cxgbe_txq_start (depending on how far the response has made
1637 * it) that they should ignore the response and wake up free_txq
1638 * instead.
1639 *
1640 * The interface has been marked down by the time we get here
1641 * (both IFF_UP and IFF_DRV_RUNNING cleared). qflush has
1642 * emptied the tx buf_rings and we know nothing new is being
1643 * queued for tx so we don't have to worry about a new credit
1644 * flush request.
1645 */
1646 TXQ_LOCK(txq);
1647 if (eq->flags & EQ_CRFLUSHED) {
1648 eq->flags &= ~EQ_CRFLUSHED;
1649 msleep(txq, &eq->eq_lock, 0, "crflush", 0);
1650 }
1651 TXQ_UNLOCK(txq);
1652
1653 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
1654 if (rc != 0) {
1655 device_printf(pi->dev,
1656 "failed to free egress queue %p: %d\n", eq, rc);
1657 return (rc);
1658 }
1659 eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
1660 }
1661
1662 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
1663
1664 free(txq->sdesc, M_CXGBE);
1665
1666 if (txq->maps)
1667 free_tx_maps(txq);
1668
1669 buf_ring_free(txq->br, M_CXGBE);
1670
1671 if (txq->tx_tag)
1672 bus_dma_tag_destroy(txq->tx_tag);
1673
1674 if (mtx_initialized(&eq->eq_lock))
1675 mtx_destroy(&eq->eq_lock);
1676
1677 bzero(txq, sizeof(*txq));
1678 return (0);
1679}
1680
1681static void
1682oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1683{
1684 bus_addr_t *ba = arg;
1685
1686 KASSERT(nseg == 1,
1687 ("%s meant for single segment mappings only.", __func__));
1688
1689 *ba = error ? 0 : segs->ds_addr;
1690}
1691
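/*
 * An ingress entry is considered new when the generation bit in its rsp_ctrl
 * matches iq->gen.  iq_next flips iq->gen every time cidx wraps around, so
 * entries left over from the previous pass through the ring are ignored.
 */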
1692static inline bool
1693is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
1694{
1695 *ctrl = (void *)((uintptr_t)iq->cdesc +
1696 (iq->esize - sizeof(struct rsp_ctrl)));
1697
1698 return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
1699}
1700
1701static inline void
1702iq_next(struct sge_iq *iq)
1703{
1704 iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
1705 if (__predict_false(++iq->cidx == iq->qsize - 1)) {
1706 iq->cidx = 0;
1707 iq->gen ^= 1;
1708 iq->cdesc = iq->desc;
1709 }
1710}
1711
1712#define FL_HW_IDX(x) ((x) >> 3)
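/*
 * The freelist doorbell is rung in units of 8 buffer pointers (one hardware
 * descriptor).  One credit is held back whenever pidx and cidx land in the
 * same hardware descriptor, presumably so the producer index never appears
 * to catch up with the consumer index.
 */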
1713static inline void
1714ring_fl_db(struct adapter *sc, struct sge_fl *fl)
1715{
1716 int ndesc = fl->pending / 8;
1717
1718 if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
1719 ndesc--; /* hold back one credit */
1720
1721 if (ndesc <= 0)
1722 return; /* nothing to do */
1723
1724 wmb();
1725
1726 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
1727 V_QID(fl->cntxt_id) | V_PIDX(ndesc));
1728 fl->pending -= ndesc * 8;
1729}
1730
1731/*
1732 * Fill up the freelist by up to nbufs and ring its doorbell if the number of
1733 * buffers ready to be handed to the hardware >= dbthresh.
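 * A negative nbufs (the way refill_fl is first called on a new freelist) means
 * "fill as many buffers as the freelist currently needs".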
1734 */
1735static void
1736refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs, int dbthresh)
1737{
1738 __be64 *d = &fl->desc[fl->pidx];
1739 struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
1740 bus_dma_tag_t tag;
1741 bus_addr_t pa;
1742 caddr_t cl;
1743 int rc;
1744
1745 FL_LOCK_ASSERT_OWNED(fl);
1746
1747 if (nbufs < 0 || nbufs > fl->needed)
1748 nbufs = fl->needed;
1749
1750 while (nbufs--) {
1751
1752 if (sd->cl != NULL) {
1753
1754 /*
1755 * This happens when a frame small enough to fit
1756 * entirely in an mbuf was received in cl last time.
1757 * We held on to cl and can reuse it now. Note that
1758 * we reuse a cluster of the old size if fl->tag_idx is
1759 * no longer the same as sd->tag_idx.
1760 */
1761
1762 KASSERT(*d == sd->ba_tag,
1763 ("%s: recyling problem at pidx %d",
1764 __func__, fl->pidx));
1765
1766 d++;
1767 goto recycled;
1768 }
1769
1770
1771 if (fl->tag_idx != sd->tag_idx) {
1772 bus_dmamap_t map;
1773 bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
1774 bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
1775
1776 /*
1777 * An MTU change can get us here. Discard the old map
1778 * which was created with the old tag, but only if
1779 * we're able to get a new one.
1780 */
1781 rc = bus_dmamap_create(newtag, 0, &map);
1782 if (rc == 0) {
1783 bus_dmamap_destroy(oldtag, sd->map);
1784 sd->map = map;
1785 sd->tag_idx = fl->tag_idx;
1786 }
1787 }
1788
1789 tag = fl->tag[sd->tag_idx];
1790
1791 cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
1792 if (cl == NULL)
1793 break;
1794
1795 rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx),
1796 oneseg_dma_callback, &pa, 0);
1797 if (rc != 0 || pa == 0) {
1798 fl->dmamap_failed++;
1799 uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
1800 break;
1801 }
1802
1803 sd->cl = cl;
1804 *d++ = htobe64(pa | sd->tag_idx);
1805
1806#ifdef INVARIANTS
1807 sd->ba_tag = htobe64(pa | sd->tag_idx);
1808#endif
1809
1810recycled:
1811 /* sd->m is never recycled, should always be NULL */
1812 KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__));
1813
1814 sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
1815 if (sd->m == NULL)
1816 break;
1817
1818 fl->pending++;
1819 fl->needed--;
1820 sd++;
1821 if (++fl->pidx == fl->cap) {
1822 fl->pidx = 0;
1823 sd = fl->sdesc;
1824 d = fl->desc;
1825 }
1826 }
1827
1828 if (fl->pending >= dbthresh)
1829 ring_fl_db(sc, fl);
1830}
1831
1832static int
1833alloc_fl_sdesc(struct sge_fl *fl)
1834{
1835 struct fl_sdesc *sd;
1836 bus_dma_tag_t tag;
1837 int i, rc;
1838
1839 FL_LOCK_ASSERT_OWNED(fl);
1840
1841 fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
1842 M_ZERO | M_WAITOK);
1843
1844 tag = fl->tag[fl->tag_idx];
1845 sd = fl->sdesc;
1846 for (i = 0; i < fl->cap; i++, sd++) {
1847
1848 sd->tag_idx = fl->tag_idx;
1849 rc = bus_dmamap_create(tag, 0, &sd->map);
1850 if (rc != 0)
1851 goto failed;
1852 }
1853
1854 return (0);
1855failed:
1856 while (--i >= 0) {
1857 sd--;
1858 bus_dmamap_destroy(tag, sd->map);
1859 if (sd->m) {
1860 m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
1861 m_free(sd->m);
1862 sd->m = NULL;
1863 }
1864 }
1865 KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
1866
1867 free(fl->sdesc, M_CXGBE);
1868 fl->sdesc = NULL;
1869
1870 return (rc);
1871}
1872
1873static void
1874free_fl_sdesc(struct sge_fl *fl)
1875{
1876 struct fl_sdesc *sd;
1877 int i;
1878
1879 FL_LOCK_ASSERT_OWNED(fl);
1880
1881 sd = fl->sdesc;
1882 for (i = 0; i < fl->cap; i++, sd++) {
1883
1884 if (sd->m) {
1885 m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
1886 m_free(sd->m);
1887 sd->m = NULL;
1888 }
1889
1890 if (sd->cl) {
1891 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
1892 uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
1893 sd->cl = NULL;
1894 }
1895
1896 bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
1897 }
1898
1899 free(fl->sdesc, M_CXGBE);
1900 fl->sdesc = NULL;
1901}
1902
1903static int
1904alloc_tx_maps(struct sge_txq *txq)
1905{
1906 struct tx_map *txm;
1907 int i, rc, count;
1908
1909 /*
1910 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
1911 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is
1912 * sized for the worst case.
1913 */
1914 count = txq->eq.qsize * 10 / 8;
1915 txq->map_total = txq->map_avail = count;
1916 txq->map_cidx = txq->map_pidx = 0;
1917
1918 txq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
1919 M_ZERO | M_WAITOK);
1920
1921 txm = txq->maps;
1922 for (i = 0; i < count; i++, txm++) {
1923 rc = bus_dmamap_create(txq->tx_tag, 0, &txm->map);
1924 if (rc != 0)
1925 goto failed;
1926 }
1927
1928 return (0);
1929failed:
1930 while (--i >= 0) {
1931 txm--;
1932 bus_dmamap_destroy(txq->tx_tag, txm->map);
1933 }
1934 KASSERT(txm == txq->maps, ("%s: EDOOFUS", __func__));
1935
1936 free(txq->maps, M_CXGBE);
1937 txq->maps = NULL;
1938
1939 return (rc);
1940}
1941
1942static void
1943free_tx_maps(struct sge_txq *txq)
1944{
1945 struct tx_map *txm;
1946 int i;
1947
1948 txm = txq->maps;
1949 for (i = 0; i < txq->map_total; i++, txm++) {
1950
1951 if (txm->m) {
1952 bus_dmamap_unload(txq->tx_tag, txm->map);
1953 m_freem(txm->m);
1954 txm->m = NULL;
1955 }
1956
1957 bus_dmamap_destroy(txq->tx_tag, txm->map);
1958 }
1959
1960 free(txq->maps, M_CXGBE);
1961 txq->maps = NULL;
1962}
1963
1964/*
1965 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're
1966 * willing to use up to 2 hardware descriptors, which means a maximum of 96 bytes
1967 * of immediate data.
1968 */
1969#define IMM_LEN ( \
1970 2 * TX_EQ_ESIZE \
1971 - sizeof(struct fw_eth_tx_pkt_wr) \
1972 - sizeof(struct cpl_tx_pkt_core))
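/* (Assuming 64-byte descriptors and 16-byte WR/CPL headers: 2*64 - 16 - 16 = 96.) */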
1973
1974/*
1975 * Returns non-zero on failure, no need to cleanup anything in that case.
1976 *
1977 * Note 1: We always try to defrag the mbuf if required and return EFBIG only
1978 * if the resulting chain still won't fit in a tx descriptor.
1979 *
1980 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
1981 * does not have the TCP header in it.
1982 */
1983static int
1984get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
1985 int sgl_only)
1986{
1987 struct mbuf *m = *fp;
1988 struct tx_map *txm;
1989 int rc, defragged = 0, n;
1990
1991 TXQ_LOCK_ASSERT_OWNED(txq);
1992
1993 if (m->m_pkthdr.tso_segsz)
1994 sgl_only = 1; /* Do not allow immediate data with LSO */
1995
1996start: sgl->nsegs = 0;
1997
1998 if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
1999 return (0); /* nsegs = 0 tells caller to use imm. tx */
2000
2001 if (txq->map_avail == 0) {
2002 txq->no_dmamap++;
2003 return (ENOMEM);
2004 }
2005 txm = &txq->maps[txq->map_pidx];
2006
2007 if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
2008 *fp = m_pullup(m, 50);
2009 m = *fp;
2010 if (m == NULL)
2011 return (ENOBUFS);
2012 }
2013
2014 rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
2015 &sgl->nsegs, BUS_DMA_NOWAIT);
2016 if (rc == EFBIG && defragged == 0) {
2017 m = m_defrag(m, M_DONTWAIT);
2018 if (m == NULL)
2019 return (EFBIG);
2020
2021 defragged = 1;
2022 *fp = m;
2023 goto start;
2024 }
2025 if (rc != 0)
2026 return (rc);
2027
2028 txm->m = m;
2029 txq->map_avail--;
2030 if (++txq->map_pidx == txq->map_total)
2031 txq->map_pidx = 0;
2032
2033 KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
2034 ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
2035
2036 /*
2037 * Store the # of flits required to hold this frame's SGL in nflits. An
2038 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
2039 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used
2040 * then len1 must be set to 0.
2041 */
2042 n = sgl->nsegs - 1;
2043 sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
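	/* e.g. 1 segment -> 2 flits, 2 -> 4 (unused second length written as 0), 3 -> 5. */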
2044
2045 return (0);
2046}
2047
2048
2049/*
2050 * Releases all the txq resources used up in the specified sgl.
2051 */
2052static int
2053free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
2054{
2055 struct tx_map *txm;
2056
2057 TXQ_LOCK_ASSERT_OWNED(txq);
2058
2059 if (sgl->nsegs == 0)
2060 return (0); /* didn't use any map */
2061
2062 /* 1 pkt uses exactly 1 map, back it out */
2063
2064 txq->map_avail++;
2065 if (txq->map_pidx > 0)
2066 txq->map_pidx--;
2067 else
2068 txq->map_pidx = txq->map_total - 1;
2069
2070 txm = &txq->maps[txq->map_pidx];
2071 bus_dmamap_unload(txq->tx_tag, txm->map);
2072 txm->m = NULL;
2073
2074 return (0);
2075}
2076
2077static int
2078write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
2079 struct sgl *sgl)
2080{
2081 struct sge_eq *eq = &txq->eq;
2082 struct fw_eth_tx_pkt_wr *wr;
2083 struct cpl_tx_pkt_core *cpl;
2084 uint32_t ctrl; /* used in many unrelated places */
2085 uint64_t ctrl1;
2086 int nflits, ndesc, pktlen;
2087 struct tx_sdesc *txsd;
2088 caddr_t dst;
2089
2090 TXQ_LOCK_ASSERT_OWNED(txq);
2091
2092 pktlen = m->m_pkthdr.len;
2093
2094 /*
2095 * Do we have enough flits to send this frame out?
2096 */
2097 ctrl = sizeof(struct cpl_tx_pkt_core);
2098 if (m->m_pkthdr.tso_segsz) {
2099 nflits = TXPKT_LSO_WR_HDR;
2100 ctrl += sizeof(struct cpl_tx_pkt_lso);
2101 } else
2102 nflits = TXPKT_WR_HDR;
2103 if (sgl->nsegs > 0)
2104 nflits += sgl->nflits;
2105 else {
2106 nflits += howmany(pktlen, 8);
2107 ctrl += pktlen;
2108 }
2109 ndesc = howmany(nflits, 8);
2110 if (ndesc > eq->avail)
2111 return (ENOMEM);
2112
2113 /* Firmware work request header */
2114 wr = (void *)&eq->desc[eq->pidx];
2115 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
2116 V_FW_WR_IMMDLEN(ctrl));
2117 ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
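	/*
	 * If this WR is about to consume the last of the available descriptors,
	 * ask the SGE for an egress update so that tx can be resumed once
	 * credits are returned (see handle_sge_egr_update / cxgbe_txq_start).
	 */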
2118 if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) {
2119 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2120 eq->flags |= EQ_CRFLUSHED;
2121 }
2122
2123 wr->equiq_to_len16 = htobe32(ctrl);
2124 wr->r3 = 0;
2125
2126 if (m->m_pkthdr.tso_segsz) {
2127 struct cpl_tx_pkt_lso *lso = (void *)(wr + 1);
2128 struct ether_header *eh;
2129 struct ip *ip;
2130 struct tcphdr *tcp;
2131
2132 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
2133 F_LSO_LAST_SLICE;
2134
2135 eh = mtod(m, struct ether_header *);
2136 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2137 ctrl |= V_LSO_ETHHDR_LEN(1);
2138 ip = (void *)((struct ether_vlan_header *)eh + 1);
2139 } else
2140 ip = (void *)(eh + 1);
2141
2142 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
2143 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
2144 V_LSO_TCPHDR_LEN(tcp->th_off);
2145
2146 lso->lso_ctrl = htobe32(ctrl);
2147 lso->ipid_ofst = htobe16(0);
2148 lso->mss = htobe16(m->m_pkthdr.tso_segsz);
2149 lso->seqno_offset = htobe32(0);
2150 lso->len = htobe32(pktlen);
2151
2152 cpl = (void *)(lso + 1);
2153
2154 txq->tso_wrs++;
2155 } else
2156 cpl = (void *)(wr + 1);
2157
2158 /* Checksum offload */
2159 ctrl1 = 0;
2160 if (!(m->m_pkthdr.csum_flags & CSUM_IP))
2161 ctrl1 |= F_TXPKT_IPCSUM_DIS;
2162 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
2163 ctrl1 |= F_TXPKT_L4CSUM_DIS;
2164 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
2165 txq->txcsum++; /* some hardware assistance provided */
2166
2167 /* VLAN tag insertion */
2168 if (m->m_flags & M_VLANTAG) {
2169 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
2170 txq->vlan_insertion++;
2171 }
2172
2173 /* CPL header */
2174 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
2175 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
2176 cpl->pack = 0;
2177 cpl->len = htobe16(pktlen);
2178 cpl->ctrl1 = htobe64(ctrl1);
2179
2180 /* Software descriptor */
2181 txsd = &txq->sdesc[eq->pidx];
2182 txsd->desc_used = ndesc;
2183
2184 eq->pending += ndesc;
2185 eq->avail -= ndesc;
2186 eq->pidx += ndesc;
2187 if (eq->pidx >= eq->cap)
2188 eq->pidx -= eq->cap;
2189
2190 /* SGL */
2191 dst = (void *)(cpl + 1);
2192 if (sgl->nsegs > 0) {
2193 txsd->credits = 1;
2194 txq->sgl_wrs++;
2195 write_sgl_to_txd(eq, sgl, &dst);
2196 } else {
2197 txsd->credits = 0;
2198 txq->imm_wrs++;
2199 for (; m; m = m->m_next) {
2200 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
2201#ifdef INVARIANTS
2202 pktlen -= m->m_len;
2203#endif
2204 }
2205#ifdef INVARIANTS
2206 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
2207#endif
2208
2209 }
2210
2211 txq->txpkt_wrs++;
2212 return (0);
2213}
2214
2215/*
2216 * Returns 0 to indicate that m has been accepted into a coalesced tx work
2217 * request. It has either been folded into txpkts or txpkts was flushed and m
2218 * has started a new coalesced work request (as the first frame in a fresh
2219 * txpkts).
2220 *
2221 * Returns non-zero to indicate a failure - caller is responsible for
2222 * transmitting m, if there was anything in txpkts it has been flushed.
2223 */
2224static int
2225add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
2226 struct mbuf *m, struct sgl *sgl)
2227{
2228 struct sge_eq *eq = &txq->eq;
2229 int can_coalesce;
2230 struct tx_sdesc *txsd;
2231 int flits;
2232
2233 TXQ_LOCK_ASSERT_OWNED(txq);
2234
2235 if (txpkts->npkt > 0) {
2236 flits = TXPKTS_PKT_HDR + sgl->nflits;
2237 can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
2238 txpkts->nflits + flits <= TX_WR_FLITS &&
2239 txpkts->nflits + flits <= eq->avail * 8 &&
2240 txpkts->plen + m->m_pkthdr.len < 65536;
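		/*
		 * i.e. never coalesce an LSO frame, keep the combined WR within
		 * TX_WR_FLITS and the descriptors still available, and keep the
		 * total payload within the 16-bit plen field of the WR.
		 */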
2241
2242 if (can_coalesce) {
2243 txpkts->npkt++;
2244 txpkts->nflits += flits;
2245 txpkts->plen += m->m_pkthdr.len;
2246
2247 txsd = &txq->sdesc[eq->pidx];
2248 txsd->credits++;
2249
2250 return (0);
2251 }
2252
2253 /*
2254 * Couldn't coalesce m into txpkts. The first order of business
2255 * is to send txpkts on its way. Then we'll revisit m.
2256 */
2257 write_txpkts_wr(txq, txpkts);
2258 }
2259
2260 /*
2261 * Check if we can start a new coalesced tx work request with m as
2262 * the first packet in it.
2263 */
2264
2265 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
2266
2267 flits = TXPKTS_WR_HDR + sgl->nflits;
2268 can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
2269 flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
2270
2271 if (can_coalesce == 0)
2272 return (EINVAL);
2273
2274 /*
2275 * Start a fresh coalesced tx WR with m as the first frame in it.
2276 */
2277 txpkts->npkt = 1;
2278 txpkts->nflits = flits;
2279 txpkts->flitp = &eq->desc[eq->pidx].flit[2];
2280 txpkts->plen = m->m_pkthdr.len;
2281
2282 txsd = &txq->sdesc[eq->pidx];
2283 txsd->credits = 1;
2284
2285 return (0);
2286}
2287
2288/*
2289 * Note that write_txpkts_wr can never run out of hardware descriptors (but
2290 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for
2291 * coalescing only if sufficient hardware descriptors are available.
2292 */
2293static void
2294write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
2295{
2296 struct sge_eq *eq = &txq->eq;
2297 struct fw_eth_tx_pkts_wr *wr;
2298 struct tx_sdesc *txsd;
2299 uint32_t ctrl;
2300 int ndesc;
2301
2302 TXQ_LOCK_ASSERT_OWNED(txq);
2303
2304 ndesc = howmany(txpkts->nflits, 8);
2305
2306 wr = (void *)&eq->desc[eq->pidx];
2307 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
2308 V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
2309 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
2310 if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) {
2311 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2312 eq->flags |= EQ_CRFLUSHED;
2313 }
2314 wr->equiq_to_len16 = htobe32(ctrl);
2315 wr->plen = htobe16(txpkts->plen);
2316 wr->npkt = txpkts->npkt;
2317 wr->r3 = wr->r4 = 0;
2318
2319 /* Everything else already written */
2320
2321 txsd = &txq->sdesc[eq->pidx];
2322 txsd->desc_used = ndesc;
2323
2324 KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
2325
2326 eq->pending += ndesc;
2327 eq->avail -= ndesc;
2328 eq->pidx += ndesc;
2329 if (eq->pidx >= eq->cap)
2330 eq->pidx -= eq->cap;
2331
2332 txq->txpkts_pkts += txpkts->npkt;
2333 txq->txpkts_wrs++;
2334 txpkts->npkt = 0; /* emptied */
2335}
2336
2337static inline void
2338write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
2339 struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
2340{
2341 struct ulp_txpkt *ulpmc;
2342 struct ulptx_idata *ulpsc;
2343 struct cpl_tx_pkt_core *cpl;
2344 struct sge_eq *eq = &txq->eq;
2345 uintptr_t flitp, start, end;
2346 uint64_t ctrl;
2347 caddr_t dst;
2348
2349 KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
2350
2351 start = (uintptr_t)eq->desc;
2352 end = (uintptr_t)eq->spg;
2353
2354 /* Checksum offload */
2355 ctrl = 0;
2356 if (!(m->m_pkthdr.csum_flags & CSUM_IP))
2357 ctrl |= F_TXPKT_IPCSUM_DIS;
2358 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
2359 ctrl |= F_TXPKT_L4CSUM_DIS;
2360 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
2361 txq->txcsum++; /* some hardware assistance provided */
2362
2363 /* VLAN tag insertion */
2364 if (m->m_flags & M_VLANTAG) {
2365 ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
2366 txq->vlan_insertion++;
2367 }
2368
2369 /*
2370 * The previous packet's SGL must have ended at a 16 byte boundary (this
2371 * is required by the firmware/hardware). It follows that flitp cannot
2372 * wrap around between the ULPTX master command and ULPTX subcommand (8
2373 * bytes each), and that it cannot wrap around in the middle of the
2374 * cpl_tx_pkt_core either.
2375 */
2376 flitp = (uintptr_t)txpkts->flitp;
2377 KASSERT((flitp & 0xf) == 0,
2378 ("%s: last SGL did not end at 16 byte boundary: %p",
2379 __func__, txpkts->flitp));
2380
2381 /* ULP master command */
2382 ulpmc = (void *)flitp;
2383 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
2384 V_ULP_TXPKT_FID(eq->iqid));
2385 ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
2386 sizeof(*cpl) + 8 * sgl->nflits, 16));
2387
2388 /* ULP subcommand */
2389 ulpsc = (void *)(ulpmc + 1);
2390 ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
2391 F_ULP_TX_SC_MORE);
2392 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
2393
2394 flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
2395 if (flitp == end)
2396 flitp = start;
2397
2398 /* CPL_TX_PKT */
2399 cpl = (void *)flitp;
2400 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
2401 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
2402 cpl->pack = 0;
2403 cpl->len = htobe16(m->m_pkthdr.len);
2404 cpl->ctrl1 = htobe64(ctrl);
2405
2406 flitp += sizeof(*cpl);
2407 if (flitp == end)
2408 flitp = start;
2409
2410 /* SGL for this frame */
2411 dst = (caddr_t)flitp;
2412 txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
2413 txpkts->flitp = (void *)dst;
2414
2415 KASSERT(((uintptr_t)dst & 0xf) == 0,
2416 ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
2417}
2418
2419/*
2420 * If the SGL ends on an address that is not 16 byte aligned, this function will
2421 * add a 0 filled flit at the end. It returns 1 in that case.
2422 */
2423static int
2424write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
2425{
2426 __be64 *flitp, *end;
2427 struct ulptx_sgl *usgl;
2428 bus_dma_segment_t *seg;
2429 int i, padded;
2430
2431 KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
2432 ("%s: bad SGL - nsegs=%d, nflits=%d",
2433 __func__, sgl->nsegs, sgl->nflits));
2434
2435 KASSERT(((uintptr_t)(*to) & 0xf) == 0,
2436 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
2437
2438 flitp = (__be64 *)(*to);
2439 end = flitp + sgl->nflits;
2440 seg = &sgl->seg[0];
2441 usgl = (void *)flitp;
2442
2443 /*
2444 * We start at a 16 byte boundary somewhere inside the tx descriptor
2445 * ring, so we're at least 16 bytes away from the status page. There is
2446 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
2447 */
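	/*
	 * The header written here is cmd_nsge + len0 + addr0, i.e. 16 bytes
	 * (2 flits); the remaining segments are packed two per
	 * ulptx_sge_pair below.
	 */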
2448
2449 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
2450 V_ULPTX_NSGE(sgl->nsegs));
2451 usgl->len0 = htobe32(seg->ds_len);
2452 usgl->addr0 = htobe64(seg->ds_addr);
2453 seg++;
2454
2455 if ((uintptr_t)end <= (uintptr_t)eq->spg) {
2456
2457 /* Won't wrap around at all */
2458
2459 for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
2460 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
2461 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
2462 }
2463 if (i & 1)
2464 usgl->sge[i / 2].len[1] = htobe32(0);
2465 } else {
2466
2467 /* Will wrap somewhere in the rest of the SGL */
2468
2469 /* 2 flits already written, write the rest flit by flit */
2470 flitp = (void *)(usgl + 1);
2471 for (i = 0; i < sgl->nflits - 2; i++) {
2472 if ((uintptr_t)flitp == (uintptr_t)eq->spg)
2473 flitp = (void *)eq->desc;
2474 *flitp++ = get_flit(seg, sgl->nsegs - 1, i);
2475 }
2476 end = flitp;
2477 }
2478
2479 if ((uintptr_t)end & 0xf) {
2480 *(uint64_t *)end = 0;
2481 end++;
2482 padded = 1;
2483 } else
2484 padded = 0;
2485
2486 if ((uintptr_t)end == (uintptr_t)eq->spg)
2487 *to = (void *)eq->desc;
2488 else
2489 *to = (void *)end;
2490
2491 return (padded);
2492}
2493
2494static inline void
2495copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
2496{
2497 if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
2498 bcopy(from, *to, len);
2499 (*to) += len;
2500 } else {
2501 int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
2502
2503 bcopy(from, *to, portion);
2504 from += portion;
2505 portion = len - portion; /* remaining */
2506 bcopy(from, (void *)eq->desc, portion);
2507 (*to) = (caddr_t)eq->desc + portion;
2508 }
2509}
2510
2511static inline void
2512ring_eq_db(struct adapter *sc, struct sge_eq *eq)
2513{
2514 wmb();
2515 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
2516 V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
2517 eq->pending = 0;
2518}
2519
2520static inline int
2521reclaimable(struct sge_eq *eq)
2522{
2523 unsigned int cidx;
2524
2525 cidx = eq->spg->cidx; /* stable snapshot */
2526 cidx = be16_to_cpu(cidx);
2527
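	/*
	 * e.g. with cap = 1024, a hardware cidx of 10 and a driver cidx of
	 * 1000, the hardware has wrapped and 10 + 1024 - 1000 = 34
	 * descriptors are reclaimable.
	 */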
2528 if (cidx >= eq->cidx)
2529 return (cidx - eq->cidx);
2530 else
2531 return (cidx + eq->cap - eq->cidx);
2532}
2533
2534/*
2535 * There are "can_reclaim" tx descriptors ready to be reclaimed. Reclaim as
2536 * many as possible but stop when there are around "n" mbufs to free.
2537 *
2538 * The actual number reclaimed is provided as the return value.
2539 */
2540static int
2541reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
2542{
2543 struct tx_sdesc *txsd;
2544 struct tx_map *txm;
2545 unsigned int reclaimed, maps;
2546 struct sge_eq *eq = &txq->eq;
2547
2548 EQ_LOCK_ASSERT_OWNED(eq);
2549
2550 if (can_reclaim == 0)
2551 can_reclaim = reclaimable(eq);
2552
2553 maps = reclaimed = 0;
2554 while (can_reclaim && maps < n) {
2555 int ndesc;
2556
2557 txsd = &txq->sdesc[eq->cidx];
2558 ndesc = txsd->desc_used;
2559
2560 /* Firmware doesn't return "partial" credits. */
2561 KASSERT(can_reclaim >= ndesc,
2562 ("%s: unexpected number of credits: %d, %d",
2563 __func__, can_reclaim, ndesc));
2564
2565 maps += txsd->credits;
2566
2567 reclaimed += ndesc;
2568 can_reclaim -= ndesc;
2569
2570 eq->cidx += ndesc;
2571 if (__predict_false(eq->cidx >= eq->cap))
2572 eq->cidx -= eq->cap;
2573 }
2574
2575 txm = &txq->maps[txq->map_cidx];
2576 if (maps)
2577 prefetch(txm->m);
2578
2579 eq->avail += reclaimed;
2580 KASSERT(eq->avail < eq->cap, /* avail tops out at (cap - 1) */
2581 ("%s: too many descriptors available", __func__));
2582
2583 txq->map_avail += maps;
2584 KASSERT(txq->map_avail <= txq->map_total,
2585 ("%s: too many maps available", __func__));
2586
2587 while (maps--) {
2588 struct tx_map *next;
2589
2590 next = txm + 1;
2591 if (__predict_false(txq->map_cidx + 1 == txq->map_total))
2592 next = txq->maps;
2593 prefetch(next->m);
2594
2595 bus_dmamap_unload(txq->tx_tag, txm->map);
2596 m_freem(txm->m);
2597 txm->m = NULL;
2598
2599 txm = next;
2600 if (__predict_false(++txq->map_cidx == txq->map_total))
2601 txq->map_cidx = 0;
2602 }
2603
2604 return (reclaimed);
2605}
2606
2607static void
2608write_eqflush_wr(struct sge_eq *eq)
2609{
2610 struct fw_eq_flush_wr *wr;
2611
2612 EQ_LOCK_ASSERT_OWNED(eq);
2613 KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
2614
2615 wr = (void *)&eq->desc[eq->pidx];
2616 bzero(wr, sizeof(*wr));
2617 wr->opcode = FW_EQ_FLUSH_WR;
2618 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
2619 F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
2620
2621 eq->flags |= EQ_CRFLUSHED;
2622 eq->pending++;
2623 eq->avail--;
2624 if (++eq->pidx == eq->cap)
2625 eq->pidx = 0;
2626}
2627
2628static __be64
2629get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
2630{
2631 int i = (idx / 3) * 2;
2632
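	/*
	 * Flits come in groups of three per segment pair: idx % 3 == 0
	 * packs the two lengths, 1 and 2 return the matching addresses,
	 * and i jumps ahead by two segments for the next group.
	 */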
2633 switch (idx % 3) {
2634 case 0: {
2635 __be64 rc;
2636
2637 rc = htobe32(sgl[i].ds_len);
2638 if (i + 1 < nsegs)
2639 rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
2640
2641 return (rc);
2642 }
2643 case 1:
2644 return htobe64(sgl[i].ds_addr);
2645 case 2:
2646 return htobe64(sgl[i + 1].ds_addr);
2647 }
2648
2649 return (0);
2650}
2651
2652static void
2653set_fl_tag_idx(struct sge_fl *fl, int mtu)
2654{
2655 int i;
2656
2657 FL_LOCK_ASSERT_OWNED(fl);
2658
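	/*
	 * Pick the smallest cluster that holds a max-sized frame plus the
	 * FL_PKTSHIFT pad; e.g. a 1500 byte MTU (assuming FL_PKTSHIFT is 2)
	 * selects the first FL_BUF_SIZE of at least 1502 bytes.
	 */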
2659 for (i = 0; i < FL_BUF_SIZES - 1; i++) {
2660 if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT))
2661 break;
2662 }
2663
2664 fl->tag_idx = i;
2665}
2666
2667static int
2668handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl)
2669{
2670 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
2671 struct sge *s = &sc->sge;
2672 struct sge_txq *txq;
2673 struct port_info *pi;
2674
2675 txq = (void *)s->eqmap[qid - s->eq_start];
2676 TXQ_LOCK(txq);
2677 if (txq->eq.flags & EQ_CRFLUSHED) {
2678 pi = txq->ifp->if_softc;
2679 taskqueue_enqueue(pi->tq, &txq->resume_tx);
2680 txq->egr_update++;
2681 } else
2682 wakeup_one(txq); /* txq is going away, wakeup free_txq */
2683 TXQ_UNLOCK(txq);
2684
2685 return (0);
2686}
2687
2688/*
2689 * m0 is freed on successful transmission.
2690 */
2691static int
2692ctrl_tx(struct adapter *sc, struct sge_ctrlq *ctrlq, struct mbuf *m0)
2693{
2694 struct sge_eq *eq = &ctrlq->eq;
2695 int rc = 0, ndesc;
2696 int can_reclaim;
2697 caddr_t dst;
2698 struct mbuf *m;
2699
2700 M_ASSERTPKTHDR(m0);
2701
2702 if (m0->m_pkthdr.len > SGE_MAX_WR_LEN) {
2703 ctrlq->too_long++;
2704 return (EMSGSIZE);
2705 }
2706 ndesc = howmany(m0->m_pkthdr.len, CTRL_EQ_ESIZE);
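	/*
	 * Assuming the usual 64 byte control queue descriptor
	 * (CTRL_EQ_ESIZE), a 100 byte WR would span 2 descriptors here.
	 */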
2707
2708 EQ_LOCK(eq);
2709
2710 can_reclaim = reclaimable(eq);
2711 eq->cidx += can_reclaim;
2712 eq->avail += can_reclaim;
2713 if (__predict_false(eq->cidx >= eq->cap))
2714 eq->cidx -= eq->cap;
2715
2716 if (eq->avail < ndesc) {
2717 rc = EAGAIN;
2718 ctrlq->no_desc++;
2719 goto failed;
2720 }
2721
2722 dst = (void *)&eq->desc[eq->pidx];
2723 for (m = m0; m; m = m->m_next)
2724 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
2725
2726 eq->pidx += ndesc;
2727 if (__predict_false(eq->pidx >= eq->cap))
2728 eq->pidx -= eq->cap;
2729
2730 eq->pending += ndesc;
2731 ctrlq->total_wrs++;
2732 ring_eq_db(sc, eq);
2733failed:
2734 EQ_UNLOCK(eq);
2735 if (rc == 0)
2736 m_freem(m0);
2737
2738 return (rc);
2739}
2740
2741static int
2742sysctl_abs_id(SYSCTL_HANDLER_ARGS)
2743{
2744 uint16_t *id = arg1;
2745 int i = *id;
2746
2747 return sysctl_handle_int(oidp, &i, 0, req);
2748}
1575 V_FW_EQ_ETH_CMD_IQID(eq->iqid));
1576 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1577 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1578 V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1579 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
1580 c.eqaddr = htobe64(eq->ba);
1581
1582 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
1583 if (rc != 0) {
1584 device_printf(pi->dev,
1585 "failed to create egress queue: %d\n", rc);
1586 return (rc);
1587 }
1588
1589 eq->pidx = eq->cidx = 0;
1590 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
1591 eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
1592
1593 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1594 KASSERT(cntxt_id < sc->sge.neq,
1595 ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1596 cntxt_id, sc->sge.neq - 1));
1597 sc->sge.eqmap[cntxt_id] = eq;
1598
1599 children = SYSCTL_CHILDREN(pi->oid_txq);
1600
1601 snprintf(name, sizeof(name), "%d", idx);
1602 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
1603 NULL, "tx queue");
1604 children = SYSCTL_CHILDREN(oid);
1605
1606 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
1607 &txq->txcsum, "# of times hardware assisted with checksum");
1608 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
1609 CTLFLAG_RD, &txq->vlan_insertion,
1610 "# of times hardware inserted 802.1Q tag");
1611 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
1612 &txq->tso_wrs, "# of IPv4 TSO work requests");
1613 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
1614 &txq->imm_wrs, "# of work requests with immediate data");
1615 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
1616 &txq->sgl_wrs, "# of work requests with direct SGL");
1617 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
1618 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
1619 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
1620 &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
1621 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
1622 &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
1623
1624 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
1625 &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
1626 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
1627 &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
1628 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
1629 &txq->egr_update, 0, "egress update notifications from the SGE");
1630
1631 return (rc);
1632}
1633
1634static int
1635free_txq(struct port_info *pi, struct sge_txq *txq)
1636{
1637 int rc;
1638 struct adapter *sc = pi->adapter;
1639 struct sge_eq *eq = &txq->eq;
1640
1641 if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
1642
1643 /*
1644 * Wait for the response to a credit flush if there's one
1645 * pending. Clearing the flag tells handle_sge_egr_update or
1646 * cxgbe_txq_start (depending on how far the response has made
1647 * it) that they should ignore the response and wake up free_txq
1648 * instead.
1649 *
1650 * The interface has been marked down by the time we get here
1651 * (both IFF_UP and IFF_DRV_RUNNING cleared). qflush has
1652 * emptied the tx buf_rings and we know nothing new is being
1653 * queued for tx so we don't have to worry about a new credit
1654 * flush request.
1655 */
1656 TXQ_LOCK(txq);
1657 if (eq->flags & EQ_CRFLUSHED) {
1658 eq->flags &= ~EQ_CRFLUSHED;
1659 msleep(txq, &eq->eq_lock, 0, "crflush", 0);
1660 }
1661 TXQ_UNLOCK(txq);
1662
1663 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
1664 if (rc != 0) {
1665 device_printf(pi->dev,
1666 "failed to free egress queue %p: %d\n", eq, rc);
1667 return (rc);
1668 }
1669 eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
1670 }
1671
1672 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
1673
1674 free(txq->sdesc, M_CXGBE);
1675
1676 if (txq->maps)
1677 free_tx_maps(txq);
1678
1679 buf_ring_free(txq->br, M_CXGBE);
1680
1681 if (txq->tx_tag)
1682 bus_dma_tag_destroy(txq->tx_tag);
1683
1684 if (mtx_initialized(&eq->eq_lock))
1685 mtx_destroy(&eq->eq_lock);
1686
1687 bzero(txq, sizeof(*txq));
1688 return (0);
1689}
1690
1691static void
1692oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1693{
1694 bus_addr_t *ba = arg;
1695
1696 KASSERT(nseg == 1,
1697 ("%s meant for single segment mappings only.", __func__));
1698
1699 *ba = error ? 0 : segs->ds_addr;
1700}
1701
1702static inline bool
1703is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
1704{
1705 *ctrl = (void *)((uintptr_t)iq->cdesc +
1706 (iq->esize - sizeof(struct rsp_ctrl)));
1707
1708 return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
1709}
1710
1711static inline void
1712iq_next(struct sge_iq *iq)
1713{
1714 iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
1715 if (__predict_false(++iq->cidx == iq->qsize - 1)) {
1716 iq->cidx = 0;
1717 iq->gen ^= 1;
1718 iq->cdesc = iq->desc;
1719 }
1720}
1721
1722#define FL_HW_IDX(x) ((x) >> 3)
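/*
 * The free list doorbell and FL_HW_IDX work in units of 8 buffers; e.g.
 * with fl->pending = 37, ring_fl_db below can announce at most 4 units
 * (32 buffers) and leaves 5 pending.
 */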
1723static inline void
1724ring_fl_db(struct adapter *sc, struct sge_fl *fl)
1725{
1726 int ndesc = fl->pending / 8;
1727
1728 if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
1729 ndesc--; /* hold back one credit */
1730
1731 if (ndesc <= 0)
1732 return; /* nothing to do */
1733
1734 wmb();
1735
1736 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
1737 V_QID(fl->cntxt_id) | V_PIDX(ndesc));
1738 fl->pending -= ndesc * 8;
1739}
1740
1741/*
 1742 * Fill up the freelist by up to nbufs and ring its doorbell if the number of
1743 * buffers ready to be handed to the hardware >= dbthresh.
1744 */
1745static void
1746refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs, int dbthresh)
1747{
1748 __be64 *d = &fl->desc[fl->pidx];
1749 struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
1750 bus_dma_tag_t tag;
1751 bus_addr_t pa;
1752 caddr_t cl;
1753 int rc;
1754
1755 FL_LOCK_ASSERT_OWNED(fl);
1756
1757 if (nbufs < 0 || nbufs > fl->needed)
1758 nbufs = fl->needed;
1759
1760 while (nbufs--) {
1761
1762 if (sd->cl != NULL) {
1763
1764 /*
1765 * This happens when a frame small enough to fit
1766 * entirely in an mbuf was received in cl last time.
1767 * We'd held on to cl and can reuse it now. Note that
1768 * we reuse a cluster of the old size if fl->tag_idx is
1769 * no longer the same as sd->tag_idx.
1770 */
1771
1772 KASSERT(*d == sd->ba_tag,
1773 ("%s: recyling problem at pidx %d",
1774 __func__, fl->pidx));
1775
1776 d++;
1777 goto recycled;
1778 }
1779
1780
1781 if (fl->tag_idx != sd->tag_idx) {
1782 bus_dmamap_t map;
1783 bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
1784 bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
1785
1786 /*
1787 * An MTU change can get us here. Discard the old map
1788 * which was created with the old tag, but only if
1789 * we're able to get a new one.
1790 */
1791 rc = bus_dmamap_create(newtag, 0, &map);
1792 if (rc == 0) {
1793 bus_dmamap_destroy(oldtag, sd->map);
1794 sd->map = map;
1795 sd->tag_idx = fl->tag_idx;
1796 }
1797 }
1798
1799 tag = fl->tag[sd->tag_idx];
1800
1801 cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
1802 if (cl == NULL)
1803 break;
1804
1805 rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx),
1806 oneseg_dma_callback, &pa, 0);
1807 if (rc != 0 || pa == 0) {
1808 fl->dmamap_failed++;
1809 uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
1810 break;
1811 }
1812
1813 sd->cl = cl;
1814 *d++ = htobe64(pa | sd->tag_idx);
1815
1816#ifdef INVARIANTS
1817 sd->ba_tag = htobe64(pa | sd->tag_idx);
1818#endif
1819
1820recycled:
1821 /* sd->m is never recycled, should always be NULL */
1822 KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__));
1823
1824 sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
1825 if (sd->m == NULL)
1826 break;
1827
1828 fl->pending++;
1829 fl->needed--;
1830 sd++;
1831 if (++fl->pidx == fl->cap) {
1832 fl->pidx = 0;
1833 sd = fl->sdesc;
1834 d = fl->desc;
1835 }
1836 }
1837
1838 if (fl->pending >= dbthresh)
1839 ring_fl_db(sc, fl);
1840}
1841
1842static int
1843alloc_fl_sdesc(struct sge_fl *fl)
1844{
1845 struct fl_sdesc *sd;
1846 bus_dma_tag_t tag;
1847 int i, rc;
1848
1849 FL_LOCK_ASSERT_OWNED(fl);
1850
1851 fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
1852 M_ZERO | M_WAITOK);
1853
1854 tag = fl->tag[fl->tag_idx];
1855 sd = fl->sdesc;
1856 for (i = 0; i < fl->cap; i++, sd++) {
1857
1858 sd->tag_idx = fl->tag_idx;
1859 rc = bus_dmamap_create(tag, 0, &sd->map);
1860 if (rc != 0)
1861 goto failed;
1862 }
1863
1864 return (0);
1865failed:
1866 while (--i >= 0) {
1867 sd--;
1868 bus_dmamap_destroy(tag, sd->map);
1869 if (sd->m) {
1870 m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
1871 m_free(sd->m);
1872 sd->m = NULL;
1873 }
1874 }
1875 KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
1876
1877 free(fl->sdesc, M_CXGBE);
1878 fl->sdesc = NULL;
1879
1880 return (rc);
1881}
1882
1883static void
1884free_fl_sdesc(struct sge_fl *fl)
1885{
1886 struct fl_sdesc *sd;
1887 int i;
1888
1889 FL_LOCK_ASSERT_OWNED(fl);
1890
1891 sd = fl->sdesc;
1892 for (i = 0; i < fl->cap; i++, sd++) {
1893
1894 if (sd->m) {
1895 m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
1896 m_free(sd->m);
1897 sd->m = NULL;
1898 }
1899
1900 if (sd->cl) {
1901 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
1902 uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
1903 sd->cl = NULL;
1904 }
1905
1906 bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
1907 }
1908
1909 free(fl->sdesc, M_CXGBE);
1910 fl->sdesc = NULL;
1911}
1912
1913static int
1914alloc_tx_maps(struct sge_txq *txq)
1915{
1916 struct tx_map *txm;
1917 int i, rc, count;
1918
1919 /*
1920 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
1921 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is
1922 * sized for the worst case.
1923 */
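	/*
	 * e.g. a 1024 entry eq gets 1024 * 10 / 8 = 1280 DMA maps.
	 */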
1924 count = txq->eq.qsize * 10 / 8;
1925 txq->map_total = txq->map_avail = count;
1926 txq->map_cidx = txq->map_pidx = 0;
1927
1928 txq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
1929 M_ZERO | M_WAITOK);
1930
1931 txm = txq->maps;
1932 for (i = 0; i < count; i++, txm++) {
1933 rc = bus_dmamap_create(txq->tx_tag, 0, &txm->map);
1934 if (rc != 0)
1935 goto failed;
1936 }
1937
1938 return (0);
1939failed:
1940 while (--i >= 0) {
1941 txm--;
1942 bus_dmamap_destroy(txq->tx_tag, txm->map);
1943 }
1944 KASSERT(txm == txq->maps, ("%s: EDOOFUS", __func__));
1945
1946 free(txq->maps, M_CXGBE);
1947 txq->maps = NULL;
1948
1949 return (rc);
1950}
1951
1952static void
1953free_tx_maps(struct sge_txq *txq)
1954{
1955 struct tx_map *txm;
1956 int i;
1957
1958 txm = txq->maps;
1959 for (i = 0; i < txq->map_total; i++, txm++) {
1960
1961 if (txm->m) {
1962 bus_dmamap_unload(txq->tx_tag, txm->map);
1963 m_freem(txm->m);
1964 txm->m = NULL;
1965 }
1966
1967 bus_dmamap_destroy(txq->tx_tag, txm->map);
1968 }
1969
1970 free(txq->maps, M_CXGBE);
1971 txq->maps = NULL;
1972}
1973
1974/*
1975 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're
 1976 * willing to use up to 2 hardware descriptors which means a maximum of 96 bytes
1977 * of immediate data.
1978 */
1979#define IMM_LEN ( \
1980 2 * TX_EQ_ESIZE \
1981 - sizeof(struct fw_eth_tx_pkt_wr) \
1982 - sizeof(struct cpl_tx_pkt_core))
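/*
 * With 64 byte tx descriptors this works out to 2 * 64 - 16 - 16 = 96
 * bytes, matching the comment above (fw_eth_tx_pkt_wr and
 * cpl_tx_pkt_core are 16 bytes each).
 */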
1983
1984/*
1985 * Returns non-zero on failure, no need to cleanup anything in that case.
1986 *
1987 * Note 1: We always try to defrag the mbuf if required and return EFBIG only
1988 * if the resulting chain still won't fit in a tx descriptor.
1989 *
1990 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
1991 * does not have the TCP header in it.
1992 */
1993static int
1994get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
1995 int sgl_only)
1996{
1997 struct mbuf *m = *fp;
1998 struct tx_map *txm;
1999 int rc, defragged = 0, n;
2000
2001 TXQ_LOCK_ASSERT_OWNED(txq);
2002
2003 if (m->m_pkthdr.tso_segsz)
2004 sgl_only = 1; /* Do not allow immediate data with LSO */
2005
2006start: sgl->nsegs = 0;
2007
2008 if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
2009 return (0); /* nsegs = 0 tells caller to use imm. tx */
2010
2011 if (txq->map_avail == 0) {
2012 txq->no_dmamap++;
2013 return (ENOMEM);
2014 }
2015 txm = &txq->maps[txq->map_pidx];
2016
2017 if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
2018 *fp = m_pullup(m, 50);
2019 m = *fp;
2020 if (m == NULL)
2021 return (ENOBUFS);
2022 }
2023
2024 rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
2025 &sgl->nsegs, BUS_DMA_NOWAIT);
2026 if (rc == EFBIG && defragged == 0) {
2027 m = m_defrag(m, M_DONTWAIT);
2028 if (m == NULL)
2029 return (EFBIG);
2030
2031 defragged = 1;
2032 *fp = m;
2033 goto start;
2034 }
2035 if (rc != 0)
2036 return (rc);
2037
2038 txm->m = m;
2039 txq->map_avail--;
2040 if (++txq->map_pidx == txq->map_total)
2041 txq->map_pidx = 0;
2042
2043 KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
2044 ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
2045
2046 /*
2047 * Store the # of flits required to hold this frame's SGL in nflits. An
2048 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
2049 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used
2050 * then len1 must be set to 0.
2051 */
2052 n = sgl->nsegs - 1;
2053 sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
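	/*
	 * e.g. nsegs = 5: the first segment sits in the ULPTX header
	 * (2 flits) and the remaining n = 4 fill two sge pairs of 3 flits
	 * each, so nflits = 6 + 0 + 2 = 8.
	 */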
2054
2055 return (0);
2056}
2057
2058
2059/*
2060 * Releases all the txq resources used up in the specified sgl.
2061 */
2062static int
2063free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
2064{
2065 struct tx_map *txm;
2066
2067 TXQ_LOCK_ASSERT_OWNED(txq);
2068
2069 if (sgl->nsegs == 0)
2070 return (0); /* didn't use any map */
2071
2072 /* 1 pkt uses exactly 1 map, back it out */
2073
2074 txq->map_avail++;
2075 if (txq->map_pidx > 0)
2076 txq->map_pidx--;
2077 else
2078 txq->map_pidx = txq->map_total - 1;
2079
2080 txm = &txq->maps[txq->map_pidx];
2081 bus_dmamap_unload(txq->tx_tag, txm->map);
2082 txm->m = NULL;
2083
2084 return (0);
2085}
2086
2087static int
2088write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
2089 struct sgl *sgl)
2090{
2091 struct sge_eq *eq = &txq->eq;
2092 struct fw_eth_tx_pkt_wr *wr;
2093 struct cpl_tx_pkt_core *cpl;
2094 uint32_t ctrl; /* used in many unrelated places */
2095 uint64_t ctrl1;
2096 int nflits, ndesc, pktlen;
2097 struct tx_sdesc *txsd;
2098 caddr_t dst;
2099
2100 TXQ_LOCK_ASSERT_OWNED(txq);
2101
2102 pktlen = m->m_pkthdr.len;
2103
2104 /*
2105 * Do we have enough flits to send this frame out?
2106 */
2107 ctrl = sizeof(struct cpl_tx_pkt_core);
2108 if (m->m_pkthdr.tso_segsz) {
2109 nflits = TXPKT_LSO_WR_HDR;
2110 ctrl += sizeof(struct cpl_tx_pkt_lso);
2111 } else
2112 nflits = TXPKT_WR_HDR;
2113 if (sgl->nsegs > 0)
2114 nflits += sgl->nflits;
2115 else {
2116 nflits += howmany(pktlen, 8);
2117 ctrl += pktlen;
2118 }
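	/*
	 * Each descriptor holds 8 flits; e.g. a WR needing nflits = 12
	 * takes ndesc = 2 descriptors and advertises howmany(12, 2) = 6
	 * 16-byte units in LEN16.
	 */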
2119 ndesc = howmany(nflits, 8);
2120 if (ndesc > eq->avail)
2121 return (ENOMEM);
2122
2123 /* Firmware work request header */
2124 wr = (void *)&eq->desc[eq->pidx];
2125 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
2126 V_FW_WR_IMMDLEN(ctrl));
2127 ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
2128 if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) {
2129 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2130 eq->flags |= EQ_CRFLUSHED;
2131 }
2132
2133 wr->equiq_to_len16 = htobe32(ctrl);
2134 wr->r3 = 0;
2135
2136 if (m->m_pkthdr.tso_segsz) {
2137 struct cpl_tx_pkt_lso *lso = (void *)(wr + 1);
2138 struct ether_header *eh;
2139 struct ip *ip;
2140 struct tcphdr *tcp;
2141
2142 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
2143 F_LSO_LAST_SLICE;
2144
2145 eh = mtod(m, struct ether_header *);
2146 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2147 ctrl |= V_LSO_ETHHDR_LEN(1);
2148 ip = (void *)((struct ether_vlan_header *)eh + 1);
2149 } else
2150 ip = (void *)(eh + 1);
2151
2152 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
2153 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
2154 V_LSO_TCPHDR_LEN(tcp->th_off);
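		/*
		 * ip_hl and th_off count 32-bit words, so a frame without IP
		 * or TCP options reports 5 each, i.e. 20 byte headers.
		 */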
2155
2156 lso->lso_ctrl = htobe32(ctrl);
2157 lso->ipid_ofst = htobe16(0);
2158 lso->mss = htobe16(m->m_pkthdr.tso_segsz);
2159 lso->seqno_offset = htobe32(0);
2160 lso->len = htobe32(pktlen);
2161
2162 cpl = (void *)(lso + 1);
2163
2164 txq->tso_wrs++;
2165 } else
2166 cpl = (void *)(wr + 1);
2167
2168 /* Checksum offload */
2169 ctrl1 = 0;
2170 if (!(m->m_pkthdr.csum_flags & CSUM_IP))
2171 ctrl1 |= F_TXPKT_IPCSUM_DIS;
2172 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
2173 ctrl1 |= F_TXPKT_L4CSUM_DIS;
2174 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
2175 txq->txcsum++; /* some hardware assistance provided */
2176
2177 /* VLAN tag insertion */
2178 if (m->m_flags & M_VLANTAG) {
2179 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
2180 txq->vlan_insertion++;
2181 }
2182
2183 /* CPL header */
2184 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
2185 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
2186 cpl->pack = 0;
2187 cpl->len = htobe16(pktlen);
2188 cpl->ctrl1 = htobe64(ctrl1);
2189
2190 /* Software descriptor */
2191 txsd = &txq->sdesc[eq->pidx];
2192 txsd->desc_used = ndesc;
2193
2194 eq->pending += ndesc;
2195 eq->avail -= ndesc;
2196 eq->pidx += ndesc;
2197 if (eq->pidx >= eq->cap)
2198 eq->pidx -= eq->cap;
2199
2200 /* SGL */
2201 dst = (void *)(cpl + 1);
2202 if (sgl->nsegs > 0) {
2203 txsd->credits = 1;
2204 txq->sgl_wrs++;
2205 write_sgl_to_txd(eq, sgl, &dst);
2206 } else {
2207 txsd->credits = 0;
2208 txq->imm_wrs++;
2209 for (; m; m = m->m_next) {
2210 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
2211#ifdef INVARIANTS
2212 pktlen -= m->m_len;
2213#endif
2214 }
2215#ifdef INVARIANTS
2216 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
2217#endif
2218
2219 }
2220
2221 txq->txpkt_wrs++;
2222 return (0);
2223}
2224
2225/*
2226 * Returns 0 to indicate that m has been accepted into a coalesced tx work
2227 * request. It has either been folded into txpkts or txpkts was flushed and m
2228 * has started a new coalesced work request (as the first frame in a fresh
2229 * txpkts).
2230 *
2231 * Returns non-zero to indicate a failure - caller is responsible for
2232 * transmitting m, if there was anything in txpkts it has been flushed.
2233 */
2234static int
2235add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
2236 struct mbuf *m, struct sgl *sgl)
2237{
2238 struct sge_eq *eq = &txq->eq;
2239 int can_coalesce;
2240 struct tx_sdesc *txsd;
2241 int flits;
2242
2243 TXQ_LOCK_ASSERT_OWNED(txq);
2244
2245 if (txpkts->npkt > 0) {
2246 flits = TXPKTS_PKT_HDR + sgl->nflits;
2247 can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
2248 txpkts->nflits + flits <= TX_WR_FLITS &&
2249 txpkts->nflits + flits <= eq->avail * 8 &&
2250 txpkts->plen + m->m_pkthdr.len < 65536;
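		/*
		 * The 65536 cap exists because the txpkts WR carries plen in
		 * a 16 bit field; the flit checks keep the coalesced WR
		 * within TX_WR_FLITS and within the descriptors still
		 * available.
		 */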
2251
2252 if (can_coalesce) {
2253 txpkts->npkt++;
2254 txpkts->nflits += flits;
2255 txpkts->plen += m->m_pkthdr.len;
2256
2257 txsd = &txq->sdesc[eq->pidx];
2258 txsd->credits++;
2259
2260 return (0);
2261 }
2262
2263 /*
2264 * Couldn't coalesce m into txpkts. The first order of business
2265 * is to send txpkts on its way. Then we'll revisit m.
2266 */
2267 write_txpkts_wr(txq, txpkts);
2268 }
2269
2270 /*
2271 * Check if we can start a new coalesced tx work request with m as
2272 * the first packet in it.
2273 */
2274
2275 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
2276
2277 flits = TXPKTS_WR_HDR + sgl->nflits;
2278 can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
2279 flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
2280
2281 if (can_coalesce == 0)
2282 return (EINVAL);
2283
2284 /*
2285 * Start a fresh coalesced tx WR with m as the first frame in it.
2286 */
2287 txpkts->npkt = 1;
2288 txpkts->nflits = flits;
2289 txpkts->flitp = &eq->desc[eq->pidx].flit[2];
2290 txpkts->plen = m->m_pkthdr.len;
2291
2292 txsd = &txq->sdesc[eq->pidx];
2293 txsd->credits = 1;
2294
2295 return (0);
2296}
2297
2298/*
2299 * Note that write_txpkts_wr can never run out of hardware descriptors (but
2300 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for
2301 * coalescing only if sufficient hardware descriptors are available.
2302 */
2303static void
2304write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
2305{
2306 struct sge_eq *eq = &txq->eq;
2307 struct fw_eth_tx_pkts_wr *wr;
2308 struct tx_sdesc *txsd;
2309 uint32_t ctrl;
2310 int ndesc;
2311
2312 TXQ_LOCK_ASSERT_OWNED(txq);
2313
2314 ndesc = howmany(txpkts->nflits, 8);
2315
2316 wr = (void *)&eq->desc[eq->pidx];
2317 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
2318 V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
2319 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
2320 if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) {
2321 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2322 eq->flags |= EQ_CRFLUSHED;
2323 }
2324 wr->equiq_to_len16 = htobe32(ctrl);
2325 wr->plen = htobe16(txpkts->plen);
2326 wr->npkt = txpkts->npkt;
2327 wr->r3 = wr->r4 = 0;
2328
2329 /* Everything else already written */
2330
2331 txsd = &txq->sdesc[eq->pidx];
2332 txsd->desc_used = ndesc;
2333
2334 KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
2335
2336 eq->pending += ndesc;
2337 eq->avail -= ndesc;
2338 eq->pidx += ndesc;
2339 if (eq->pidx >= eq->cap)
2340 eq->pidx -= eq->cap;
2341
2342 txq->txpkts_pkts += txpkts->npkt;
2343 txq->txpkts_wrs++;
2344 txpkts->npkt = 0; /* emptied */
2345}
2346
2347static inline void
2348write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
2349 struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
2350{
2351 struct ulp_txpkt *ulpmc;
2352 struct ulptx_idata *ulpsc;
2353 struct cpl_tx_pkt_core *cpl;
2354 struct sge_eq *eq = &txq->eq;
2355 uintptr_t flitp, start, end;
2356 uint64_t ctrl;
2357 caddr_t dst;
2358
2359 KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
2360
2361 start = (uintptr_t)eq->desc;
2362 end = (uintptr_t)eq->spg;
2363
2364 /* Checksum offload */
2365 ctrl = 0;
2366 if (!(m->m_pkthdr.csum_flags & CSUM_IP))
2367 ctrl |= F_TXPKT_IPCSUM_DIS;
2368 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
2369 ctrl |= F_TXPKT_L4CSUM_DIS;
2370 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
2371 txq->txcsum++; /* some hardware assistance provided */
2372
2373 /* VLAN tag insertion */
2374 if (m->m_flags & M_VLANTAG) {
2375 ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
2376 txq->vlan_insertion++;
2377 }
2378
2379 /*
2380 * The previous packet's SGL must have ended at a 16 byte boundary (this
2381 * is required by the firmware/hardware). It follows that flitp cannot
2382 * wrap around between the ULPTX master command and ULPTX subcommand (8
 2383 * bytes each), and that it cannot wrap around in the middle of the
2384 * cpl_tx_pkt_core either.
2385 */
2386 flitp = (uintptr_t)txpkts->flitp;
2387 KASSERT((flitp & 0xf) == 0,
2388 ("%s: last SGL did not end at 16 byte boundary: %p",
2389 __func__, txpkts->flitp));
2390
2391 /* ULP master command */
2392 ulpmc = (void *)flitp;
2393 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
2394 V_ULP_TXPKT_FID(eq->iqid));
2395 ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
2396 sizeof(*cpl) + 8 * sgl->nflits, 16));
2397
2398 /* ULP subcommand */
2399 ulpsc = (void *)(ulpmc + 1);
2400 ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
2401 F_ULP_TX_SC_MORE);
2402 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
2403
2404 flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
2405 if (flitp == end)
2406 flitp = start;
2407
2408 /* CPL_TX_PKT */
2409 cpl = (void *)flitp;
2410 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
2411 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
2412 cpl->pack = 0;
2413 cpl->len = htobe16(m->m_pkthdr.len);
2414 cpl->ctrl1 = htobe64(ctrl);
2415
2416 flitp += sizeof(*cpl);
2417 if (flitp == end)
2418 flitp = start;
2419
2420 /* SGL for this frame */
2421 dst = (caddr_t)flitp;
2422 txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
2423 txpkts->flitp = (void *)dst;
2424
2425 KASSERT(((uintptr_t)dst & 0xf) == 0,
2426 ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
2427}
2428
2429/*
2430 * If the SGL ends on an address that is not 16 byte aligned, this function will
2431 * add a 0 filled flit at the end. It returns 1 in that case.
2432 */
2433static int
2434write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
2435{
2436 __be64 *flitp, *end;
2437 struct ulptx_sgl *usgl;
2438 bus_dma_segment_t *seg;
2439 int i, padded;
2440
2441 KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
2442 ("%s: bad SGL - nsegs=%d, nflits=%d",
2443 __func__, sgl->nsegs, sgl->nflits));
2444
2445 KASSERT(((uintptr_t)(*to) & 0xf) == 0,
2446 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
2447
2448 flitp = (__be64 *)(*to);
2449 end = flitp + sgl->nflits;
2450 seg = &sgl->seg[0];
2451 usgl = (void *)flitp;
2452
2453 /*
2454 * We start at a 16 byte boundary somewhere inside the tx descriptor
2455 * ring, so we're at least 16 bytes away from the status page. There is
2456 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
2457 */
2458
2459 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
2460 V_ULPTX_NSGE(sgl->nsegs));
2461 usgl->len0 = htobe32(seg->ds_len);
2462 usgl->addr0 = htobe64(seg->ds_addr);
2463 seg++;
2464
2465 if ((uintptr_t)end <= (uintptr_t)eq->spg) {
2466
2467 /* Won't wrap around at all */
2468
2469 for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
2470 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
2471 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
2472 }
2473 if (i & 1)
2474 usgl->sge[i / 2].len[1] = htobe32(0);
2475 } else {
2476
2477 /* Will wrap somewhere in the rest of the SGL */
2478
2479 /* 2 flits already written, write the rest flit by flit */
2480 flitp = (void *)(usgl + 1);
2481 for (i = 0; i < sgl->nflits - 2; i++) {
2482 if ((uintptr_t)flitp == (uintptr_t)eq->spg)
2483 flitp = (void *)eq->desc;
2484 *flitp++ = get_flit(seg, sgl->nsegs - 1, i);
2485 }
2486 end = flitp;
2487 }
2488
2489 if ((uintptr_t)end & 0xf) {
2490 *(uint64_t *)end = 0;
2491 end++;
2492 padded = 1;
2493 } else
2494 padded = 0;
2495
2496 if ((uintptr_t)end == (uintptr_t)eq->spg)
2497 *to = (void *)eq->desc;
2498 else
2499 *to = (void *)end;
2500
2501 return (padded);
2502}
2503
2504static inline void
2505copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
2506{
2507 if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
2508 bcopy(from, *to, len);
2509 (*to) += len;
2510 } else {
2511 int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
2512
2513 bcopy(from, *to, portion);
2514 from += portion;
2515 portion = len - portion; /* remaining */
2516 bcopy(from, (void *)eq->desc, portion);
2517 (*to) = (caddr_t)eq->desc + portion;
2518 }
2519}
2520
2521static inline void
2522ring_eq_db(struct adapter *sc, struct sge_eq *eq)
2523{
2524 wmb();
2525 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
2526 V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
2527 eq->pending = 0;
2528}
2529
2530static inline int
2531reclaimable(struct sge_eq *eq)
2532{
2533 unsigned int cidx;
2534
2535 cidx = eq->spg->cidx; /* stable snapshot */
2536 cidx = be16_to_cpu(cidx);
2537
2538 if (cidx >= eq->cidx)
2539 return (cidx - eq->cidx);
2540 else
2541 return (cidx + eq->cap - eq->cidx);
2542}
2543
2544/*
2545 * There are "can_reclaim" tx descriptors ready to be reclaimed. Reclaim as
2546 * many as possible but stop when there are around "n" mbufs to free.
2547 *
2548 * The actual number reclaimed is provided as the return value.
2549 */
2550static int
2551reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
2552{
2553 struct tx_sdesc *txsd;
2554 struct tx_map *txm;
2555 unsigned int reclaimed, maps;
2556 struct sge_eq *eq = &txq->eq;
2557
2558 EQ_LOCK_ASSERT_OWNED(eq);
2559
2560 if (can_reclaim == 0)
2561 can_reclaim = reclaimable(eq);
2562
2563 maps = reclaimed = 0;
2564 while (can_reclaim && maps < n) {
2565 int ndesc;
2566
2567 txsd = &txq->sdesc[eq->cidx];
2568 ndesc = txsd->desc_used;
2569
2570 /* Firmware doesn't return "partial" credits. */
2571 KASSERT(can_reclaim >= ndesc,
2572 ("%s: unexpected number of credits: %d, %d",
2573 __func__, can_reclaim, ndesc));
2574
2575 maps += txsd->credits;
2576
2577 reclaimed += ndesc;
2578 can_reclaim -= ndesc;
2579
2580 eq->cidx += ndesc;
2581 if (__predict_false(eq->cidx >= eq->cap))
2582 eq->cidx -= eq->cap;
2583 }
2584
2585 txm = &txq->maps[txq->map_cidx];
2586 if (maps)
2587 prefetch(txm->m);
2588
2589 eq->avail += reclaimed;
2590 KASSERT(eq->avail < eq->cap, /* avail tops out at (cap - 1) */
2591 ("%s: too many descriptors available", __func__));
2592
2593 txq->map_avail += maps;
2594 KASSERT(txq->map_avail <= txq->map_total,
2595 ("%s: too many maps available", __func__));
2596
2597 while (maps--) {
2598 struct tx_map *next;
2599
2600 next = txm + 1;
2601 if (__predict_false(txq->map_cidx + 1 == txq->map_total))
2602 next = txq->maps;
2603 prefetch(next->m);
2604
2605 bus_dmamap_unload(txq->tx_tag, txm->map);
2606 m_freem(txm->m);
2607 txm->m = NULL;
2608
2609 txm = next;
2610 if (__predict_false(++txq->map_cidx == txq->map_total))
2611 txq->map_cidx = 0;
2612 }
2613
2614 return (reclaimed);
2615}
2616
2617static void
2618write_eqflush_wr(struct sge_eq *eq)
2619{
2620 struct fw_eq_flush_wr *wr;
2621
2622 EQ_LOCK_ASSERT_OWNED(eq);
2623 KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
2624
2625 wr = (void *)&eq->desc[eq->pidx];
2626 bzero(wr, sizeof(*wr));
2627 wr->opcode = FW_EQ_FLUSH_WR;
2628 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
2629 F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
2630
2631 eq->flags |= EQ_CRFLUSHED;
2632 eq->pending++;
2633 eq->avail--;
2634 if (++eq->pidx == eq->cap)
2635 eq->pidx = 0;
2636}
2637
2638static __be64
2639get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
2640{
2641 int i = (idx / 3) * 2;
2642
2643 switch (idx % 3) {
2644 case 0: {
2645 __be64 rc;
2646
2647 rc = htobe32(sgl[i].ds_len);
2648 if (i + 1 < nsegs)
2649 rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
2650
2651 return (rc);
2652 }
2653 case 1:
2654 return htobe64(sgl[i].ds_addr);
2655 case 2:
2656 return htobe64(sgl[i + 1].ds_addr);
2657 }
2658
2659 return (0);
2660}
2661
2662static void
2663set_fl_tag_idx(struct sge_fl *fl, int mtu)
2664{
2665 int i;
2666
2667 FL_LOCK_ASSERT_OWNED(fl);
2668
2669 for (i = 0; i < FL_BUF_SIZES - 1; i++) {
2670 if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT))
2671 break;
2672 }
2673
2674 fl->tag_idx = i;
2675}
2676
2677static int
2678handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl)
2679{
2680 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
2681 struct sge *s = &sc->sge;
2682 struct sge_txq *txq;
2683 struct port_info *pi;
2684
2685 txq = (void *)s->eqmap[qid - s->eq_start];
2686 TXQ_LOCK(txq);
2687 if (txq->eq.flags & EQ_CRFLUSHED) {
2688 pi = txq->ifp->if_softc;
2689 taskqueue_enqueue(pi->tq, &txq->resume_tx);
2690 txq->egr_update++;
2691 } else
2692 wakeup_one(txq); /* txq is going away, wakeup free_txq */
2693 TXQ_UNLOCK(txq);
2694
2695 return (0);
2696}
2697
2698/*
2699 * m0 is freed on successful transmission.
2700 */
2701static int
2702ctrl_tx(struct adapter *sc, struct sge_ctrlq *ctrlq, struct mbuf *m0)
2703{
2704 struct sge_eq *eq = &ctrlq->eq;
2705 int rc = 0, ndesc;
2706 int can_reclaim;
2707 caddr_t dst;
2708 struct mbuf *m;
2709
2710 M_ASSERTPKTHDR(m0);
2711
2712 if (m0->m_pkthdr.len > SGE_MAX_WR_LEN) {
2713 ctrlq->too_long++;
2714 return (EMSGSIZE);
2715 }
2716 ndesc = howmany(m0->m_pkthdr.len, CTRL_EQ_ESIZE);
2717
2718 EQ_LOCK(eq);
2719
2720 can_reclaim = reclaimable(eq);
2721 eq->cidx += can_reclaim;
2722 eq->avail += can_reclaim;
2723 if (__predict_false(eq->cidx >= eq->cap))
2724 eq->cidx -= eq->cap;
2725
2726 if (eq->avail < ndesc) {
2727 rc = EAGAIN;
2728 ctrlq->no_desc++;
2729 goto failed;
2730 }
2731
2732 dst = (void *)&eq->desc[eq->pidx];
2733 for (m = m0; m; m = m->m_next)
2734 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
2735
2736 eq->pidx += ndesc;
2737 if (__predict_false(eq->pidx >= eq->cap))
2738 eq->pidx -= eq->cap;
2739
2740 eq->pending += ndesc;
2741 ctrlq->total_wrs++;
2742 ring_eq_db(sc, eq);
2743failed:
2744 EQ_UNLOCK(eq);
2745 if (rc == 0)
2746 m_freem(m0);
2747
2748 return (rc);
2749}
2750
2751static int
2752sysctl_abs_id(SYSCTL_HANDLER_ARGS)
2753{
2754 uint16_t *id = arg1;
2755 int i = *id;
2756
2757 return sysctl_handle_int(oidp, &i, 0, req);
2758}