/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_sge.c 219290 2011-03-05 03:42:03Z np $");
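
/*
 * Scatter-Gather Engine (SGE) support for the Chelsio T4 (cxgbe) driver:
 * setup of the adapter's ingress queues, freelists and egress (tx) queues,
 * and the interrupt, rx and tx fast paths that service them.
 */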

#include "opt_inet.h"

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "common/t4fw_interface.h"

struct fl_buf_info {
        int size;
        int type;
        uma_zone_t zone;
};

/* t4_sge_init will fill up the zone */
static struct fl_buf_info fl_buf_info[FL_BUF_SIZES] = {
        { MCLBYTES, EXT_CLUSTER, NULL},
        { MJUMPAGESIZE, EXT_JUMBOP, NULL},
        { MJUM9BYTES, EXT_JUMBO9, NULL},
        { MJUM16BYTES, EXT_JUMBO16, NULL}
};
#define FL_BUF_SIZE(x)  (fl_buf_info[x].size)
#define FL_BUF_TYPE(x)  (fl_buf_info[x].type)
#define FL_BUF_ZONE(x)  (fl_buf_info[x].zone)

enum {
        FL_PKTSHIFT = 2
};

#define FL_ALIGN        min(CACHE_LINE_SIZE, 32)
#if CACHE_LINE_SIZE > 64
#define SPG_LEN         128
#else
#define SPG_LEN         64
#endif

/* Used to track coalesced tx work request */
struct txpkts {
        uint64_t *flitp;        /* ptr to flit where next pkt should start */
        uint8_t npkt;           /* # of packets in this work request */
        uint8_t nflits;         /* # of flits used by this work request */
        uint16_t plen;          /* total payload (sum of all packets) */
};

/* A packet's SGL. This + m_pkthdr has all info needed for tx */
struct sgl {
        int nsegs;              /* # of segments in the SGL, 0 means imm. tx */
        int nflits;             /* # of flits needed for the SGL */
        bus_dma_segment_t seg[TX_SGL_SEGS];
};

static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
    int, iq_intr_handler_t *, char *);
static inline void init_fl(struct sge_fl *, int, char *);
static inline void init_txq(struct sge_txq *, int, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
    int);
static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
static int alloc_iq(struct sge_iq *, int);
static int free_iq(struct sge_iq *);
static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
static int free_rxq(struct port_info *, struct sge_rxq *);
static int alloc_txq(struct port_info *, struct sge_txq *, int);
static int free_txq(struct port_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
static inline void iq_next(struct sge_iq *);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static void refill_fl(struct sge_fl *, int);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct sge_fl *);
static int alloc_eq_maps(struct sge_eq *);
static void free_eq_maps(struct sge_eq *);
125static void set_fl_tag_idx(struct sge_fl *, int); 126 127static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int); 128static int free_pkt_sgl(struct sge_txq *, struct sgl *); 129static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *, 130 struct sgl *); 131static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *, 132 struct mbuf *, struct sgl *); 133static void write_txpkts_wr(struct sge_txq *, struct txpkts *); 134static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *, 135 struct txpkts *, struct mbuf *, struct sgl *); 136static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *); 137static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 138static inline void ring_tx_db(struct adapter *, struct sge_eq *); 139static int reclaim_tx_descs(struct sge_eq *, int, int); 140static void write_eqflush_wr(struct sge_eq *); 141static __be64 get_flit(bus_dma_segment_t *, int, int); 142static int handle_sge_egr_update(struct adapter *, 143 const struct cpl_sge_egr_update *); 144 145/** 146 * t4_sge_init - initialize SGE 147 * @sc: the adapter 148 * 149 * Performs SGE initialization needed every time after a chip reset. 150 * We do not initialize any of the queues here, instead the driver 151 * top-level must request them individually. 152 */ 153void 154t4_sge_init(struct adapter *sc) 155{ 156 struct sge *s = &sc->sge; 157 int i; 158 159 FL_BUF_ZONE(0) = zone_clust; 160 FL_BUF_ZONE(1) = zone_jumbop; 161 FL_BUF_ZONE(2) = zone_jumbo9; 162 FL_BUF_ZONE(3) = zone_jumbo16; 163 164 t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) | 165 V_INGPADBOUNDARY(M_INGPADBOUNDARY) | 166 F_EGRSTATUSPAGESIZE, 167 V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) | 168 V_PKTSHIFT(FL_PKTSHIFT) | 169 F_RXPKTCPLMODE | 170 V_EGRSTATUSPAGESIZE(SPG_LEN == 128)); 171 t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE, 172 V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0), 173 V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); 174 175 for (i = 0; i < FL_BUF_SIZES; i++) { 176 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), 177 FL_BUF_SIZE(i)); 178 } 179 180 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, 181 V_THRESHOLD_0(s->counter_val[0]) | 182 V_THRESHOLD_1(s->counter_val[1]) | 183 V_THRESHOLD_2(s->counter_val[2]) | 184 V_THRESHOLD_3(s->counter_val[3])); 185 186 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, 187 V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) | 188 V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1]))); 189 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, 190 V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) | 191 V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3]))); 192 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, 193 V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) | 194 V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5]))); 195} 196 197int 198t4_create_dma_tag(struct adapter *sc) 199{ 200 int rc; 201 202 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 203 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 204 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 205 NULL, &sc->dmat); 206 if (rc != 0) { 207 device_printf(sc->dev, 208 "failed to create main DMA tag: %d\n", rc); 209 } 210 211 return (rc); 212} 213 214int 215t4_destroy_dma_tag(struct adapter *sc) 216{ 217 if (sc->dmat) 218 bus_dma_tag_destroy(sc->dmat); 219 220 return (0); 221} 222 223/* 224 * Allocate and initialize the firmware event queue and the forwarded interrupt 225 * queues, if any. 
The adapter owns all these queues as they are not associated 226 * with any particular port. 227 * 228 * Returns errno on failure. Resources allocated up to that point may still be 229 * allocated. Caller is responsible for cleanup in case this function fails. 230 */ 231int 232t4_setup_adapter_iqs(struct adapter *sc) 233{ 234 int i, rc; 235 struct sge_iq *iq, *fwq; 236 iq_intr_handler_t *handler; 237 char name[16]; 238 239 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 240 241 fwq = &sc->sge.fwq; 242 if (sc->flags & INTR_FWD) { 243 iq = &sc->sge.fiq[0]; 244 245 /* 246 * Forwarded interrupt queues - allocate 1 if there's only 1 247 * vector available, one less than the number of vectors 248 * otherwise (the first vector is reserved for the error 249 * interrupt in that case). 250 */ 251 i = sc->intr_count > 1 ? 1 : 0; 252 for (; i < sc->intr_count; i++, iq++) { 253 254 snprintf(name, sizeof(name), "%s fiq%d", 255 device_get_nameunit(sc->dev), i); 256 init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL, 257 name); 258 259 rc = alloc_iq(iq, i); 260 if (rc != 0) { 261 device_printf(sc->dev, 262 "failed to create fwd intr queue %d: %d\n", 263 i, rc); 264 return (rc); 265 } 266 } 267 268 handler = t4_intr_evt; 269 i = 0; /* forward fwq's interrupt to the first fiq */ 270 } else { 271 handler = NULL; 272 i = 1; /* fwq should use vector 1 (0 is used by error) */ 273 } 274 275 snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev)); 276 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name); 277 rc = alloc_iq(fwq, i); 278 if (rc != 0) { 279 device_printf(sc->dev, 280 "failed to create firmware event queue: %d\n", rc); 281 } 282 283 return (rc); 284} 285 286/* 287 * Idempotent 288 */ 289int 290t4_teardown_adapter_iqs(struct adapter *sc) 291{ 292 int i; 293 struct sge_iq *iq; 294 295 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 296 297 iq = &sc->sge.fwq; 298 free_iq(iq); 299 if (sc->flags & INTR_FWD) { 300 for (i = 0; i < NFIQ(sc); i++) { 301 iq = &sc->sge.fiq[i]; 302 free_iq(iq); 303 } 304 } 305 306 return (0); 307} 308 309int 310t4_setup_eth_queues(struct port_info *pi) 311{ 312 int rc = 0, i, intr_idx; 313 struct sge_rxq *rxq; 314 struct sge_txq *txq; 315 char name[16]; 316 struct adapter *sc = pi->adapter; 317 318 if (sysctl_ctx_init(&pi->ctx) == 0) { 319 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev); 320 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 321 322 pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, 323 "rxq", CTLFLAG_RD, NULL, "rx queues"); 324 pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, 325 "txq", CTLFLAG_RD, NULL, "tx queues"); 326 } 327 328 for_each_rxq(pi, i, rxq) { 329 330 snprintf(name, sizeof(name), "%s rxq%d-iq", 331 device_get_nameunit(pi->dev), i); 332 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 333 pi->qsize_rxq, RX_IQ_ESIZE, 334 sc->flags & INTR_FWD ? 
t4_intr_data: NULL, name); 335 336 snprintf(name, sizeof(name), "%s rxq%d-fl", 337 device_get_nameunit(pi->dev), i); 338 init_fl(&rxq->fl, pi->qsize_rxq / 8, name); 339 340 if (sc->flags & INTR_FWD) 341 intr_idx = (pi->first_rxq + i) % NFIQ(sc); 342 else 343 intr_idx = pi->first_rxq + i + 2; 344 345 rc = alloc_rxq(pi, rxq, intr_idx, i); 346 if (rc != 0) 347 goto done; 348 349 intr_idx++; 350 } 351 352 for_each_txq(pi, i, txq) { 353 354 snprintf(name, sizeof(name), "%s txq%d", 355 device_get_nameunit(pi->dev), i); 356 init_txq(txq, pi->qsize_txq, name); 357 358 rc = alloc_txq(pi, txq, i); 359 if (rc != 0) 360 goto done; 361 } 362 363done: 364 if (rc) 365 t4_teardown_eth_queues(pi); 366 367 return (rc); 368} 369 370/* 371 * Idempotent 372 */ 373int 374t4_teardown_eth_queues(struct port_info *pi) 375{ 376 int i; 377 struct sge_rxq *rxq; 378 struct sge_txq *txq; 379 380 /* Do this before freeing the queues */ 381 if (pi->oid_txq || pi->oid_rxq) { 382 sysctl_ctx_free(&pi->ctx); 383 pi->oid_txq = pi->oid_rxq = NULL; 384 } 385 386 for_each_txq(pi, i, txq) { 387 free_txq(pi, txq); 388 } 389 390 for_each_rxq(pi, i, rxq) { 391 free_rxq(pi, rxq); 392 } 393 394 return (0); 395} 396 397/* Deals with errors and forwarded interrupts */ 398void 399t4_intr_all(void *arg) 400{ 401 struct adapter *sc = arg; 402 403 t4_intr_err(arg); 404 t4_intr_fwd(&sc->sge.fiq[0]); 405} 406 407/* Deals with forwarded interrupts on the given ingress queue */ 408void 409t4_intr_fwd(void *arg) 410{ 411 struct sge_iq *iq = arg, *q; 412 struct adapter *sc = iq->adapter; 413 struct rsp_ctrl *ctrl; 414 int ndesc_pending = 0, ndesc_total = 0; 415 int qid; 416 417 IQ_LOCK(iq); 418 while (is_new_response(iq, &ctrl)) { 419 420 rmb(); 421 422 /* Only interrupt muxing expected on this queue */ 423 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR, 424 ("unexpected event on forwarded interrupt queue: %x", 425 G_RSPD_TYPE(ctrl->u.type_gen))); 426 427 qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start; 428 q = sc->sge.iqmap[qid]; 429 430 q->handler(q); 431 432 ndesc_total++; 433 if (++ndesc_pending >= iq->qsize / 4) { 434 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 435 V_CIDXINC(ndesc_pending) | 436 V_INGRESSQID(iq->cntxt_id) | 437 V_SEINTARM( 438 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 439 ndesc_pending = 0; 440 } 441 442 iq_next(iq); 443 } 444 IQ_UNLOCK(iq); 445 446 if (ndesc_total > 0) { 447 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 448 V_CIDXINC(ndesc_pending) | V_INGRESSQID((u32)iq->cntxt_id) | 449 V_SEINTARM(iq->intr_params)); 450 } 451} 452 453/* Deals with error interrupts */ 454void 455t4_intr_err(void *arg) 456{ 457 struct adapter *sc = arg; 458 459 if (sc->intr_type == 1) 460 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 461 462 t4_slow_intr_handler(sc); 463} 464 465/* Deals with the firmware event queue */ 466void 467t4_intr_evt(void *arg) 468{ 469 struct sge_iq *iq = arg; 470 struct adapter *sc = iq->adapter; 471 struct rsp_ctrl *ctrl; 472 const struct rss_header *rss; 473 int ndesc_pending = 0, ndesc_total = 0; 474 475 KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__)); 476 477 IQ_LOCK(iq); 478 while (is_new_response(iq, &ctrl)) { 479 480 rmb(); 481 482 rss = (const void *)iq->cdesc; 483 484 /* Should only get CPL on this queue */ 485 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL, 486 ("%s: unexpected type %d", __func__, 487 G_RSPD_TYPE(ctrl->u.type_gen))); 488 489 switch (rss->opcode) { 490 case CPL_FW4_MSG: 491 case CPL_FW6_MSG: { 492 const struct cpl_fw6_msg *cpl; 493 494 cpl = 
(const void *)(rss + 1); 495 if (cpl->type == FW6_TYPE_CMD_RPL) 496 t4_handle_fw_rpl(sc, cpl->data); 497 498 break; 499 } 500 case CPL_SGE_EGR_UPDATE: 501 handle_sge_egr_update(sc, (const void *)(rss + 1)); 502 break; 503 504 default: 505 device_printf(sc->dev, 506 "can't handle CPL opcode %d.", rss->opcode); 507 } 508 509 ndesc_total++; 510 if (++ndesc_pending >= iq->qsize / 4) { 511 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 512 V_CIDXINC(ndesc_pending) | 513 V_INGRESSQID(iq->cntxt_id) | 514 V_SEINTARM( 515 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 516 ndesc_pending = 0; 517 } 518 iq_next(iq); 519 } 520 IQ_UNLOCK(iq); 521 522 if (ndesc_total > 0) { 523 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 524 V_CIDXINC(ndesc_pending) | V_INGRESSQID(iq->cntxt_id) | 525 V_SEINTARM(iq->intr_params)); 526 } 527} 528 529void 530t4_intr_data(void *arg) 531{ 532 struct sge_rxq *rxq = arg; 533 struct sge_iq *iq = arg; 534 struct adapter *sc = iq->adapter; 535 struct rsp_ctrl *ctrl;
        struct ifnet *ifp = rxq->ifp;
        struct sge_fl *fl = &rxq->fl;
        struct fl_sdesc *sd = &fl->sdesc[fl->cidx], *sd_next;
        const struct rss_header *rss;
        const struct cpl_rx_pkt *cpl;
        uint32_t len;
        int ndescs = 0, i;
        struct mbuf *m0, *m;
#ifdef INET
        struct lro_ctrl *lro = &rxq->lro;
        struct lro_entry *l;
#endif

        prefetch(sd->m);
        prefetch(sd->cl);

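        /*
         * Main response-processing loop: walk the ingress queue until
         * is_new_response() finds nothing new, and periodically tell the
         * SGE (via the SGE_PF_GTS register) how many entries have been
         * consumed so that they can be reused.
         */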
        IQ_LOCK(iq);
        iq->intr_next = iq->intr_params;
        while (is_new_response(iq, &ctrl)) {

                rmb();

                rss = (const void *)iq->cdesc;
                i = G_RSPD_TYPE(ctrl->u.type_gen);

                if (__predict_false(i == X_RSPD_TYPE_CPL)) {

                        /* Can't be anything except an egress update */
                        KASSERT(rss->opcode == CPL_SGE_EGR_UPDATE,
                            ("%s: unexpected CPL %x", __func__, rss->opcode));

                        handle_sge_egr_update(sc, (const void *)(rss + 1));
                        goto nextdesc;
                }
                KASSERT(i == X_RSPD_TYPE_FLBUF && rss->opcode == CPL_RX_PKT,
                    ("%s: unexpected CPL %x rsp %d", __func__, rss->opcode, i));

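                /*
                 * This response carries a frame in one or more freelist
                 * buffers.  A small frame is copied into a plain mbuf so
                 * the cluster can be recycled; a larger one hands its
                 * cluster to the mbuf (m_cljset) to avoid the copy.
                 */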
                sd_next = sd + 1;
                if (__predict_false(fl->cidx + 1 == fl->cap))
                        sd_next = fl->sdesc;
                prefetch(sd_next->m);
                prefetch(sd_next->cl);

                cpl = (const void *)(rss + 1);

                m0 = sd->m;
                sd->m = NULL;   /* consumed */

                len = be32toh(ctrl->pldbuflen_qid);
                if (__predict_false((len & F_RSPD_NEWBUF) == 0))
                        panic("%s: cannot handle packed frames", __func__);
                len = G_RSPD_LEN(len);

                bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
                    BUS_DMASYNC_POSTREAD);

                m_init(m0, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, M_PKTHDR);
                if (len < MINCLSIZE) {
                        /* copy data to mbuf, buffer will be recycled */
                        bcopy(sd->cl, mtod(m0, caddr_t), len);
                        m0->m_len = len;
                } else {
                        bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
                        m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
                        sd->cl = NULL;  /* consumed */
                        m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
                }

                len -= FL_PKTSHIFT;
                m0->m_len -= FL_PKTSHIFT;
                m0->m_data += FL_PKTSHIFT;

                m0->m_pkthdr.len = len;
                m0->m_pkthdr.rcvif = ifp;
                m0->m_flags |= M_FLOWID;
                m0->m_pkthdr.flowid = rss->hash_val;

                if (cpl->csum_calc && !cpl->err_vec &&
                    ifp->if_capenable & IFCAP_RXCSUM) {
                        m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
                            CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                        if (cpl->ip_frag)
                                m0->m_pkthdr.csum_data = be16toh(cpl->csum);
                        else
                                m0->m_pkthdr.csum_data = 0xffff;
                        rxq->rxcsum++;
                }

                if (cpl->vlan_ex) {
                        m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
                        m0->m_flags |= M_VLANTAG;
                        rxq->vlan_extraction++;
                }

                i = 1;  /* # of fl sdesc used */
                sd = sd_next;
                if (__predict_false(++fl->cidx == fl->cap))
                        fl->cidx = 0;

                len -= m0->m_len;
                m = m0;
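                /*
                 * A frame larger than a single freelist buffer spans
                 * several of them; chain one mbuf per buffer until the
                 * remaining length is exhausted.
                 */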
                while (len) {
                        i++;

                        sd_next = sd + 1;
                        if (__predict_false(fl->cidx + 1 == fl->cap))
                                sd_next = fl->sdesc;
                        prefetch(sd_next->m);
                        prefetch(sd_next->cl);

                        m->m_next = sd->m;
                        sd->m = NULL;   /* consumed */
                        m = m->m_next;

                        bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
                            BUS_DMASYNC_POSTREAD);

                        m_init(m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0);
                        if (len <= MLEN) {
                                bcopy(sd->cl, mtod(m, caddr_t), len);
                                m->m_len = len;
                        } else {
                                bus_dmamap_unload(fl->tag[sd->tag_idx],
                                    sd->map);
                                m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
                                sd->cl = NULL;  /* consumed */
                                m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
                        }

                        i++;
                        sd = sd_next;
                        if (__predict_false(++fl->cidx == fl->cap))
                                fl->cidx = 0;

                        len -= m->m_len;
                }

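                /*
                 * The ingress queue lock is dropped while the frame is
                 * handed to LRO/if_input, so the rest of the stack does
                 * not run with it held, and is retaken before the
                 * freelist is replenished and the next response examined.
                 */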
                IQ_UNLOCK(iq);
#ifdef INET
                if (cpl->l2info & htobe32(F_RXF_LRO) &&
                    rxq->flags & RXQ_LRO_ENABLED &&
                    tcp_lro_rx(lro, m0, 0) == 0) {
                        /* queued for LRO */
                } else
#endif
                ifp->if_input(ifp, m0);
                IQ_LOCK(iq);

                FL_LOCK(fl);
                fl->needed += i;
                if (fl->needed >= 32)
                        refill_fl(fl, 64);
                if (fl->pending >= 32)
                        ring_fl_db(sc, fl);
                FL_UNLOCK(fl);

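                /*
                 * Consumer-index updates are batched: the doorbell
                 * (SGE_PF_GTS) is written only once more than 32
                 * responses have accumulated, not once per response.
                 */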
nextdesc:       ndescs++;
                iq_next(iq);

                if (ndescs > 32) {
                        t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
                            V_CIDXINC(ndescs) |
                            V_INGRESSQID((u32)iq->cntxt_id) |
                            V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
                        ndescs = 0;
                }
        }
        IQ_UNLOCK(iq);

#ifdef INET
        while (!SLIST_EMPTY(&lro->lro_active)) {
                l = SLIST_FIRST(&lro->lro_active);
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, l);
        }
#endif

        t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
            V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));

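        /*
         * Top up the freelist once more before leaving the handler if it
         * has dropped by 32 or more buffers; the freelist doorbell is
         * rung only when at least 8 newly posted buffers are pending.
         */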
        FL_LOCK(fl);
        if (fl->needed >= 32)
                refill_fl(fl, 128);
        if (fl->pending >= 8)
                ring_fl_db(sc, fl);
665 FL_UNLOCK(fl); 666} 667 668/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */ 669#define TXPKTS_PKT_HDR ((\ 670 sizeof(struct ulp_txpkt) + \ 671 sizeof(struct ulptx_idata) + \ 672 sizeof(struct cpl_tx_pkt_core) \ 673 ) / 8) 674 675/* Header of a coalesced tx WR, before SGL of first packet (in flits) */ 676#define TXPKTS_WR_HDR (\ 677 sizeof(struct fw_eth_tx_pkts_wr) / 8 + \ 678 TXPKTS_PKT_HDR) 679 680/* Header of a tx WR, before SGL of first packet (in flits) */ 681#define TXPKT_WR_HDR ((\ 682 sizeof(struct fw_eth_tx_pkt_wr) + \ 683 sizeof(struct cpl_tx_pkt_core) \ 684 ) / 8 ) 685 686/* Header of a tx LSO WR, before SGL of first packet (in flits) */ 687#define TXPKT_LSO_WR_HDR ((\ 688 sizeof(struct fw_eth_tx_pkt_wr) + \ 689 sizeof(struct cpl_tx_pkt_lso) + \ 690 sizeof(struct cpl_tx_pkt_core) \ 691 ) / 8 ) 692 693int 694t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m) 695{ 696 struct port_info *pi = (void *)ifp->if_softc; 697 struct adapter *sc = pi->adapter; 698 struct sge_eq *eq = &txq->eq; 699 struct buf_ring *br = eq->br; 700 struct mbuf *next; 701 int rc, coalescing; 702 struct txpkts txpkts; 703 struct sgl sgl; 704 705 TXQ_LOCK_ASSERT_OWNED(txq); 706 KASSERT(m, ("%s: called with nothing to do.", __func__)); 707 708 txpkts.npkt = 0;/* indicates there's nothing in txpkts */ 709 coalescing = 0; 710 711 prefetch(&eq->sdesc[eq->pidx]); 712 prefetch(&eq->desc[eq->pidx]); 713 prefetch(&eq->maps[eq->map_pidx]); 714 715 if (eq->avail < 8) 716 reclaim_tx_descs(eq, 1, 8); 717 718 for (; m; m = next ? next : drbr_dequeue(ifp, br)) { 719 720 if (eq->avail < 8) 721 break; 722 723 next = m->m_nextpkt; 724 m->m_nextpkt = NULL; 725 726 if (next || buf_ring_peek(br)) 727 coalescing = 1; 728 729 rc = get_pkt_sgl(txq, &m, &sgl, coalescing); 730 if (rc != 0) { 731 if (rc == ENOMEM) { 732 733 /* Short of resources, suspend tx */ 734 735 m->m_nextpkt = next; 736 break; 737 } 738 739 /* 740 * Unrecoverable error for this packet, throw it away 741 * and move on to the next. get_pkt_sgl may already 742 * have freed m (it will be NULL in that case and the 743 * m_freem here is still safe). 744 */ 745 746 m_freem(m); 747 continue; 748 } 749 750 if (coalescing && 751 add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) { 752 753 /* Successfully absorbed into txpkts */ 754 755 write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl); 756 goto doorbell; 757 } 758 759 /* 760 * We weren't coalescing to begin with, or current frame could 761 * not be coalesced (add_to_txpkts flushes txpkts if a frame 762 * given to it can't be coalesced). Either way there should be 763 * nothing in txpkts. 764 */ 765 KASSERT(txpkts.npkt == 0, 766 ("%s: txpkts not empty: %d", __func__, txpkts.npkt)); 767 768 /* We're sending out individual packets now */ 769 coalescing = 0; 770 771 if (eq->avail < 8) 772 reclaim_tx_descs(eq, 1, 8); 773 rc = write_txpkt_wr(pi, txq, m, &sgl); 774 if (rc != 0) { 775 776 /* Short of hardware descriptors, suspend tx */ 777 778 /* 779 * This is an unlikely but expensive failure. We've 780 * done all the hard work (DMA mappings etc.) and now we 781 * can't send out the packet. What's worse, we have to 782 * spend even more time freeing up everything in sgl. 
783 */ 784 txq->no_desc++; 785 free_pkt_sgl(txq, &sgl); 786 787 m->m_nextpkt = next; 788 break; 789 } 790 791 ETHER_BPF_MTAP(ifp, m); 792 if (sgl.nsegs == 0) 793 m_freem(m); 794 795doorbell: 796 /* Fewer and fewer doorbells as the queue fills up */ 797 if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2))) 798 ring_tx_db(sc, eq); 799 reclaim_tx_descs(eq, 16, 32); 800 } 801 802 if (txpkts.npkt > 0) 803 write_txpkts_wr(txq, &txpkts); 804 805 /* 806 * m not NULL means there was an error but we haven't thrown it away. 807 * This can happen when we're short of tx descriptors (no_desc) or maybe 808 * even DMA maps (no_dmamap). Either way, a credit flush and reclaim 809 * will get things going again. 810 * 811 * If eq->avail is already 0 we know a credit flush was requested in the 812 * WR that reduced it to 0 so we don't need another flush (we don't have 813 * any descriptor for a flush WR anyway, duh). 814 */ 815 if (m && eq->avail > 0) 816 write_eqflush_wr(eq); 817 txq->m = m; 818 819 if (eq->pending) 820 ring_tx_db(sc, eq); 821 822 reclaim_tx_descs(eq, 16, eq->qsize); 823 824 return (0); 825} 826 827void 828t4_update_fl_bufsize(struct ifnet *ifp) 829{ 830 struct port_info *pi = ifp->if_softc; 831 struct sge_rxq *rxq; 832 struct sge_fl *fl; 833 int i; 834 835 for_each_rxq(pi, i, rxq) { 836 fl = &rxq->fl; 837 838 FL_LOCK(fl); 839 set_fl_tag_idx(fl, ifp->if_mtu); 840 FL_UNLOCK(fl); 841 } 842} 843 844/* 845 * A non-NULL handler indicates this iq will not receive direct interrupts, the 846 * handler will be invoked by a forwarded interrupt queue. 847 */ 848static inline void 849init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 850 int qsize, int esize, iq_intr_handler_t *handler, char *name) 851{ 852 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 853 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 854 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 855 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 856 857 iq->flags = 0; 858 iq->adapter = sc; 859 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) | 860 V_QINTR_CNT_EN(pktc_idx >= 0); 861 iq->intr_pktc_idx = pktc_idx; 862 iq->qsize = roundup(qsize, 16); /* See FW_IQ_CMD/iqsize */ 863 iq->esize = max(esize, 16); /* See FW_IQ_CMD/iqesize */ 864 iq->handler = handler; 865 strlcpy(iq->lockname, name, sizeof(iq->lockname)); 866} 867 868static inline void 869init_fl(struct sge_fl *fl, int qsize, char *name) 870{ 871 fl->qsize = qsize; 872 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 873} 874 875static inline void 876init_txq(struct sge_txq *txq, int qsize, char *name) 877{ 878 txq->eq.qsize = qsize; 879 strlcpy(txq->eq.lockname, name, sizeof(txq->eq.lockname)); 880} 881 882static int 883alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 884 bus_dmamap_t *map, bus_addr_t *pa, void **va) 885{ 886 int rc; 887 888 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 889 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 890 if (rc != 0) { 891 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 892 goto done; 893 } 894 895 rc = bus_dmamem_alloc(*tag, va, 896 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 897 if (rc != 0) { 898 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 899 goto done; 900 } 901 902 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 903 if (rc != 0) { 904 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 905 goto done; 906 } 907done: 908 if (rc) 909 free_ring(sc, *tag, *map, *pa, *va); 910 911 return 
(rc); 912} 913 914static int 915free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 916 bus_addr_t pa, void *va) 917{ 918 if (pa) 919 bus_dmamap_unload(tag, map); 920 if (va) 921 bus_dmamem_free(tag, va, map); 922 if (tag) 923 bus_dma_tag_destroy(tag); 924 925 return (0); 926} 927 928/* 929 * Allocates the ring for an ingress queue and an optional freelist. If the 930 * freelist is specified it will be allocated and then associated with the 931 * ingress queue. 932 * 933 * Returns errno on failure. Resources allocated up to that point may still be 934 * allocated. Caller is responsible for cleanup in case this function fails. 935 * 936 * If the ingress queue will take interrupts directly (iq->handler == NULL) then 937 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies 938 * the index of the queue to which its interrupts will be forwarded. 939 */ 940static int 941alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl, 942 int intr_idx) 943{ 944 int rc, i, cntxt_id; 945 size_t len; 946 struct fw_iq_cmd c; 947 struct adapter *sc = iq->adapter; 948 __be32 v = 0; 949 950 /* The adapter queues are nominally allocated in port[0]'s name */ 951 if (pi == NULL) 952 pi = sc->port[0]; 953 954 mtx_init(&iq->iq_lock, iq->lockname, NULL, MTX_DEF); 955 956 len = iq->qsize * iq->esize; 957 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 958 (void **)&iq->desc); 959 if (rc != 0) 960 return (rc); 961 962 bzero(&c, sizeof(c)); 963 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 964 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 965 V_FW_IQ_CMD_VFN(0)); 966 967 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 968 FW_LEN16(c)); 969 970 /* Special handling for firmware event queue */ 971 if (iq == &sc->sge.fwq) 972 v |= F_FW_IQ_CMD_IQASYNCH; 973 974 if (iq->handler) { 975 KASSERT(intr_idx < NFIQ(sc), 976 ("%s: invalid indirect intr_idx %d", __func__, intr_idx)); 977 v |= F_FW_IQ_CMD_IQANDST; 978 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fiq[intr_idx].abs_id); 979 } else { 980 KASSERT(intr_idx < sc->intr_count, 981 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 982 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 983 } 984 985 c.type_to_iqandstindex = htobe32(v | 986 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 987 V_FW_IQ_CMD_VIID(pi->viid) | 988 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 989 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 990 F_FW_IQ_CMD_IQGTSMODE | 991 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 992 V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4)); 993 c.iqsize = htobe16(iq->qsize); 994 c.iqaddr = htobe64(iq->ba); 995 996 if (fl) { 997 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 998 999 for (i = 0; i < FL_BUF_SIZES; i++) { 1000 1001 /* 1002 * A freelist buffer must be 16 byte aligned as the SGE 1003 * uses the low 4 bits of the bus addr to figure out the 1004 * buffer size. 1005 */ 1006 rc = bus_dma_tag_create(sc->dmat, 16, 0, 1007 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1008 FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW, 1009 NULL, NULL, &fl->tag[i]); 1010 if (rc != 0) { 1011 device_printf(sc->dev, 1012 "failed to create fl DMA tag[%d]: %d\n", 1013 i, rc); 1014 return (rc); 1015 } 1016 } 1017 len = fl->qsize * RX_FL_ESIZE; 1018 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 1019 &fl->ba, (void **)&fl->desc); 1020 if (rc) 1021 return (rc); 1022 1023 /* Allocate space for one software descriptor per buffer. 
*/ 1024 fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8; 1025 FL_LOCK(fl); 1026 set_fl_tag_idx(fl, pi->ifp->if_mtu); 1027 rc = alloc_fl_sdesc(fl); 1028 FL_UNLOCK(fl); 1029 if (rc != 0) { 1030 device_printf(sc->dev, 1031 "failed to setup fl software descriptors: %d\n", 1032 rc); 1033 return (rc); 1034 } 1035 fl->needed = fl->cap - 1; /* one less to avoid cidx = pidx */ 1036 1037 c.iqns_to_fl0congen = 1038 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE)); 1039 c.fl0dcaen_to_fl0cidxfthresh = 1040 htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) | 1041 V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B)); 1042 c.fl0size = htobe16(fl->qsize); 1043 c.fl0addr = htobe64(fl->ba); 1044 } 1045 1046 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 1047 if (rc != 0) { 1048 device_printf(sc->dev, 1049 "failed to create ingress queue: %d\n", rc); 1050 return (rc); 1051 } 1052 1053 iq->cdesc = iq->desc; 1054 iq->cidx = 0; 1055 iq->gen = 1; 1056 iq->intr_next = iq->intr_params; 1057 iq->cntxt_id = be16toh(c.iqid); 1058 iq->abs_id = be16toh(c.physiqid); 1059 iq->flags |= (IQ_ALLOCATED | IQ_STARTED); 1060 1061 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 1062 KASSERT(cntxt_id < sc->sge.niq, 1063 ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 1064 cntxt_id, sc->sge.niq - 1)); 1065 sc->sge.iqmap[cntxt_id] = iq; 1066 1067 if (fl) { 1068 fl->cntxt_id = be16toh(c.fl0id); 1069 fl->pidx = fl->cidx = 0; 1070 1071 cntxt_id = iq->cntxt_id - sc->sge.eq_start; 1072 KASSERT(cntxt_id < sc->sge.neq, 1073 ("%s: fl->cntxt_id (%d) more than the max (%d)", __func__, 1074 cntxt_id, sc->sge.neq - 1)); 1075 sc->sge.eqmap[cntxt_id] = (void *)fl; 1076 1077 FL_LOCK(fl); 1078 refill_fl(fl, -1); 1079 if (fl->pending >= 8) 1080 ring_fl_db(sc, fl); 1081 FL_UNLOCK(fl); 1082 } 1083 1084 /* Enable IQ interrupts */ 1085 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 1086 V_INGRESSQID(iq->cntxt_id)); 1087 1088 return (0); 1089} 1090 1091/* 1092 * This can be called with the iq/fl in any state - fully allocated and 1093 * functional, partially allocated, even all-zeroed out. 1094 */ 1095static int 1096free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl) 1097{ 1098 int i, rc; 1099 struct adapter *sc = iq->adapter; 1100 device_t dev; 1101 1102 if (sc == NULL) 1103 return (0); /* nothing to do */ 1104 1105 dev = pi ? pi->dev : sc->dev; 1106 1107 if (iq->flags & IQ_STARTED) { 1108 rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0, 1109 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff); 1110 if (rc != 0) { 1111 device_printf(dev, 1112 "failed to stop queue %p: %d\n", iq, rc); 1113 return (rc); 1114 } 1115 iq->flags &= ~IQ_STARTED; 1116 } 1117 1118 if (iq->flags & IQ_ALLOCATED) { 1119 1120 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 1121 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 1122 fl ? 
fl->cntxt_id : 0xffff, 0xffff); 1123 if (rc != 0) { 1124 device_printf(dev, 1125 "failed to free queue %p: %d\n", iq, rc); 1126 return (rc); 1127 } 1128 iq->flags &= ~IQ_ALLOCATED; 1129 } 1130 1131 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 1132 1133 if (mtx_initialized(&iq->iq_lock)) 1134 mtx_destroy(&iq->iq_lock); 1135 1136 bzero(iq, sizeof(*iq)); 1137 1138 if (fl) { 1139 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 1140 fl->desc); 1141 1142 if (fl->sdesc) { 1143 FL_LOCK(fl); 1144 free_fl_sdesc(fl); 1145 FL_UNLOCK(fl); 1146 } 1147 1148 if (mtx_initialized(&fl->fl_lock)) 1149 mtx_destroy(&fl->fl_lock); 1150 1151 for (i = 0; i < FL_BUF_SIZES; i++) { 1152 if (fl->tag[i]) 1153 bus_dma_tag_destroy(fl->tag[i]); 1154 } 1155 1156 bzero(fl, sizeof(*fl)); 1157 } 1158 1159 return (0); 1160} 1161 1162static int 1163alloc_iq(struct sge_iq *iq, int intr_idx) 1164{ 1165 return alloc_iq_fl(NULL, iq, NULL, intr_idx); 1166} 1167 1168static int 1169free_iq(struct sge_iq *iq) 1170{ 1171 return free_iq_fl(NULL, iq, NULL); 1172} 1173 1174static int 1175alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx) 1176{ 1177 int rc; 1178 struct sysctl_oid *oid; 1179 struct sysctl_oid_list *children; 1180 char name[16]; 1181 1182 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx); 1183 if (rc != 0) 1184 return (rc); 1185 1186#ifdef INET 1187 rc = tcp_lro_init(&rxq->lro); 1188 if (rc != 0) 1189 return (rc); 1190 rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */ 1191 1192 if (pi->ifp->if_capenable & IFCAP_LRO) 1193 rxq->flags |= RXQ_LRO_ENABLED; 1194#endif 1195 rxq->ifp = pi->ifp; 1196 1197 children = SYSCTL_CHILDREN(pi->oid_rxq); 1198 1199 snprintf(name, sizeof(name), "%d", idx); 1200 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 1201 NULL, "rx queue"); 1202 children = SYSCTL_CHILDREN(oid); 1203
1204 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 1205 &rxq->lro.lro_queued, 0, NULL); 1206 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 1207 &rxq->lro.lro_flushed, 0, NULL);
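	/*
	 * Note: rxq->lro is only initialized under #ifdef INET above, so the
	 * two LRO counters registered here are meaningful only on an INET
	 * kernel; a configuration built without INET would likely want this
	 * pair bracketed by #ifdef INET as well.
	 */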
1208 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 1209 &rxq->rxcsum, "# of times hardware assisted with checksum"); 1210 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction", 1211 CTLFLAG_RD, &rxq->vlan_extraction, 1212 "# of times hardware extracted 802.1Q tag"); 1213 1214 return (rc); 1215} 1216 1217static int 1218free_rxq(struct port_info *pi, struct sge_rxq *rxq) 1219{ 1220 int rc; 1221 1222#ifdef INET 1223 if (rxq->lro.ifp) { 1224 tcp_lro_free(&rxq->lro); 1225 rxq->lro.ifp = NULL; 1226 } 1227#endif 1228 1229 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl); 1230 if (rc == 0) 1231 bzero(rxq, sizeof(*rxq)); 1232 1233 return (rc); 1234} 1235 1236static int 1237alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx) 1238{ 1239 int rc, cntxt_id; 1240 size_t len; 1241 struct adapter *sc = pi->adapter; 1242 struct fw_eq_eth_cmd c; 1243 struct sge_eq *eq = &txq->eq; 1244 char name[16]; 1245 struct sysctl_oid *oid; 1246 struct sysctl_oid_list *children; 1247 1248 txq->ifp = pi->ifp; 1249 TASK_INIT(&txq->resume_tx, 0, cxgbe_txq_start, txq); 1250 1251 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 1252 1253 len = eq->qsize * TX_EQ_ESIZE; 1254 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 1255 &eq->ba, (void **)&eq->desc); 1256 if (rc) 1257 return (rc); 1258 1259 eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE; 1260 eq->spg = (void *)&eq->desc[eq->cap]; 1261 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */ 1262 eq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE, 1263 M_ZERO | M_WAITOK); 1264 eq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock); 1265 eq->iqid = sc->sge.rxq[pi->first_rxq].iq.cntxt_id; 1266 1267 rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR, 1268 BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS, 1269 BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &eq->tx_tag); 1270 if (rc != 0) { 1271 device_printf(sc->dev, 1272 "failed to create tx DMA tag: %d\n", rc); 1273 return (rc); 1274 } 1275 1276 rc = alloc_eq_maps(eq); 1277 if (rc != 0) { 1278 device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc); 1279 return (rc); 1280 } 1281 1282 bzero(&c, sizeof(c)); 1283 1284 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 1285 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 1286 V_FW_EQ_ETH_CMD_VFN(0)); 1287 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 1288 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 1289 c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid)); 1290 c.fetchszm_to_iqid = 1291 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 1292 V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | 1293 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 1294 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 1295 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 1296 V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 1297 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize)); 1298 c.eqaddr = htobe64(eq->ba); 1299 1300 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 1301 if (rc != 0) { 1302 device_printf(pi->dev, 1303 "failed to create egress queue: %d\n", rc); 1304 return (rc); 1305 } 1306 1307 eq->pidx = eq->cidx = 0; 1308 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 1309 eq->flags |= (EQ_ALLOCATED | EQ_STARTED); 1310 1311 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 1312 KASSERT(cntxt_id < sc->sge.neq, 1313 ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 1314 cntxt_id, sc->sge.neq - 1)); 1315 sc->sge.eqmap[cntxt_id] = eq; 1316 1317 children = 
SYSCTL_CHILDREN(pi->oid_txq); 1318 1319 snprintf(name, sizeof(name), "%d", idx); 1320 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 1321 NULL, "tx queue"); 1322 children = SYSCTL_CHILDREN(oid); 1323 1324 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 1325 &txq->txcsum, "# of times hardware assisted with checksum"); 1326 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion", 1327 CTLFLAG_RD, &txq->vlan_insertion, 1328 "# of times hardware inserted 802.1Q tag"); 1329 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 1330 &txq->tso_wrs, "# of IPv4 TSO work requests"); 1331 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 1332 &txq->imm_wrs, "# of work requests with immediate data"); 1333 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 1334 &txq->sgl_wrs, "# of work requests with direct SGL"); 1335 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 1336 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 1337 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD, 1338 &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)"); 1339 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD, 1340 &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests"); 1341 1342 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD, 1343 &txq->no_dmamap, 0, "# of times txq ran out of DMA maps"); 1344 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 1345 &txq->no_desc, 0, "# of times txq ran out of hardware descriptors"); 1346 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD, 1347 &txq->egr_update, 0, "egress update notifications from the SGE"); 1348 1349 return (rc); 1350} 1351 1352static int 1353free_txq(struct port_info *pi, struct sge_txq *txq) 1354{ 1355 int rc; 1356 struct adapter *sc = pi->adapter; 1357 struct sge_eq *eq = &txq->eq; 1358 1359 if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) { 1360 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); 1361 if (rc != 0) { 1362 device_printf(pi->dev, 1363 "failed to free egress queue %p: %d\n", eq, rc); 1364 return (rc); 1365 } 1366 eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED); 1367 } 1368 1369 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 1370 1371 free(eq->sdesc, M_CXGBE); 1372 1373 if (eq->maps) 1374 free_eq_maps(eq); 1375 1376 buf_ring_free(eq->br, M_CXGBE); 1377 1378 if (eq->tx_tag) 1379 bus_dma_tag_destroy(eq->tx_tag); 1380 1381 if (mtx_initialized(&eq->eq_lock)) 1382 mtx_destroy(&eq->eq_lock); 1383 1384 bzero(txq, sizeof(*txq)); 1385 return (0); 1386} 1387 1388static void 1389oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1390{ 1391 bus_addr_t *ba = arg; 1392 1393 KASSERT(nseg == 1, 1394 ("%s meant for single segment mappings only.", __func__)); 1395 1396 *ba = error ? 
0 : segs->ds_addr; 1397} 1398 1399static inline bool 1400is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl) 1401{ 1402 *ctrl = (void *)((uintptr_t)iq->cdesc + 1403 (iq->esize - sizeof(struct rsp_ctrl))); 1404 1405 return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen); 1406} 1407 1408static inline void 1409iq_next(struct sge_iq *iq) 1410{ 1411 iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize); 1412 if (__predict_false(++iq->cidx == iq->qsize - 1)) { 1413 iq->cidx = 0; 1414 iq->gen ^= 1; 1415 iq->cdesc = iq->desc; 1416 } 1417} 1418 1419static inline void 1420ring_fl_db(struct adapter *sc, struct sge_fl *fl) 1421{ 1422 int ndesc = fl->pending / 8; 1423 1424 /* Caller responsible for ensuring there's something useful to do */ 1425 KASSERT(ndesc > 0, ("%s called with no useful work to do.", __func__)); 1426 1427 wmb(); 1428 1429 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO | 1430 V_QID(fl->cntxt_id) | V_PIDX(ndesc)); 1431 1432 fl->pending &= 7; 1433} 1434 1435static void 1436refill_fl(struct sge_fl *fl, int nbufs) 1437{ 1438 __be64 *d = &fl->desc[fl->pidx]; 1439 struct fl_sdesc *sd = &fl->sdesc[fl->pidx]; 1440 bus_dma_tag_t tag; 1441 bus_addr_t pa; 1442 caddr_t cl; 1443 int rc; 1444 1445 FL_LOCK_ASSERT_OWNED(fl); 1446 1447 if (nbufs < 0 || nbufs > fl->needed) 1448 nbufs = fl->needed; 1449 1450 while (nbufs--) { 1451 1452 if (sd->cl != NULL) { 1453 1454 /* 1455 * This happens when a frame small enough to fit 1456 * entirely in an mbuf was received in cl last time. 1457 * We'd held on to cl and can reuse it now. Note that 1458 * we reuse a cluster of the old size if fl->tag_idx is 1459 * no longer the same as sd->tag_idx. 1460 */ 1461 1462 KASSERT(*d == sd->ba_tag, 1463 ("%s: recyling problem at pidx %d", 1464 __func__, fl->pidx)); 1465 1466 d++; 1467 goto recycled; 1468 } 1469 1470 1471 if (fl->tag_idx != sd->tag_idx) { 1472 bus_dmamap_t map; 1473 bus_dma_tag_t newtag = fl->tag[fl->tag_idx]; 1474 bus_dma_tag_t oldtag = fl->tag[sd->tag_idx]; 1475 1476 /* 1477 * An MTU change can get us here. Discard the old map 1478 * which was created with the old tag, but only if 1479 * we're able to get a new one. 1480 */ 1481 rc = bus_dmamap_create(newtag, 0, &map); 1482 if (rc == 0) { 1483 bus_dmamap_destroy(oldtag, sd->map); 1484 sd->map = map; 1485 sd->tag_idx = fl->tag_idx; 1486 } 1487 } 1488 1489 tag = fl->tag[sd->tag_idx]; 1490 1491 cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx)); 1492 if (cl == NULL) 1493 break; 1494
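		/*
		 * m_cljget() with a NULL mbuf returns a bare cluster of the
		 * requested size straight from the matching UMA zone; nothing
		 * is attached to an mbuf at this point, which is why the error
		 * path below releases it with uma_zfree(FL_BUF_ZONE(...), cl).
		 */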
1495 rc = bus_dmamap_load(tag, sd->map, cl, 1496 FL_BUF_SIZE(sd->tag_idx), oneseg_dma_callback, 1497 &pa, 0);
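		/*
		 * oneseg_dma_callback() (defined earlier in this file) stores
		 * the single segment's bus address in pa, or 0 if the load
		 * reported an error, hence the pa == 0 check that follows in
		 * addition to checking rc.
		 */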
1498 if (rc != 0 || pa == 0) { 1499 fl->dmamap_failed++; 1500 uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl); 1501 break; 1502 } 1503 1504 sd->cl = cl; 1505 *d++ = htobe64(pa | sd->tag_idx); 1506 1507#ifdef INVARIANTS 1508 sd->ba_tag = htobe64(pa | sd->tag_idx); 1509#endif 1510
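		/*
		 * Worked example of the descriptor just written: freelist
		 * buffers come from DMA tags created with 16 byte alignment,
		 * so the low 4 bits of pa are always clear and are borrowed to
		 * carry the buffer-size index.  A cluster from tag 1 at bus
		 * address 0x12340000 is posted as 0x12340001, and the SGE can
		 * recover the buffer size from those low bits.
		 */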
1511recycled: fl->pending++;
1512 fl->needed--; 1513 sd++; 1514 if (++fl->pidx == fl->cap) { 1515 fl->pidx = 0; 1516 sd = fl->sdesc; 1517 d = fl->desc; 1518 }
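		/*
		 * pidx, sd and d all wrap together at fl->cap so the software
		 * descriptor array stays in lockstep with the hardware ring;
		 * since fl->needed starts out at cap - 1, refill can never
		 * post enough buffers for pidx to land back on cidx.
		 */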
1519 1520 /* No harm if gethdr fails, we'll retry after rx */ 1521 if (sd->m == NULL) 1522 sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
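		/*
		 * MT_NOINIT asks for a completely uninitialized mbuf, so this
		 * only reserves the allocation ahead of time; whoever consumes
		 * sd->m is expected to m_init() it first, as the freelist
		 * teardown code later in this file does before m_free().
		 */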
1523 } 1524} 1525 1526static int 1527alloc_fl_sdesc(struct sge_fl *fl) 1528{ 1529 struct fl_sdesc *sd; 1530 bus_dma_tag_t tag; 1531 int i, rc; 1532 1533 FL_LOCK_ASSERT_OWNED(fl); 1534 1535 fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE, 1536 M_ZERO | M_WAITOK); 1537 1538 tag = fl->tag[fl->tag_idx]; 1539 sd = fl->sdesc; 1540 for (i = 0; i < fl->cap; i++, sd++) { 1541 1542 sd->tag_idx = fl->tag_idx; 1543 rc = bus_dmamap_create(tag, 0, &sd->map); 1544 if (rc != 0) 1545 goto failed; 1546 1547 /* Doesn't matter if this succeeds or not */ 1548 sd->m = m_gethdr(M_NOWAIT, MT_NOINIT); 1549 } 1550 1551 return (0); 1552failed: 1553 while (--i >= 0) { 1554 sd--; 1555 bus_dmamap_destroy(tag, sd->map); 1556 if (sd->m) { 1557 m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0); 1558 m_free(sd->m); 1559 sd->m = NULL; 1560 } 1561 } 1562 KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__)); 1563 1564 free(fl->sdesc, M_CXGBE); 1565 fl->sdesc = NULL; 1566 1567 return (rc); 1568} 1569 1570static void 1571free_fl_sdesc(struct sge_fl *fl) 1572{ 1573 struct fl_sdesc *sd; 1574 int i; 1575 1576 FL_LOCK_ASSERT_OWNED(fl); 1577 1578 sd = fl->sdesc; 1579 for (i = 0; i < fl->cap; i++, sd++) { 1580 1581 if (sd->m) { 1582 m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0); 1583 m_free(sd->m); 1584 sd->m = NULL; 1585 } 1586 1587 if (sd->cl) { 1588 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map); 1589 uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl); 1590 sd->cl = NULL; 1591 } 1592 1593 bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map); 1594 } 1595 1596 free(fl->sdesc, M_CXGBE); 1597 fl->sdesc = NULL; 1598} 1599 1600static int 1601alloc_eq_maps(struct sge_eq *eq) 1602{ 1603 struct tx_map *txm; 1604 int i, rc, count; 1605 1606 /* 1607 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE 1608 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is 1609 * sized for the worst case. 1610 */ 1611 count = eq->qsize * 10 / 8; 1612 eq->map_total = eq->map_avail = count; 1613 eq->map_cidx = eq->map_pidx = 0; 1614 1615 eq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE, 1616 M_ZERO | M_WAITOK); 1617 1618 txm = eq->maps; 1619 for (i = 0; i < count; i++, txm++) { 1620 rc = bus_dmamap_create(eq->tx_tag, 0, &txm->map); 1621 if (rc != 0) 1622 goto failed; 1623 } 1624 1625 return (0); 1626failed: 1627 while (--i >= 0) { 1628 txm--; 1629 bus_dmamap_destroy(eq->tx_tag, txm->map); 1630 } 1631 KASSERT(txm == eq->maps, ("%s: EDOOFUS", __func__)); 1632 1633 free(eq->maps, M_CXGBE); 1634 eq->maps = NULL; 1635 1636 return (rc); 1637} 1638 1639static void 1640free_eq_maps(struct sge_eq *eq) 1641{ 1642 struct tx_map *txm; 1643 int i; 1644 1645 txm = eq->maps; 1646 for (i = 0; i < eq->map_total; i++, txm++) { 1647 1648 if (txm->m) { 1649 bus_dmamap_unload(eq->tx_tag, txm->map); 1650 m_freem(txm->m); 1651 txm->m = NULL; 1652 } 1653 1654 bus_dmamap_destroy(eq->tx_tag, txm->map); 1655 } 1656 1657 free(eq->maps, M_CXGBE); 1658 eq->maps = NULL; 1659} 1660 1661/* 1662 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're 1663 * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes 1664 * of immediate data. 1665 */ 1666#define IMM_LEN ( \ 1667 2 * TX_EQ_ESIZE \ 1668 - sizeof(struct fw_eth_tx_pkt_wr) \ 1669 - sizeof(struct cpl_tx_pkt_core)) 1670 1671/* 1672 * Returns non-zero on failure, no need to cleanup anything in that case. 
1673 * 1674 * Note 1: We always try to defrag the mbuf if required and return EFBIG only 1675 * if the resulting chain still won't fit in a tx descriptor. 1676 * 1677 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf 1678 * does not have the TCP header in it. 1679 */ 1680static int 1681get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl, 1682 int sgl_only) 1683{ 1684 struct mbuf *m = *fp; 1685 struct sge_eq *eq = &txq->eq; 1686 struct tx_map *txm; 1687 int rc, defragged = 0, n; 1688 1689 TXQ_LOCK_ASSERT_OWNED(txq); 1690 1691 if (m->m_pkthdr.tso_segsz) 1692 sgl_only = 1; /* Do not allow immediate data with LSO */ 1693 1694start: sgl->nsegs = 0; 1695 1696 if (m->m_pkthdr.len <= IMM_LEN && !sgl_only) 1697 return (0); /* nsegs = 0 tells caller to use imm. tx */ 1698 1699 if (eq->map_avail == 0) { 1700 txq->no_dmamap++; 1701 return (ENOMEM); 1702 } 1703 txm = &eq->maps[eq->map_pidx]; 1704 1705 if (m->m_pkthdr.tso_segsz && m->m_len < 50) { 1706 *fp = m_pullup(m, 50); 1707 m = *fp; 1708 if (m == NULL) 1709 return (ENOBUFS); 1710 } 1711 1712 rc = bus_dmamap_load_mbuf_sg(eq->tx_tag, txm->map, m, sgl->seg, 1713 &sgl->nsegs, BUS_DMA_NOWAIT); 1714 if (rc == EFBIG && defragged == 0) { 1715 m = m_defrag(m, M_DONTWAIT); 1716 if (m == NULL) 1717 return (EFBIG); 1718 1719 defragged = 1; 1720 *fp = m; 1721 goto start; 1722 } 1723 if (rc != 0) 1724 return (rc); 1725 1726 txm->m = m; 1727 eq->map_avail--; 1728 if (++eq->map_pidx == eq->map_total) 1729 eq->map_pidx = 0; 1730 1731 KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS, 1732 ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs)); 1733 1734 /* 1735 * Store the # of flits required to hold this frame's SGL in nflits. An 1736 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by 1737 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used 1738 * then len1 must be set to 0. 1739 */ 1740 n = sgl->nsegs - 1; 1741 sgl->nflits = (3 * n) / 2 + (n & 1) + 2; 1742 1743 return (0); 1744} 1745 1746 1747/* 1748 * Releases all the txq resources used up in the specified sgl. 1749 */ 1750static int 1751free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl) 1752{ 1753 struct sge_eq *eq = &txq->eq; 1754 struct tx_map *txm; 1755 1756 TXQ_LOCK_ASSERT_OWNED(txq); 1757 1758 if (sgl->nsegs == 0) 1759 return (0); /* didn't use any map */ 1760 1761 /* 1 pkt uses exactly 1 map, back it out */ 1762 1763 eq->map_avail++; 1764 if (eq->map_pidx > 0) 1765 eq->map_pidx--; 1766 else 1767 eq->map_pidx = eq->map_total - 1; 1768 1769 txm = &eq->maps[eq->map_pidx]; 1770 bus_dmamap_unload(eq->tx_tag, txm->map); 1771 txm->m = NULL; 1772 1773 return (0); 1774} 1775 1776static int 1777write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m, 1778 struct sgl *sgl) 1779{ 1780 struct sge_eq *eq = &txq->eq; 1781 struct fw_eth_tx_pkt_wr *wr; 1782 struct cpl_tx_pkt_core *cpl; 1783 uint32_t ctrl; /* used in many unrelated places */ 1784 uint64_t ctrl1; 1785 int nflits, ndesc, pktlen; 1786 struct tx_sdesc *txsd; 1787 caddr_t dst; 1788 1789 TXQ_LOCK_ASSERT_OWNED(txq); 1790 1791 pktlen = m->m_pkthdr.len; 1792 1793 /* 1794 * Do we have enough flits to send this frame out? 
1795 */ 1796 ctrl = sizeof(struct cpl_tx_pkt_core); 1797 if (m->m_pkthdr.tso_segsz) { 1798 nflits = TXPKT_LSO_WR_HDR; 1799 ctrl += sizeof(struct cpl_tx_pkt_lso); 1800 } else 1801 nflits = TXPKT_WR_HDR; 1802 if (sgl->nsegs > 0) 1803 nflits += sgl->nflits; 1804 else { 1805 nflits += howmany(pktlen, 8); 1806 ctrl += pktlen; 1807 } 1808 ndesc = howmany(nflits, 8); 1809 if (ndesc > eq->avail) 1810 return (ENOMEM); 1811 1812 /* Firmware work request header */ 1813 wr = (void *)&eq->desc[eq->pidx]; 1814 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 1815 V_FW_WR_IMMDLEN(ctrl)); 1816 ctrl = V_FW_WR_LEN16(howmany(nflits, 2)); 1817 if (eq->avail == ndesc) 1818 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 1819 wr->equiq_to_len16 = htobe32(ctrl); 1820 wr->r3 = 0; 1821 1822 if (m->m_pkthdr.tso_segsz) { 1823 struct cpl_tx_pkt_lso *lso = (void *)(wr + 1); 1824 struct ether_header *eh; 1825 struct ip *ip; 1826 struct tcphdr *tcp; 1827 1828 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 1829 F_LSO_LAST_SLICE; 1830 1831 eh = mtod(m, struct ether_header *); 1832 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1833 ctrl |= V_LSO_ETHHDR_LEN(1); 1834 ip = (void *)((struct ether_vlan_header *)eh + 1); 1835 } else 1836 ip = (void *)(eh + 1); 1837 1838 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4); 1839 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) | 1840 V_LSO_TCPHDR_LEN(tcp->th_off); 1841 1842 lso->lso_ctrl = htobe32(ctrl); 1843 lso->ipid_ofst = htobe16(0); 1844 lso->mss = htobe16(m->m_pkthdr.tso_segsz); 1845 lso->seqno_offset = htobe32(0); 1846 lso->len = htobe32(pktlen); 1847 1848 cpl = (void *)(lso + 1); 1849 1850 txq->tso_wrs++; 1851 } else 1852 cpl = (void *)(wr + 1); 1853 1854 /* Checksum offload */ 1855 ctrl1 = 0; 1856 if (!(m->m_pkthdr.csum_flags & CSUM_IP)) 1857 ctrl1 |= F_TXPKT_IPCSUM_DIS; 1858 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) 1859 ctrl1 |= F_TXPKT_L4CSUM_DIS; 1860 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP)) 1861 txq->txcsum++; /* some hardware assistance provided */ 1862 1863 /* VLAN tag insertion */ 1864 if (m->m_flags & M_VLANTAG) { 1865 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 1866 txq->vlan_insertion++; 1867 } 1868 1869 /* CPL header */ 1870 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 1871 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 1872 cpl->pack = 0; 1873 cpl->len = htobe16(pktlen); 1874 cpl->ctrl1 = htobe64(ctrl1); 1875 1876 /* Software descriptor */ 1877 txsd = &eq->sdesc[eq->pidx]; 1878 txsd->desc_used = ndesc; 1879 1880 eq->pending += ndesc; 1881 eq->avail -= ndesc; 1882 eq->pidx += ndesc; 1883 if (eq->pidx >= eq->cap) 1884 eq->pidx -= eq->cap; 1885 1886 /* SGL */ 1887 dst = (void *)(cpl + 1); 1888 if (sgl->nsegs > 0) { 1889 txsd->map_used = 1; 1890 txq->sgl_wrs++; 1891 write_sgl_to_txd(eq, sgl, &dst); 1892 } else { 1893 txsd->map_used = 0; 1894 txq->imm_wrs++; 1895 for (; m; m = m->m_next) { 1896 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 1897#ifdef INVARIANTS 1898 pktlen -= m->m_len; 1899#endif 1900 } 1901#ifdef INVARIANTS 1902 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 1903#endif 1904 1905 } 1906 1907 txq->txpkt_wrs++; 1908 return (0); 1909} 1910 1911/* 1912 * Returns 0 to indicate that m has been accepted into a coalesced tx work 1913 * request. It has either been folded into txpkts or txpkts was flushed and m 1914 * has started a new coalesced work request (as the first frame in a fresh 1915 * txpkts). 
1916 * 1917 * Returns non-zero to indicate a failure - caller is responsible for 1918 * transmitting m, if there was anything in txpkts it has been flushed. 1919 */ 1920static int 1921add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts, 1922 struct mbuf *m, struct sgl *sgl) 1923{ 1924 struct sge_eq *eq = &txq->eq; 1925 int can_coalesce; 1926 struct tx_sdesc *txsd; 1927 int flits; 1928 1929 TXQ_LOCK_ASSERT_OWNED(txq); 1930 1931 if (txpkts->npkt > 0) { 1932 flits = TXPKTS_PKT_HDR + sgl->nflits; 1933 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 1934 txpkts->nflits + flits <= TX_WR_FLITS && 1935 txpkts->nflits + flits <= eq->avail * 8 && 1936 txpkts->plen + m->m_pkthdr.len < 65536; 1937 1938 if (can_coalesce) { 1939 txpkts->npkt++; 1940 txpkts->nflits += flits; 1941 txpkts->plen += m->m_pkthdr.len; 1942 1943 txsd = &eq->sdesc[eq->pidx]; 1944 txsd->map_used++; 1945 1946 return (0); 1947 } 1948 1949 /* 1950 * Couldn't coalesce m into txpkts. The first order of business 1951 * is to send txpkts on its way. Then we'll revisit m. 1952 */ 1953 write_txpkts_wr(txq, txpkts); 1954 } 1955 1956 /* 1957 * Check if we can start a new coalesced tx work request with m as 1958 * the first packet in it. 1959 */ 1960 1961 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__)); 1962 1963 flits = TXPKTS_WR_HDR + sgl->nflits; 1964 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 1965 flits <= eq->avail * 8 && flits <= TX_WR_FLITS; 1966 1967 if (can_coalesce == 0) 1968 return (EINVAL); 1969 1970 /* 1971 * Start a fresh coalesced tx WR with m as the first frame in it. 1972 */ 1973 txpkts->npkt = 1; 1974 txpkts->nflits = flits; 1975 txpkts->flitp = &eq->desc[eq->pidx].flit[2]; 1976 txpkts->plen = m->m_pkthdr.len; 1977 1978 txsd = &eq->sdesc[eq->pidx]; 1979 txsd->map_used = 1; 1980 1981 return (0); 1982} 1983 1984/* 1985 * Note that write_txpkts_wr can never run out of hardware descriptors (but 1986 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for 1987 * coalescing only if sufficient hardware descriptors are available. 
1988 */ 1989static void 1990write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts) 1991{ 1992 struct sge_eq *eq = &txq->eq; 1993 struct fw_eth_tx_pkts_wr *wr; 1994 struct tx_sdesc *txsd; 1995 uint32_t ctrl; 1996 int ndesc; 1997 1998 TXQ_LOCK_ASSERT_OWNED(txq); 1999 2000 ndesc = howmany(txpkts->nflits, 8); 2001 2002 wr = (void *)&eq->desc[eq->pidx]; 2003 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) | 2004 V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */ 2005 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2)); 2006 if (eq->avail == ndesc) 2007 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 2008 wr->equiq_to_len16 = htobe32(ctrl); 2009 wr->plen = htobe16(txpkts->plen); 2010 wr->npkt = txpkts->npkt; 2011 wr->r3 = wr->r4 = 0; 2012 2013 /* Everything else already written */ 2014 2015 txsd = &eq->sdesc[eq->pidx]; 2016 txsd->desc_used = ndesc; 2017 2018 KASSERT(eq->avail >= ndesc, ("%s: out ouf descriptors", __func__)); 2019 2020 eq->pending += ndesc; 2021 eq->avail -= ndesc; 2022 eq->pidx += ndesc; 2023 if (eq->pidx >= eq->cap) 2024 eq->pidx -= eq->cap; 2025 2026 txq->txpkts_pkts += txpkts->npkt; 2027 txq->txpkts_wrs++; 2028 txpkts->npkt = 0; /* emptied */ 2029} 2030 2031static inline void 2032write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq, 2033 struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl) 2034{ 2035 struct ulp_txpkt *ulpmc; 2036 struct ulptx_idata *ulpsc; 2037 struct cpl_tx_pkt_core *cpl; 2038 struct sge_eq *eq = &txq->eq; 2039 uintptr_t flitp, start, end; 2040 uint64_t ctrl; 2041 caddr_t dst; 2042 2043 KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__)); 2044 2045 start = (uintptr_t)eq->desc; 2046 end = (uintptr_t)eq->spg; 2047 2048 /* Checksum offload */ 2049 ctrl = 0; 2050 if (!(m->m_pkthdr.csum_flags & CSUM_IP)) 2051 ctrl |= F_TXPKT_IPCSUM_DIS; 2052 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) 2053 ctrl |= F_TXPKT_L4CSUM_DIS; 2054 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP)) 2055 txq->txcsum++; /* some hardware assistance provided */ 2056 2057 /* VLAN tag insertion */ 2058 if (m->m_flags & M_VLANTAG) { 2059 ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 2060 txq->vlan_insertion++; 2061 } 2062 2063 /* 2064 * The previous packet's SGL must have ended at a 16 byte boundary (this 2065 * is required by the firmware/hardware). It follows that flitp cannot 2066 * wrap around between the ULPTX master command and ULPTX subcommand (8 2067 * bytes each), and that it can not wrap around in the middle of the 2068 * cpl_tx_pkt_core either. 
2069 */ 2070 flitp = (uintptr_t)txpkts->flitp; 2071 KASSERT((flitp & 0xf) == 0, 2072 ("%s: last SGL did not end at 16 byte boundary: %p", 2073 __func__, txpkts->flitp)); 2074 2075 /* ULP master command */ 2076 ulpmc = (void *)flitp; 2077 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) | 2078 V_ULP_TXPKT_FID(eq->iqid)); 2079 ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) + 2080 sizeof(*cpl) + 8 * sgl->nflits, 16)); 2081 2082 /* ULP subcommand */ 2083 ulpsc = (void *)(ulpmc + 1); 2084 ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) | 2085 F_ULP_TX_SC_MORE); 2086 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 2087 2088 flitp += sizeof(*ulpmc) + sizeof(*ulpsc); 2089 if (flitp == end) 2090 flitp = start; 2091 2092 /* CPL_TX_PKT */ 2093 cpl = (void *)flitp; 2094 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 2095 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 2096 cpl->pack = 0; 2097 cpl->len = htobe16(m->m_pkthdr.len); 2098 cpl->ctrl1 = htobe64(ctrl); 2099 2100 flitp += sizeof(*cpl); 2101 if (flitp == end) 2102 flitp = start; 2103 2104 /* SGL for this frame */ 2105 dst = (caddr_t)flitp; 2106 txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst); 2107 txpkts->flitp = (void *)dst; 2108 2109 KASSERT(((uintptr_t)dst & 0xf) == 0, 2110 ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst)); 2111} 2112 2113/* 2114 * If the SGL ends on an address that is not 16 byte aligned, this function will 2115 * add a 0 filled flit at the end. It returns 1 in that case. 2116 */ 2117static int 2118write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to) 2119{ 2120 __be64 *flitp, *end; 2121 struct ulptx_sgl *usgl; 2122 bus_dma_segment_t *seg; 2123 int i, padded; 2124 2125 KASSERT(sgl->nsegs > 0 && sgl->nflits > 0, 2126 ("%s: bad SGL - nsegs=%d, nflits=%d", 2127 __func__, sgl->nsegs, sgl->nflits)); 2128 2129 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 2130 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 2131 2132 flitp = (__be64 *)(*to); 2133 end = flitp + sgl->nflits; 2134 seg = &sgl->seg[0]; 2135 usgl = (void *)flitp; 2136 2137 /* 2138 * We start at a 16 byte boundary somewhere inside the tx descriptor 2139 * ring, so we're at least 16 bytes away from the status page. There is 2140 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
2141 */ 2142 2143 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 2144 V_ULPTX_NSGE(sgl->nsegs)); 2145 usgl->len0 = htobe32(seg->ds_len); 2146 usgl->addr0 = htobe64(seg->ds_addr); 2147 seg++; 2148 2149 if ((uintptr_t)end <= (uintptr_t)eq->spg) { 2150 2151 /* Won't wrap around at all */ 2152 2153 for (i = 0; i < sgl->nsegs - 1; i++, seg++) { 2154 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len); 2155 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr); 2156 } 2157 if (i & 1) 2158 usgl->sge[i / 2].len[1] = htobe32(0); 2159 } else { 2160 2161 /* Will wrap somewhere in the rest of the SGL */ 2162 2163 /* 2 flits already written, write the rest flit by flit */ 2164 flitp = (void *)(usgl + 1); 2165 for (i = 0; i < sgl->nflits - 2; i++) { 2166 if ((uintptr_t)flitp == (uintptr_t)eq->spg) 2167 flitp = (void *)eq->desc; 2168 *flitp++ = get_flit(seg, sgl->nsegs - 1, i); 2169 } 2170 end = flitp; 2171 } 2172 2173 if ((uintptr_t)end & 0xf) { 2174 *(uint64_t *)end = 0; 2175 end++; 2176 padded = 1; 2177 } else 2178 padded = 0; 2179 2180 if ((uintptr_t)end == (uintptr_t)eq->spg) 2181 *to = (void *)eq->desc; 2182 else 2183 *to = (void *)end; 2184 2185 return (padded); 2186} 2187 2188static inline void 2189copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 2190{ 2191 if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) { 2192 bcopy(from, *to, len); 2193 (*to) += len; 2194 } else { 2195 int portion = (uintptr_t)eq->spg - (uintptr_t)(*to); 2196 2197 bcopy(from, *to, portion); 2198 from += portion; 2199 portion = len - portion; /* remaining */ 2200 bcopy(from, (void *)eq->desc, portion); 2201 (*to) = (caddr_t)eq->desc + portion; 2202 } 2203} 2204 2205static inline void 2206ring_tx_db(struct adapter *sc, struct sge_eq *eq) 2207{ 2208 wmb(); 2209 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), 2210 V_QID(eq->cntxt_id) | V_PIDX(eq->pending)); 2211 eq->pending = 0; 2212} 2213 2214static int 2215reclaim_tx_descs(struct sge_eq *eq, int atleast, int howmany) 2216{ 2217 struct tx_sdesc *txsd; 2218 struct tx_map *txm, *next_txm; 2219 unsigned int cidx, can_reclaim, reclaimed, maps, next_map_cidx; 2220 2221 EQ_LOCK_ASSERT_OWNED(eq); 2222 2223 cidx = eq->spg->cidx; /* stable snapshot */ 2224 cidx = be16_to_cpu(cidx); 2225 2226 if (cidx >= eq->cidx) 2227 can_reclaim = cidx - eq->cidx; 2228 else 2229 can_reclaim = cidx + eq->cap - eq->cidx; 2230 2231 if (can_reclaim < atleast) 2232 return (0); 2233 2234 next_map_cidx = eq->map_cidx; 2235 next_txm = txm = &eq->maps[next_map_cidx]; 2236 prefetch(txm); 2237 2238 maps = reclaimed = 0; 2239 do { 2240 int ndesc; 2241 2242 txsd = &eq->sdesc[eq->cidx]; 2243 ndesc = txsd->desc_used; 2244 2245 /* Firmware doesn't return "partial" credits. 
*/ 2246 KASSERT(can_reclaim >= ndesc, 2247 ("%s: unexpected number of credits: %d, %d", 2248 __func__, can_reclaim, ndesc)); 2249 2250 maps += txsd->map_used; 2251 reclaimed += ndesc; 2252 2253 eq->cidx += ndesc; 2254 if (eq->cidx >= eq->cap) 2255 eq->cidx -= eq->cap; 2256 2257 can_reclaim -= ndesc; 2258 2259 } while (can_reclaim && reclaimed < howmany); 2260 2261 eq->avail += reclaimed; 2262 KASSERT(eq->avail < eq->cap, /* avail tops out at (cap - 1) */ 2263 ("%s: too many descriptors available", __func__)); 2264 2265 eq->map_avail += maps; 2266 KASSERT(eq->map_avail <= eq->map_total, 2267 ("%s: too many maps available", __func__)); 2268 2269 prefetch(txm->m); 2270 while (maps--) { 2271 next_txm++; 2272 if (++next_map_cidx == eq->map_total) { 2273 next_map_cidx = 0; 2274 next_txm = eq->maps; 2275 } 2276 prefetch(next_txm->m); 2277 2278 bus_dmamap_unload(eq->tx_tag, txm->map); 2279 m_freem(txm->m); 2280 txm->m = NULL; 2281 2282 txm = next_txm; 2283 } 2284 eq->map_cidx = next_map_cidx; 2285 2286 return (reclaimed); 2287} 2288 2289static void 2290write_eqflush_wr(struct sge_eq *eq) 2291{ 2292 struct fw_eq_flush_wr *wr; 2293 struct tx_sdesc *txsd; 2294 2295 EQ_LOCK_ASSERT_OWNED(eq); 2296 KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__)); 2297 2298 wr = (void *)&eq->desc[eq->pidx]; 2299 bzero(wr, sizeof(*wr)); 2300 wr->opcode = FW_EQ_FLUSH_WR; 2301 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) | 2302 F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 2303 2304 txsd = &eq->sdesc[eq->pidx]; 2305 txsd->desc_used = 1; 2306 txsd->map_used = 0; 2307 2308 eq->pending++; 2309 eq->avail--; 2310 if (++eq->pidx == eq->cap) 2311 eq->pidx = 0; 2312} 2313 2314static __be64 2315get_flit(bus_dma_segment_t *sgl, int nsegs, int idx) 2316{ 2317 int i = (idx / 3) * 2; 2318 2319 switch (idx % 3) { 2320 case 0: { 2321 __be64 rc; 2322 2323 rc = htobe32(sgl[i].ds_len); 2324 if (i + 1 < nsegs) 2325 rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32; 2326 2327 return (rc); 2328 } 2329 case 1: 2330 return htobe64(sgl[i].ds_addr); 2331 case 2: 2332 return htobe64(sgl[i + 1].ds_addr); 2333 } 2334 2335 return (0); 2336} 2337
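/*
 * Illustrative walk-through of the SGL layout built by write_sgl_to_txd() and
 * get_flit() above, using an assumed frame that maps to nsegs = 5 DMA
 * segments: get_pkt_sgl() computes n = nsegs - 1 = 4 and
 * nflits = (3 * 4) / 2 + (4 & 1) + 2 = 8, and the flits come out as
 *
 *	flit 0: ULP_TX_SC_DSGL command/nsge | len0
 *	flit 1: addr0
 *	flit 2: len1 | len2
 *	flit 3: addr1
 *	flit 4: addr2
 *	flit 5: len3 | len4
 *	flit 6: addr3
 *	flit 7: addr4
 *
 * i.e. 2 flits for the (header, len0, addr0) part plus 3 flits for every pair
 * of remaining segments (an odd leftover segment costs 2 flits, with its
 * unused length slot written as 0).  get_flit() reproduces flits 2 onward one
 * at a time on the wrap-around path: idx % 3 == 0 packs the next two lengths,
 * while idx % 3 == 1 and 2 return the corresponding addresses.
 */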
| 1583 } 1584} 1585 1586static int 1587alloc_fl_sdesc(struct sge_fl *fl) 1588{ 1589 struct fl_sdesc *sd; 1590 bus_dma_tag_t tag; 1591 int i, rc; 1592 1593 FL_LOCK_ASSERT_OWNED(fl); 1594 1595 fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE, 1596 M_ZERO | M_WAITOK); 1597 1598 tag = fl->tag[fl->tag_idx]; 1599 sd = fl->sdesc; 1600 for (i = 0; i < fl->cap; i++, sd++) { 1601 1602 sd->tag_idx = fl->tag_idx; 1603 rc = bus_dmamap_create(tag, 0, &sd->map); 1604 if (rc != 0) 1605 goto failed; 1606 1607 /* Doesn't matter if this succeeds or not */ 1608 sd->m = m_gethdr(M_NOWAIT, MT_NOINIT); 1609 } 1610 1611 return (0); 1612failed: 1613 while (--i >= 0) { 1614 sd--; 1615 bus_dmamap_destroy(tag, sd->map); 1616 if (sd->m) { 1617 m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0); 1618 m_free(sd->m); 1619 sd->m = NULL; 1620 } 1621 } 1622 KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__)); 1623 1624 free(fl->sdesc, M_CXGBE); 1625 fl->sdesc = NULL; 1626 1627 return (rc); 1628} 1629 1630static void 1631free_fl_sdesc(struct sge_fl *fl) 1632{ 1633 struct fl_sdesc *sd; 1634 int i; 1635 1636 FL_LOCK_ASSERT_OWNED(fl); 1637 1638 sd = fl->sdesc; 1639 for (i = 0; i < fl->cap; i++, sd++) { 1640 1641 if (sd->m) { 1642 m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0); 1643 m_free(sd->m); 1644 sd->m = NULL; 1645 } 1646 1647 if (sd->cl) { 1648 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map); 1649 uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl); 1650 sd->cl = NULL; 1651 } 1652 1653 bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map); 1654 } 1655 1656 free(fl->sdesc, M_CXGBE); 1657 fl->sdesc = NULL; 1658} 1659 1660static int 1661alloc_eq_maps(struct sge_eq *eq) 1662{ 1663 struct tx_map *txm; 1664 int i, rc, count; 1665 1666 /* 1667 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE 1668 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is 1669 * sized for the worst case. 1670 */ 1671 count = eq->qsize * 10 / 8; 1672 eq->map_total = eq->map_avail = count; 1673 eq->map_cidx = eq->map_pidx = 0; 1674 1675 eq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE, 1676 M_ZERO | M_WAITOK); 1677 1678 txm = eq->maps; 1679 for (i = 0; i < count; i++, txm++) { 1680 rc = bus_dmamap_create(eq->tx_tag, 0, &txm->map); 1681 if (rc != 0) 1682 goto failed; 1683 } 1684 1685 return (0); 1686failed: 1687 while (--i >= 0) { 1688 txm--; 1689 bus_dmamap_destroy(eq->tx_tag, txm->map); 1690 } 1691 KASSERT(txm == eq->maps, ("%s: EDOOFUS", __func__)); 1692 1693 free(eq->maps, M_CXGBE); 1694 eq->maps = NULL; 1695 1696 return (rc); 1697} 1698 1699static void 1700free_eq_maps(struct sge_eq *eq) 1701{ 1702 struct tx_map *txm; 1703 int i; 1704 1705 txm = eq->maps; 1706 for (i = 0; i < eq->map_total; i++, txm++) { 1707 1708 if (txm->m) { 1709 bus_dmamap_unload(eq->tx_tag, txm->map); 1710 m_freem(txm->m); 1711 txm->m = NULL; 1712 } 1713 1714 bus_dmamap_destroy(eq->tx_tag, txm->map); 1715 } 1716 1717 free(eq->maps, M_CXGBE); 1718 eq->maps = NULL; 1719} 1720 1721/* 1722 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're 1723 * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes 1724 * of immediate data. 1725 */ 1726#define IMM_LEN ( \ 1727 2 * TX_EQ_ESIZE \ 1728 - sizeof(struct fw_eth_tx_pkt_wr) \ 1729 - sizeof(struct cpl_tx_pkt_core)) 1730 1731/* 1732 * Returns non-zero on failure, no need to cleanup anything in that case. 
/*
 * Returns non-zero on failure, no need to cleanup anything in that case.
 *
 * Note 1: We always try to defrag the mbuf if required and return EFBIG only
 * if the resulting chain still won't fit in a tx descriptor.
 *
 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
 * does not have the TCP header in it.
 */
static int
get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
    int sgl_only)
{
    struct mbuf *m = *fp;
    struct sge_eq *eq = &txq->eq;
    struct tx_map *txm;
    int rc, defragged = 0, n;

    TXQ_LOCK_ASSERT_OWNED(txq);

    if (m->m_pkthdr.tso_segsz)
        sgl_only = 1;   /* Do not allow immediate data with LSO */

start:  sgl->nsegs = 0;

    if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
        return (0);     /* nsegs = 0 tells caller to use imm. tx */

    if (eq->map_avail == 0) {
        txq->no_dmamap++;
        return (ENOMEM);
    }
    txm = &eq->maps[eq->map_pidx];

    if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
        *fp = m_pullup(m, 50);
        m = *fp;
        if (m == NULL)
            return (ENOBUFS);
    }

    rc = bus_dmamap_load_mbuf_sg(eq->tx_tag, txm->map, m, sgl->seg,
        &sgl->nsegs, BUS_DMA_NOWAIT);
    if (rc == EFBIG && defragged == 0) {
        m = m_defrag(m, M_DONTWAIT);
        if (m == NULL)
            return (EFBIG);

        defragged = 1;
        *fp = m;
        goto start;
    }
    if (rc != 0)
        return (rc);

    txm->m = m;
    eq->map_avail--;
    if (++eq->map_pidx == eq->map_total)
        eq->map_pidx = 0;

    KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
        ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));

    /*
     * Store the # of flits required to hold this frame's SGL in nflits.  An
     * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
     * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
     * then len1 must be set to 0.
     */
    n = sgl->nsegs - 1;
    sgl->nflits = (3 * n) / 2 + (n & 1) + 2;

    return (0);
}
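/*
 * Illustrative note (editor's addition): the flit count above follows from
 * the SGL layout described in the comment.  With nsegs = 5, the four
 * trailing segments are packed in pairs, so n = 4 and
 *
 *     nflits = (3 * 4) / 2 + (4 & 1) + 2 = 6 + 0 + 2 = 8
 *
 * i.e. two flits for the ULPTX header plus (len0, addr0), and six flits for
 * the two (len, len, addr, addr) groups.  With nsegs = 2, n = 1 and
 * nflits = 1 + 1 + 2 = 4, where the odd segment contributes a (len, 0) flit
 * and a single address flit.
 */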

/*
 * Releases all the txq resources used up in the specified sgl.
 */
static int
free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
{
    struct sge_eq *eq = &txq->eq;
    struct tx_map *txm;

    TXQ_LOCK_ASSERT_OWNED(txq);

    if (sgl->nsegs == 0)
        return (0);     /* didn't use any map */

    /* 1 pkt uses exactly 1 map, back it out */

    eq->map_avail++;
    if (eq->map_pidx > 0)
        eq->map_pidx--;
    else
        eq->map_pidx = eq->map_total - 1;

    txm = &eq->maps[eq->map_pidx];
    bus_dmamap_unload(eq->tx_tag, txm->map);
    txm->m = NULL;

    return (0);
}

static int
write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
    struct sgl *sgl)
{
    struct sge_eq *eq = &txq->eq;
    struct fw_eth_tx_pkt_wr *wr;
    struct cpl_tx_pkt_core *cpl;
    uint32_t ctrl;      /* used in many unrelated places */
    uint64_t ctrl1;
    int nflits, ndesc, pktlen;
    struct tx_sdesc *txsd;
    caddr_t dst;

    TXQ_LOCK_ASSERT_OWNED(txq);

    pktlen = m->m_pkthdr.len;

    /*
     * Do we have enough flits to send this frame out?
     */
    ctrl = sizeof(struct cpl_tx_pkt_core);
    if (m->m_pkthdr.tso_segsz) {
        nflits = TXPKT_LSO_WR_HDR;
        ctrl += sizeof(struct cpl_tx_pkt_lso);
    } else
        nflits = TXPKT_WR_HDR;
    if (sgl->nsegs > 0)
        nflits += sgl->nflits;
    else {
        nflits += howmany(pktlen, 8);
        ctrl += pktlen;
    }
    ndesc = howmany(nflits, 8);
    if (ndesc > eq->avail)
        return (ENOMEM);

    /* Firmware work request header */
    wr = (void *)&eq->desc[eq->pidx];
    wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
        V_FW_WR_IMMDLEN(ctrl));
    ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
    if (eq->avail == ndesc)
        ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
    wr->equiq_to_len16 = htobe32(ctrl);
    wr->r3 = 0;

    if (m->m_pkthdr.tso_segsz) {
        struct cpl_tx_pkt_lso *lso = (void *)(wr + 1);
        struct ether_header *eh;
        struct ip *ip;
        struct tcphdr *tcp;

        ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
            F_LSO_LAST_SLICE;

        eh = mtod(m, struct ether_header *);
        if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
            ctrl |= V_LSO_ETHHDR_LEN(1);
            ip = (void *)((struct ether_vlan_header *)eh + 1);
        } else
            ip = (void *)(eh + 1);

        tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
        ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
            V_LSO_TCPHDR_LEN(tcp->th_off);

        lso->lso_ctrl = htobe32(ctrl);
        lso->ipid_ofst = htobe16(0);
        lso->mss = htobe16(m->m_pkthdr.tso_segsz);
        lso->seqno_offset = htobe32(0);
        lso->len = htobe32(pktlen);

        cpl = (void *)(lso + 1);

        txq->tso_wrs++;
    } else
        cpl = (void *)(wr + 1);

    /* Checksum offload */
    ctrl1 = 0;
    if (!(m->m_pkthdr.csum_flags & CSUM_IP))
        ctrl1 |= F_TXPKT_IPCSUM_DIS;
    if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
        ctrl1 |= F_TXPKT_L4CSUM_DIS;
    if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
        txq->txcsum++;  /* some hardware assistance provided */

    /* VLAN tag insertion */
    if (m->m_flags & M_VLANTAG) {
        ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
        txq->vlan_insertion++;
    }

    /* CPL header */
    cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
        V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
    cpl->pack = 0;
    cpl->len = htobe16(pktlen);
    cpl->ctrl1 = htobe64(ctrl1);

    /* Software descriptor */
    txsd = &eq->sdesc[eq->pidx];
    txsd->desc_used = ndesc;

    eq->pending += ndesc;
    eq->avail -= ndesc;
    eq->pidx += ndesc;
    if (eq->pidx >= eq->cap)
        eq->pidx -= eq->cap;

    /* SGL */
    dst = (void *)(cpl + 1);
    if (sgl->nsegs > 0) {
        txsd->map_used = 1;
        txq->sgl_wrs++;
        write_sgl_to_txd(eq, sgl, &dst);
    } else {
        txsd->map_used = 0;
        txq->imm_wrs++;
        for (; m; m = m->m_next) {
            copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
#ifdef INVARIANTS
            pktlen -= m->m_len;
#endif
        }
#ifdef INVARIANTS
        KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
#endif

    }

    txq->txpkt_wrs++;
    return (0);
}
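/*
 * Illustrative sizing example (editor's addition, header size assumed from
 * the struct layout rather than taken from the original source): a 64-byte
 * hardware descriptor holds 8 flits of 8 bytes each.  Assuming the non-LSO
 * header (fw_eth_tx_pkt_wr + cpl_tx_pkt_core) costs 4 flits, a 60-byte frame
 * sent as immediate data needs 4 + howmany(60, 8) = 4 + 8 = 12 flits, so
 * ndesc = howmany(12, 8) = 2 descriptors; the EQUEQ/EQUIQ flags are set
 * only if those 2 descriptors are the last ones available.
 */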

/*
 * Returns 0 to indicate that m has been accepted into a coalesced tx work
 * request.  It has either been folded into txpkts or txpkts was flushed and m
 * has started a new coalesced work request (as the first frame in a fresh
 * txpkts).
 *
 * Returns non-zero to indicate a failure - caller is responsible for
 * transmitting m, if there was anything in txpkts it has been flushed.
 */
static int
add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
    struct mbuf *m, struct sgl *sgl)
{
    struct sge_eq *eq = &txq->eq;
    int can_coalesce;
    struct tx_sdesc *txsd;
    int flits;

    TXQ_LOCK_ASSERT_OWNED(txq);

    if (txpkts->npkt > 0) {
        flits = TXPKTS_PKT_HDR + sgl->nflits;
        can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
            txpkts->nflits + flits <= TX_WR_FLITS &&
            txpkts->nflits + flits <= eq->avail * 8 &&
            txpkts->plen + m->m_pkthdr.len < 65536;

        if (can_coalesce) {
            txpkts->npkt++;
            txpkts->nflits += flits;
            txpkts->plen += m->m_pkthdr.len;

            txsd = &eq->sdesc[eq->pidx];
            txsd->map_used++;

            return (0);
        }

        /*
         * Couldn't coalesce m into txpkts.  The first order of business
         * is to send txpkts on its way.  Then we'll revisit m.
         */
        write_txpkts_wr(txq, txpkts);
    }

    /*
     * Check if we can start a new coalesced tx work request with m as
     * the first packet in it.
     */

    KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));

    flits = TXPKTS_WR_HDR + sgl->nflits;
    can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
        flits <= eq->avail * 8 && flits <= TX_WR_FLITS;

    if (can_coalesce == 0)
        return (EINVAL);

    /*
     * Start a fresh coalesced tx WR with m as the first frame in it.
     */
    txpkts->npkt = 1;
    txpkts->nflits = flits;
    txpkts->flitp = &eq->desc[eq->pidx].flit[2];
    txpkts->plen = m->m_pkthdr.len;

    txsd = &eq->sdesc[eq->pidx];
    txsd->map_used = 1;

    return (0);
}
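/*
 * Illustrative note (editor's addition): both coalescing checks budget
 * against eq->avail * 8 because each 64-byte descriptor holds 8 flits and,
 * as noted below, write_txpkts_wr never re-checks for room.  For example,
 * with eq->avail = 2 a coalesced WR may grow to at most 16 flits; a frame
 * whose SGL plus per-packet header would push txpkts->nflits past that (or
 * past TX_WR_FLITS, presumably the 8-descriptor WR ceiling mentioned in
 * alloc_eq_maps) forces the current txpkts to be flushed first.
 */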

/*
 * Note that write_txpkts_wr can never run out of hardware descriptors (but
 * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
 * coalescing only if sufficient hardware descriptors are available.
 */
static void
write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
{
    struct sge_eq *eq = &txq->eq;
    struct fw_eth_tx_pkts_wr *wr;
    struct tx_sdesc *txsd;
    uint32_t ctrl;
    int ndesc;

    TXQ_LOCK_ASSERT_OWNED(txq);

    ndesc = howmany(txpkts->nflits, 8);

    wr = (void *)&eq->desc[eq->pidx];
    wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
        V_FW_WR_IMMDLEN(0));    /* immdlen does not matter in this WR */
    ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
    if (eq->avail == ndesc)
        ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
    wr->equiq_to_len16 = htobe32(ctrl);
    wr->plen = htobe16(txpkts->plen);
    wr->npkt = txpkts->npkt;
    wr->r3 = wr->r4 = 0;

    /* Everything else already written */

    txsd = &eq->sdesc[eq->pidx];
    txsd->desc_used = ndesc;

    KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));

    eq->pending += ndesc;
    eq->avail -= ndesc;
    eq->pidx += ndesc;
    if (eq->pidx >= eq->cap)
        eq->pidx -= eq->cap;

    txq->txpkts_pkts += txpkts->npkt;
    txq->txpkts_wrs++;
    txpkts->npkt = 0;   /* emptied */
}
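/*
 * Illustrative note (editor's addition): the WR length is expressed in
 * 16-byte units, so howmany(nflits, 2) converts 8-byte flits to LEN16.  For
 * a coalesced WR that has grown to txpkts->nflits = 10, LEN16 = 5 and
 * ndesc = howmany(10, 8) = 2; those two descriptors were already reserved
 * by add_to_txpkts, which is why only a KASSERT (and no ENOMEM path) guards
 * eq->avail here.
 */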

static inline void
write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
    struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
{
    struct ulp_txpkt *ulpmc;
    struct ulptx_idata *ulpsc;
    struct cpl_tx_pkt_core *cpl;
    struct sge_eq *eq = &txq->eq;
    uintptr_t flitp, start, end;
    uint64_t ctrl;
    caddr_t dst;

    KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));

    start = (uintptr_t)eq->desc;
    end = (uintptr_t)eq->spg;

    /* Checksum offload */
    ctrl = 0;
    if (!(m->m_pkthdr.csum_flags & CSUM_IP))
        ctrl |= F_TXPKT_IPCSUM_DIS;
    if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
        ctrl |= F_TXPKT_L4CSUM_DIS;
    if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
        txq->txcsum++;  /* some hardware assistance provided */

    /* VLAN tag insertion */
    if (m->m_flags & M_VLANTAG) {
        ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
        txq->vlan_insertion++;
    }

    /*
     * The previous packet's SGL must have ended at a 16 byte boundary (this
     * is required by the firmware/hardware).  It follows that flitp cannot
     * wrap around between the ULPTX master command and ULPTX subcommand (8
     * bytes each), and that it cannot wrap around in the middle of the
     * cpl_tx_pkt_core either.
     */
    flitp = (uintptr_t)txpkts->flitp;
    KASSERT((flitp & 0xf) == 0,
        ("%s: last SGL did not end at 16 byte boundary: %p",
        __func__, txpkts->flitp));

    /* ULP master command */
    ulpmc = (void *)flitp;
    ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
        V_ULP_TXPKT_FID(eq->iqid));
    ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
        sizeof(*cpl) + 8 * sgl->nflits, 16));

    /* ULP subcommand */
    ulpsc = (void *)(ulpmc + 1);
    ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
        F_ULP_TX_SC_MORE);
    ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

    flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
    if (flitp == end)
        flitp = start;

    /* CPL_TX_PKT */
    cpl = (void *)flitp;
    cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
        V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
    cpl->pack = 0;
    cpl->len = htobe16(m->m_pkthdr.len);
    cpl->ctrl1 = htobe64(ctrl);

    flitp += sizeof(*cpl);
    if (flitp == end)
        flitp = start;

    /* SGL for this frame */
    dst = (caddr_t)flitp;
    txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
    txpkts->flitp = (void *)dst;

    KASSERT(((uintptr_t)dst & 0xf) == 0,
        ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
}
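/*
 * Illustrative note (editor's addition, struct sizes assumed from their
 * field layout rather than taken from the original source): each frame in a
 * coalesced WR is wrapped as ULP master command (8 bytes) + ULPTX
 * immediate-data subcommand (8 bytes) + cpl_tx_pkt_core (16 bytes) + SGL.
 * For an SGL of 4 flits the length programmed into ulpmc->len would be
 * howmany(8 + 8 + 16 + 8 * 4, 16) = howmany(64, 16) = 4 sixteen-byte units.
 */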

/*
 * If the SGL ends on an address that is not 16 byte aligned, this function will
 * add a 0 filled flit at the end.  It returns 1 in that case.
 */
static int
write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
{
    __be64 *flitp, *end;
    struct ulptx_sgl *usgl;
    bus_dma_segment_t *seg;
    int i, padded;

    KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
        ("%s: bad SGL - nsegs=%d, nflits=%d",
        __func__, sgl->nsegs, sgl->nflits));

    KASSERT(((uintptr_t)(*to) & 0xf) == 0,
        ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));

    flitp = (__be64 *)(*to);
    end = flitp + sgl->nflits;
    seg = &sgl->seg[0];
    usgl = (void *)flitp;

    /*
     * We start at a 16 byte boundary somewhere inside the tx descriptor
     * ring, so we're at least 16 bytes away from the status page.  There is
     * no chance of a wrap around in the middle of usgl (which is 16 bytes).
     */

    usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
        V_ULPTX_NSGE(sgl->nsegs));
    usgl->len0 = htobe32(seg->ds_len);
    usgl->addr0 = htobe64(seg->ds_addr);
    seg++;

    if ((uintptr_t)end <= (uintptr_t)eq->spg) {

        /* Won't wrap around at all */

        for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
            usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
            usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
        }
        if (i & 1)
            usgl->sge[i / 2].len[1] = htobe32(0);
    } else {

        /* Will wrap somewhere in the rest of the SGL */

        /* 2 flits already written, write the rest flit by flit */
        flitp = (void *)(usgl + 1);
        for (i = 0; i < sgl->nflits - 2; i++) {
            if ((uintptr_t)flitp == (uintptr_t)eq->spg)
                flitp = (void *)eq->desc;
            *flitp++ = get_flit(seg, sgl->nsegs - 1, i);
        }
        end = flitp;
    }

    if ((uintptr_t)end & 0xf) {
        *(uint64_t *)end = 0;
        end++;
        padded = 1;
    } else
        padded = 0;

    if ((uintptr_t)end == (uintptr_t)eq->spg)
        *to = (void *)eq->desc;
    else
        *to = (void *)end;

    return (padded);
}
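/*
 * Illustrative note (editor's addition): whether a pad flit is needed
 * follows directly from the flit count computed in get_pkt_sgl.  With
 * nsegs = 2 the SGL is 4 flits (32 bytes), ends 16-byte aligned, and the
 * function returns 0; with nsegs = 3 it is 5 flits (40 bytes), so one zero
 * flit is appended and the function returns 1, keeping the next frame's SGL
 * (or the txpkts flit pointer) on a 16 byte boundary.
 */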

static inline void
copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
{
    if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
        bcopy(from, *to, len);
        (*to) += len;
    } else {
        int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);

        bcopy(from, *to, portion);
        from += portion;
        portion = len - portion;    /* remaining */
        bcopy(from, (void *)eq->desc, portion);
        (*to) = (caddr_t)eq->desc + portion;
    }
}

static inline void
ring_tx_db(struct adapter *sc, struct sge_eq *eq)
{
    wmb();
    t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
        V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
    eq->pending = 0;
}

static int
reclaim_tx_descs(struct sge_eq *eq, int atleast, int howmany)
{
    struct tx_sdesc *txsd;
    struct tx_map *txm, *next_txm;
    unsigned int cidx, can_reclaim, reclaimed, maps, next_map_cidx;

    EQ_LOCK_ASSERT_OWNED(eq);

    cidx = eq->spg->cidx;   /* stable snapshot */
    cidx = be16toh(cidx);

    if (cidx >= eq->cidx)
        can_reclaim = cidx - eq->cidx;
    else
        can_reclaim = cidx + eq->cap - eq->cidx;

    if (can_reclaim < atleast)
        return (0);

    next_map_cidx = eq->map_cidx;
    next_txm = txm = &eq->maps[next_map_cidx];
    prefetch(txm);

    maps = reclaimed = 0;
    do {
        int ndesc;

        txsd = &eq->sdesc[eq->cidx];
        ndesc = txsd->desc_used;

        /* Firmware doesn't return "partial" credits. */
        KASSERT(can_reclaim >= ndesc,
            ("%s: unexpected number of credits: %d, %d",
            __func__, can_reclaim, ndesc));

        maps += txsd->map_used;
        reclaimed += ndesc;

        eq->cidx += ndesc;
        if (eq->cidx >= eq->cap)
            eq->cidx -= eq->cap;

        can_reclaim -= ndesc;

    } while (can_reclaim && reclaimed < howmany);

    eq->avail += reclaimed;
    KASSERT(eq->avail < eq->cap,    /* avail tops out at (cap - 1) */
        ("%s: too many descriptors available", __func__));

    eq->map_avail += maps;
    KASSERT(eq->map_avail <= eq->map_total,
        ("%s: too many maps available", __func__));

    prefetch(txm->m);
    while (maps--) {
        next_txm++;
        if (++next_map_cidx == eq->map_total) {
            next_map_cidx = 0;
            next_txm = eq->maps;
        }
        prefetch(next_txm->m);

        bus_dmamap_unload(eq->tx_tag, txm->map);
        m_freem(txm->m);
        txm->m = NULL;

        txm = next_txm;
    }
    eq->map_cidx = next_map_cidx;

    return (reclaimed);
}

static void
write_eqflush_wr(struct sge_eq *eq)
{
    struct fw_eq_flush_wr *wr;
    struct tx_sdesc *txsd;

    EQ_LOCK_ASSERT_OWNED(eq);
    KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));

    wr = (void *)&eq->desc[eq->pidx];
    bzero(wr, sizeof(*wr));
    wr->opcode = FW_EQ_FLUSH_WR;
    wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
        F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);

    txsd = &eq->sdesc[eq->pidx];
    txsd->desc_used = 1;
    txsd->map_used = 0;

    eq->pending++;
    eq->avail--;
    if (++eq->pidx == eq->cap)
        eq->pidx = 0;
}

static __be64
get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
{
    int i = (idx / 3) * 2;

    switch (idx % 3) {
    case 0: {
        __be64 rc;

        rc = htobe32(sgl[i].ds_len);
        if (i + 1 < nsegs)
            rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;

        return (rc);
    }
    case 1:
        return htobe64(sgl[i].ds_addr);
    case 2:
        return htobe64(sgl[i + 1].ds_addr);
    }

    return (0);
}
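/*
 * Illustrative note (editor's addition): get_flit() serves the wrap-around
 * path of write_sgl_to_txd(), which emits the SGL one flit at a time after
 * the first two flits (header + len0/addr0).  Segments past the first are
 * laid out in groups of three flits: one flit carrying a pair of lengths,
 * then the two addresses.  For the 2 remaining segments of a 3-segment SGL
 * the loop runs idx = 0..2: idx 0 packs both lengths, idx 1 and idx 2 emit
 * the two addresses, giving the 3 (= nflits - 2) trailing flits expected
 * for nsegs = 3.
 */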
static struct mbuf *
get_fl_sdesc_data(struct sge_fl *fl, int len, int flags)
{
    struct fl_sdesc *sd;
    struct mbuf *m;

    sd = &fl->sdesc[fl->cidx];
    FL_LOCK(fl);
    if (++fl->cidx == fl->cap)
        fl->cidx = 0;
    fl->needed++;
    FL_UNLOCK(fl);

    m = sd->m;
    if (m == NULL) {
        m = m_gethdr(M_NOWAIT, MT_NOINIT);
        if (m == NULL)
            return (NULL);
    }
    sd->m = NULL;   /* consumed */

    bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);
    m_init(m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, flags);
    if ((flags && len < MINCLSIZE) || (!flags && len <= MLEN))
        bcopy(sd->cl, mtod(m, caddr_t), len);
    else {
        bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
        m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
        sd->cl = NULL;  /* consumed */
    }

    m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));

    return (m);
}
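/*
 * Illustrative note (editor's addition): small payloads (shorter than
 * MINCLSIZE when a packet header is requested, or fitting in MLEN
 * otherwise) are copied into the mbuf itself so the freelist cluster can be
 * reused in place; anything larger hands the cluster to the mbuf via
 * m_cljset() and the freelist slot is refilled later.  A 40-byte payload
 * therefore takes the bcopy path, while a 1500-byte frame takes the
 * zero-copy path.
 */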
static void
set_fl_tag_idx(struct sge_fl *fl, int mtu)
{
    int i;

    FL_LOCK_ASSERT_OWNED(fl);

    for (i = 0; i < FL_BUF_SIZES - 1; i++) {
        if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT))
            break;
    }

    fl->tag_idx = i;
}

static int
handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl)
{
    unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
    struct sge *s = &sc->sge;
    struct sge_txq *txq;
    struct port_info *pi;

    txq = (void *)s->eqmap[qid - s->eq_start];
    pi = txq->ifp->if_softc;
    taskqueue_enqueue(pi->tq, &txq->resume_tx);
    txq->egr_update++;

    return (0);
}
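/*
 * Illustrative note (editor's addition, using the stock FreeBSD cluster
 * sizes of 2KB, 4KB page-sized jumbo, 9KB and 16KB): a 1500-byte MTU plus
 * the small packet-shift padding fits the first (2KB) buffer size, so
 * tag_idx stays 0; a 9000-byte MTU walks past the 2KB and 4KB entries and
 * selects the 9KB buffers (tag_idx 2).  If no size is large enough the loop
 * falls through to the last, largest entry.
 */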