1/*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: --- 12 unchanged lines hidden (view full) --- 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> |
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_sge.c 220649 2011-04-15 03:09:27Z np $"); |
30 31#include "opt_inet.h" 32 33#include <sys/types.h> 34#include <sys/mbuf.h> 35#include <sys/socket.h> 36#include <sys/kernel.h> 37#include <sys/malloc.h> --- 236 unchanged lines hidden (view full) --- 274 if (rc != 0) { 275 device_printf(sc->dev, 276 "failed to create fwd intr queue %d: %d\n", 277 i, rc); 278 return (rc); 279 } 280 } 281 |
282 handler = t4_evt_rx; |
283 i = 0; /* forward fwq's interrupt to the first fiq */ 284 } else { 285 handler = NULL; 286 i = 1; /* fwq should use vector 1 (0 is used by error) */ 287 } 288 289 snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev)); 290 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name); --- 49 unchanged lines hidden (view full) --- 340 } 341 342 for_each_rxq(pi, i, rxq) { 343 344 snprintf(name, sizeof(name), "%s rxq%d-iq", 345 device_get_nameunit(pi->dev), i); 346 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 347 pi->qsize_rxq, RX_IQ_ESIZE, |
348 sc->flags & INTR_FWD ? t4_eth_rx : NULL, name); |
349 350 snprintf(name, sizeof(name), "%s rxq%d-fl", 351 device_get_nameunit(pi->dev), i); 352 init_fl(&rxq->fl, pi->qsize_rxq / 8, name); 353 354 if (sc->flags & INTR_FWD) 355 intr_idx = (pi->first_rxq + i) % NFIQ(sc); 356 else --- 66 unchanged lines hidden (view full) --- 423t4_intr_fwd(void *arg) 424{ 425 struct sge_iq *iq = arg, *q; 426 struct adapter *sc = iq->adapter; 427 struct rsp_ctrl *ctrl; 428 int ndesc_pending = 0, ndesc_total = 0; 429 int qid; 430 |
431 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY)) 432 return; 433 |
434 while (is_new_response(iq, &ctrl)) { 435 436 rmb(); 437 438 /* Only interrupt muxing expected on this queue */ 439 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR, 440 ("unexpected event on forwarded interrupt queue: %x", 441 G_RSPD_TYPE(ctrl->u.type_gen))); --- 16 unchanged lines hidden (view full) --- 458 iq_next(iq); 459 } 460 461 if (ndesc_total > 0) { 462 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 463 V_CIDXINC(ndesc_pending) | V_INGRESSQID((u32)iq->cntxt_id) | 464 V_SEINTARM(iq->intr_params)); 465 } |
466 467 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE); |
}	/* t4_intr_fwd */

/*
 * Deals with error interrupts (the adapter's slow path).
 *
 * Runs with no iq state bookkeeping: this handler is attached to the
 * dedicated error vector, not to an ingress queue.
 */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;

	/*
	 * Legacy INTx interrupts must be explicitly de-asserted via the
	 * PCIe "clear interrupt" register; MSI/MSI-X need no such ack.
	 */
	if (sc->intr_type == INTR_INTX)
		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);

	t4_slow_intr_handler(sc);
}

/* Deals with the firmware event queue */
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;

	/*
	 * Claim the queue: IDLE -> BUSY.  If the cmpset fails the queue is
	 * either already being serviced or has been marked IQS_DISABLED by
	 * teardown, so there is nothing for this context to do.
	 */
	if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
		return;

	t4_evt_rx(arg);

	/*
	 * Release: BUSY -> IDLE.  A cmpset (not a plain store) is used so
	 * this interacts safely with the IDLE -> IQS_DISABLED transition
	 * performed during queue teardown (see free_iq_fl's spin on the
	 * state word).
	 */
	atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
}

/*
 * Deals with an NIC rx data ingress queue.  Same IDLE/BUSY protocol as
 * t4_intr_evt, but dispatches to the ethernet rx path.
 */
void
t4_intr_data(void *arg)
{
	struct sge_iq *iq = arg;

	/* Claim the queue; bail if another context owns it (or it's disabled). */
	if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
		return;

	t4_eth_rx(arg);

	/* Release the queue back to IDLE for the next interrupt. */
	atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
}

void
t4_evt_rx(void *arg)
{
	struct sge_iq *iq = arg;
513 struct adapter *sc = iq->adapter; 514 struct rsp_ctrl *ctrl; 515 const struct rss_header *rss; 516 int ndesc_pending = 0, ndesc_total = 0; 517 518 KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__)); 519 520 while (is_new_response(iq, &ctrl)) { --- 42 unchanged lines hidden (view full) --- 563 if (ndesc_total > 0) { 564 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 565 V_CIDXINC(ndesc_pending) | V_INGRESSQID(iq->cntxt_id) | 566 V_SEINTARM(iq->intr_params)); 567 } 568} 569 570void |
571t4_eth_rx(void *arg) |
572{ 573 struct sge_rxq *rxq = arg; 574 struct sge_iq *iq = arg; 575 struct adapter *sc = iq->adapter; 576 struct rsp_ctrl *ctrl; 577 struct ifnet *ifp = rxq->ifp; 578 struct sge_fl *fl = &rxq->fl; 579 struct fl_sdesc *sd = &fl->sdesc[fl->cidx], *sd_next; --- 463 unchanged lines hidden (view full) --- 1043 struct fw_iq_cmd c; 1044 struct adapter *sc = iq->adapter; 1045 __be32 v = 0; 1046 1047 /* The adapter queues are nominally allocated in port[0]'s name */ 1048 if (pi == NULL) 1049 pi = sc->port[0]; 1050 |
1051 len = iq->qsize * iq->esize; 1052 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 1053 (void **)&iq->desc); 1054 if (rc != 0) 1055 return (rc); 1056 1057 bzero(&c, sizeof(c)); 1058 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | --- 113 unchanged lines hidden (view full) --- 1172 FL_LOCK(fl); 1173 refill_fl(fl, -1); 1174 if (fl->pending >= 8) 1175 ring_fl_db(sc, fl); 1176 FL_UNLOCK(fl); 1177 } 1178 1179 /* Enable IQ interrupts */ |
1180 atomic_store_rel_32(&iq->state, IQS_IDLE); |
1181 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 1182 V_INGRESSQID(iq->cntxt_id)); 1183 1184 return (0); 1185} 1186 1187/* 1188 * This can be called with the iq/fl in any state - fully allocated and --- 15 unchanged lines hidden (view full) --- 1204 rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0, 1205 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff); 1206 if (rc != 0) { 1207 device_printf(dev, 1208 "failed to stop queue %p: %d\n", iq, rc); 1209 return (rc); 1210 } 1211 iq->flags &= ~IQ_STARTED; |
1212 1213 /* Synchronize with the interrupt handler */ 1214 while (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_DISABLED)) 1215 pause("iqfree", hz / 1000); |
1216 } 1217 1218 if (iq->flags & IQ_ALLOCATED) { 1219 1220 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 1221 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 1222 fl ? fl->cntxt_id : 0xffff, 0xffff); 1223 if (rc != 0) { 1224 device_printf(dev, 1225 "failed to free queue %p: %d\n", iq, rc); 1226 return (rc); 1227 } 1228 iq->flags &= ~IQ_ALLOCATED; 1229 } 1230 1231 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 1232 |
1233 bzero(iq, sizeof(*iq)); 1234 1235 if (fl) { 1236 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 1237 fl->desc); 1238 1239 if (fl->sdesc) { 1240 FL_LOCK(fl); --- 210 unchanged lines hidden (view full) --- 1451static int 1452free_txq(struct port_info *pi, struct sge_txq *txq) 1453{ 1454 int rc; 1455 struct adapter *sc = pi->adapter; 1456 struct sge_eq *eq = &txq->eq; 1457 1458 if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) { |
1459 1460 /* 1461 * Wait for the response to a credit flush if there's one 1462 * pending. Clearing the flag tells handle_sge_egr_update or 1463 * cxgbe_txq_start (depending on how far the response has made 1464 * it) that they should ignore the response and wake up free_txq 1465 * instead. 1466 * 1467 * The interface has been marked down by the time we get here 1468 * (both IFF_UP and IFF_DRV_RUNNING cleared). qflush has 1469 * emptied the tx buf_rings and we know nothing new is being 1470 * queued for tx so we don't have to worry about a new credit 1471 * flush request. 1472 */ 1473 TXQ_LOCK(txq); 1474 if (eq->flags & EQ_CRFLUSHED) { 1475 eq->flags &= ~EQ_CRFLUSHED; 1476 msleep(txq, &eq->eq_lock, 0, "crflush", 0); 1477 } 1478 TXQ_UNLOCK(txq); 1479 |
1480 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); 1481 if (rc != 0) { 1482 device_printf(pi->dev, 1483 "failed to free egress queue %p: %d\n", eq, rc); 1484 return (rc); 1485 } 1486 eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED); 1487 } --- 1003 unchanged lines hidden (view full) --- 2491handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl) 2492{ 2493 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 2494 struct sge *s = &sc->sge; 2495 struct sge_txq *txq; 2496 struct port_info *pi; 2497 2498 txq = (void *)s->eqmap[qid - s->eq_start]; |
	TXQ_LOCK(txq);
	if (txq->eq.flags & EQ_CRFLUSHED) {
		/*
		 * Normal case: the queue is live and this is the response to
		 * a credit flush it requested.  Kick off resume_tx on the
		 * port's taskqueue to restart transmission with the newly
		 * reclaimed credits.
		 */
		pi = txq->ifp->if_softc;
		taskqueue_enqueue(pi->tq, &txq->resume_tx);
		txq->egr_update++;
	} else
		wakeup_one(txq);	/* txq is going away, wakeup free_txq */
	TXQ_UNLOCK(txq);

	return (0);
}