Lines Matching refs:adap

409  *	@adap: the adapter
418 t3_sge_init(adapter_t *adap, struct sge_params *p)
422 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
431 if (adap->params.rev > 0) {
432 if (!(adap->flags & (USING_MSIX | USING_MSI)))
435 t3_write_reg(adap, A_SG_CONTROL, ctrl);
436 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
438 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
439 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
440 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
441 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
442 adap->params.rev < T3_REV_C ? 1000 : 500);
443 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
444 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
445 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
446 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
447 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
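
Note: lines 438-440 above program the SGE timer tick and the command-queue credit timeout in units of the T3 core clock: the tick is set to a tenth of a microsecond's worth of core clocks, and the timeout to 200 us. A small worked example of those values, assuming a hypothetical 200 MHz core clock (the driver derives the real figure from core_ticks_per_usec(adap)):

/*
 * Worked example for lines 438-440; the 200 MHz core clock is an assumption.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int ticks_per_usec = 200;	/* hypothetical 200 MHz core clock */

	/* A_SG_TIMER_TICK: one SGE timer tick every 0.1 us */
	printf("SG_TIMER_TICK = %u core ticks\n", ticks_per_usec / 10);
	/* V_TIMEOUT(200 * ...): command-queue credit timeout of 200 us */
	printf("CMDQ timeout  = %u core ticks\n", 200 * ticks_per_usec);
	return (0);
}
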
538 t3_sge_prep(adapter_t *adap, struct sge_params *p)
542 nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
543 nqsets *= adap->params.nports;
551 is_offload(adap);
569 device_printf(adap->dev,
577 if (adap->params.nports > 2) {
592 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
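
Note: lines 542-543 split the adapter's queue sets across ports: each port gets SGE_QSETS / nports qsets, capped by the CPU count, and the per-port figure is then multiplied back up to an adapter-wide total. A minimal sketch of that arithmetic, assuming SGE_QSETS is 8:

/* Sketch of the nqsets arithmetic at lines 542-543; SGE_QSETS == 8 is assumed. */
#include <stdio.h>

#define SGE_QSETS	8		/* assumed adapter-wide qset limit */

static unsigned int
total_qsets(unsigned int nports, unsigned int ncpus)
{
	unsigned int nqsets = SGE_QSETS / nports;	/* per-port share */

	if (nqsets > ncpus)
		nqsets = ncpus;		/* no more qsets per port than CPUs */
	return (nqsets * nports);	/* adapter-wide total */
}

int
main(void)
{
	printf("%u\n", total_qsets(2, 4));	/* 2 ports, 4 CPUs -> 8 qsets */
	printf("%u\n", total_qsets(2, 2));	/* 2 ports, 2 CPUs -> 4 qsets */
	return (0);
}
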
815 __refill_fl(adapter_t *adap, struct sge_fl *fl)
817 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
821 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
826 refill_fl(adap, fl, min(max, reclaimable));
839 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
856 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
1217 * @adap: the adapter
1228 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1235 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1238 t3_write_reg(adap, A_SG_KDOORBELL,
1244 t3_write_reg(adap, A_SG_KDOORBELL,
1796 * @adap: the adapter
1813 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1861 * @adap: the adapter
1870 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1884 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1901 t3_write_reg(adap, A_SG_KDOORBELL,
1921 adapter_t *adap = qs->port->adapter;
1947 t3_write_reg(adap, A_SG_KDOORBELL,
1956 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1958 return ctrl_xmit(adap, &adap->sge.qs[0], m);
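
Note: lines 1870-1958 are the control-queue transmit path: ctrl_xmit() reserves a descriptor via check_desc_avail(), writes the work request, and rings A_SG_KDOORBELL with the queue's egress context id, while t3_mgmt_tx() simply routes management frames through qset 0's control queue. A minimal model of that reserve/write/ring sequence; the register offset, types, and all model_* names below are stand-ins, not the driver's definitions:

#include <stdio.h>

#define MODEL_KDOORBELL	0x420u		/* placeholder, not the real A_SG_KDOORBELL */

struct model_txq {
	unsigned int cntxt_id;		/* egress context id */
	unsigned int size, in_use;	/* ring size / descriptors in flight */
	unsigned int pidx;		/* producer index */
};

/* stands in for t3_write_reg(adap, reg, val) */
static void
model_write_reg(unsigned int reg, unsigned int val)
{
	printf("reg 0x%x <- 0x%x\n", reg, val);
}

static int
model_ctrl_xmit(struct model_txq *q)
{
	if (q->in_use == q->size)	/* check_desc_avail(): queue full */
		return (1);
	q->in_use++;
	/* ... the work request would be copied into the descriptor at pidx ... */
	if (++q->pidx == q->size)
		q->pidx = 0;		/* wrap the producer index */
	model_write_reg(MODEL_KDOORBELL, q->cntxt_id);	/* kick the egress context */
	return (0);
}

int
main(void)
{
	struct model_txq q = { .cntxt_id = 3, .size = 256 };

	return (model_ctrl_xmit(&q));
}
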
2187 * @adap: the adapter
2198 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2259 * @adap: the adapter
2266 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2278 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2296 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2297 check_ring_tx_db(adap, q, 1);
2315 adapter_t *adap = qs->port->adapter;
2346 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2355 t3_write_reg(adap, A_SG_KDOORBELL,
2425 q->adap = sc;
2620 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2623 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2666 * @adap: the adapter that received the packet
2681 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2711 recycle_rx_buf(adap, fl, fl->cidx);
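
Note: get_packet() at lines 2681-2711 decides whether a received buffer is handed up the stack or, when the free list is running low, recycled in place via recycle_rx_buf() with the frame dropped. The driver's exact threshold policy differs in detail; this sketch only illustrates the recycle-instead-of-consume idea visible at line 2711, and every name here is a stand-in:

#include <stdio.h>

struct model_fl {
	unsigned int credits;	/* buffers currently posted on the free list */
};

/* stands in for recycle_rx_buf(adap, fl, fl->cidx): the buffer stays on the list */
static void
model_recycle(struct model_fl *fl)
{
	printf("recycled buffer, credits stay at %u\n", fl->credits);
}

static int
model_get_packet(struct model_fl *fl, unsigned int drop_thres)
{
	if (fl->credits < drop_thres) {
		model_recycle(fl);	/* list too shallow: keep the buffer, drop the frame */
		return (0);
	}
	fl->credits--;			/* consume the buffer and pass the frame up */
	return (1);
}

int
main(void)
{
	struct model_fl fl = { .credits = 4 };

	return (model_get_packet(&fl, 8));	/* below threshold -> recycled */
}
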
2805 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2813 * @adap: the adapter
2827 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2893 get_imm_packet(adap, r, m);
2900 eop = get_packet(adap, drop_thresh, qs, mh, r);
2902 if (r->rss_hdr.hash_type && !adap->timestamp) {
2922 adap->cpl_handler[opcode](qs, r, mh->mh_head);
2930 t3_rx_eth(adap, m, ethpad);
2970 refill_rspq(adap, rspq, rspq->credits);
2973 __refill_fl_lt(adap, &qs->fl[0], 32);
2974 __refill_fl_lt(adap, &qs->fl[1], 32);
2984 check_ring_db(adap, qs, sleeping);
2990 __refill_fl_lt(adap, &qs->fl[0], 512);
2991 __refill_fl_lt(adap, &qs->fl[1], 512);
3000 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3005 work = process_responses(adap, rspq_to_qset(rq), -1);
3011 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3029 adapter_t *adap = data;
3030 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3032 t3_write_reg(adap, A_PL_CLI, 0);
3033 map = t3_read_reg(adap, A_SG_DATA_INTR);
3039 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3040 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3041 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3045 for_each_port(adap, i)
3047 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3060 adapter_t *adap = data;
3061 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3066 for_each_port(adap, i)
3067 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3071 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3072 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3073 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3081 adapter_t *adap = qs->port->adapter;
3084 if (process_responses_gts(adap, rspq) == 0)
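
Note: the tail of the listing is the interrupt path. process_responses_gts() (lines 3000-3011) drains a response queue with an unlimited budget and then writes A_SG_GTS with the queue's context id to return the consumed credits and re-arm it. The legacy/MSI handlers (3029-3073) walk every port's qset-0 response queue; the variant at 3060-3073 masks A_PL_INT_ENABLE0 and defers to the slow_intr_task when no new responses were found, and t3_intr_msix (3081-3084) services a single qset directly. A compact model of that "try the fast path, fall back to the slow task" dispatch; all model_* names are stand-ins for the driver calls shown above:

#include <stdbool.h>
#include <stdio.h>

#define MODEL_NPORTS	2

/* stands in for process_responses_gts(): returns nonzero if work was done */
static int
model_process_responses_gts(int port)
{
	int work = (port == 1);		/* pretend only port 1 has responses pending */

	if (work)
		printf("port %d: drained responses, re-armed via SG_GTS\n", port);
	return (work);
}

/* stands in for masking A_PL_INT_ENABLE0 and enqueueing the slow_intr_task */
static void
model_schedule_slow_task(void)
{
	printf("no new responses: slow interrupt task scheduled\n");
}

/* rough shape of the legacy/MSI handler: fast path first, slow task as fallback */
static void
model_intr(void)
{
	bool found = false;

	for (int port = 0; port < MODEL_NPORTS; port++)
		if (model_process_responses_gts(port))
			found = true;
	if (!found)
		model_schedule_slow_task();
}

int
main(void)
{
	model_intr();
	return (0);
}
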