Lines Matching refs:adap

59 struct adapter *adap = q->adap;
61 if (adap->uld[q->uld].lro_flush)
62 adap->uld[q->uld].lro_flush(&q->lro_mgr);
77 struct adapter *adap = q->adap;
87 ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
91 ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
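
The matches at 59-91 show the rx path dispatching through the adapter's per-ULD callback table, indexed by q->uld, with a NULL check before each indirect call. Below is a minimal, self-contained C sketch of that dispatch pattern; uld_info, rxq, demo_rx and the table size are hypothetical stand-ins, not the driver's real types.

#include <stdio.h>

/* Hypothetical stand-ins for the per-ULD callback table and rx queue. */
struct uld_info {
	void *handle;
	int (*rx_handler)(void *handle, const void *rsp);
	void (*lro_flush)(void *lro_mgr);
};

struct rxq {
	unsigned int uld;	/* index into the adapter's uld[] table */
	void *lro_mgr;
};

struct adapter {
	struct uld_info uld[4];
};

/* Dispatch a response to the owning ULD, mirroring the NULL-checked
 * indirect calls seen in the matches above. */
static int uld_rx_dispatch(struct adapter *adap, struct rxq *q, const void *rsp)
{
	struct uld_info *ui = &adap->uld[q->uld];

	return ui->rx_handler ? ui->rx_handler(ui->handle, rsp) : -1;
}

static void uld_lro_flush(struct adapter *adap, struct rxq *q)
{
	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(q->lro_mgr);
}

static int demo_rx(void *handle, const void *rsp)
{
	(void)handle; (void)rsp;
	printf("rx handled\n");
	return 0;
}

int main(void)
{
	struct adapter adap = { .uld[0] = { .rx_handler = demo_rx } };
	struct rxq q = { .uld = 0 };

	uld_rx_dispatch(&adap, &q, "rsp");
	uld_lro_flush(&adap, &q);	/* no lro_flush registered: silently skipped */
	return 0;
}
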
108 static int alloc_uld_rxqs(struct adapter *adap,
115 struct sge *s = &adap->sge;
118 per_chan = rxq_info->nrxq / adap->params.nports;
120 if (adap->flags & CXGB4_USING_MSIX)
128 per_chan = rxq_info->nciq / adap->params.nports;
133 msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
139 snprintf(adap->msix_info[msi_idx].desc,
140 sizeof(adap->msix_info[msi_idx].desc),
142 adap->port[0]->name, rxq_info->name, i);
144 q->msix = &adap->msix_info[msi_idx];
146 err = t4_sge_alloc_rxq(adap, &q->rspq, false,
147 adap->port[que_idx++ / per_chan],
165 free_rspq_fl(adap, &q->rspq,
168 cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
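
The alloc_uld_rxqs() matches at 108-168 allocate one response queue (plus an MSI-X bitmap slot) per rx queue and, on failure, walk back over everything already set up. A sketch of that allocate-then-unwind pattern, with hypothetical alloc_one()/free_one() helpers standing in for t4_sge_alloc_rxq()/free_rspq_fl():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-queue resource. */
struct uld_rxq {
	void *rspq;
	int msix_idx;
};

static int alloc_one(struct uld_rxq *q, int idx)
{
	q->rspq = malloc(64);		/* stands in for t4_sge_alloc_rxq() */
	q->msix_idx = idx;		/* stands in for the MSI-X bitmap slot */
	return q->rspq ? 0 : -1;
}

static void free_one(struct uld_rxq *q)
{
	free(q->rspq);			/* stands in for free_rspq_fl() */
	q->rspq = NULL;
}

/* Allocate n queues; on any failure, unwind the ones already allocated,
 * like the freeout: path in the matches above. */
static int alloc_uld_rxqs_sketch(struct uld_rxq *qs, int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = alloc_one(&qs[i], i);
		if (err)
			goto freeout;
	}
	return 0;

freeout:
	while (--i >= 0)
		free_one(&qs[i]);
	return err;
}

int main(void)
{
	struct uld_rxq qs[8] = {{0}};

	if (alloc_uld_rxqs_sketch(qs, 8) == 0)
		printf("all rx queues allocated\n");
	return 0;
}
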
174 setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
176 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
179 ret = alloc_uld_rxqs(adap, rxq_info, lro);
184 if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
185 struct sge *s = &adap->sge;
190 for_each_port(adap, i) {
195 ret = t4_set_params(adap, adap->mbox, adap->pf,
202 static void t4_free_uld_rxqs(struct adapter *adap, int n,
207 free_rspq_fl(adap, &q->rspq,
212 static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
214 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
216 if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
217 struct sge *s = &adap->sge;
222 for_each_port(adap, i) {
226 t4_set_params(adap, adap->mbox, adap->pf,
232 t4_free_uld_rxqs(adap, rxq_info->nciq,
234 t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
237 static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
240 struct sge *s = &adap->sge;
248 if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
250 rxq_info->nrxq = roundup(i, adap->params.nports);
254 rxq_info->nrxq = roundup(i, adap->params.nports);
259 if (adap->flags & CXGB4_USING_MSIX)
265 rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
266 adap->params.nports);
268 adap->params.nports);
289 init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
294 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
296 dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
303 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
308 adap->sge.uld_rxq_info[uld_type] = rxq_info;
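
The cfg_queues_uld() matches at 237-308 size the rx and concentrator queue counts as multiples of the port count. The arithmetic is plain round-up/round-down to a multiple; a small worked example (the macros below mirror the integer arithmetic of the kernel's roundup()/rounddown() and are re-declared here only for illustration):

#include <stdio.h>

/* Integer-only, y > 0. */
#define rounddown(x, y)	(((x) / (y)) * (y))
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int nports = 4;	/* adap->params.nports in the driver */
	unsigned int requested = 10;	/* queue count before alignment */

	/* 10 queues spread over 4 ports: 12 when rounding up, 8 when
	 * rounding down, so every port ends up with the same share. */
	printf("roundup(%u, %u)   = %u\n", requested, nports, roundup(requested, nports));
	printf("rounddown(%u, %u) = %u\n", requested, nports, rounddown(requested, nports));
	return 0;
}
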
313 static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
315 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
317 adap->sge.uld_rxq_info[uld_type] = NULL;
324 request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
326 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
340 cxgb4_set_msix_aff(adap, minfo->vec,
349 cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
356 free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
358 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
365 cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
370 static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
372 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
381 cxgb4_enable_rx(adap, q);
385 static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
387 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
401 free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
411 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
413 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
416 free_txq(adap, &txq->q);
422 alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
425 struct sge *s = &adap->sge;
429 j = nq / adap->params.nports;
434 err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
441 free_sge_txq_uld(adap, txq_info);
446 release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
451 txq_info = adap->sge.uld_txq_info[tx_uld_type];
454 free_sge_txq_uld(adap, txq_info);
457 adap->sge.uld_txq_info[tx_uld_type] = NULL;
462 setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
469 txq_info = adap->sge.uld_txq_info[tx_uld_type];
479 i = min_t(int, adap->vres.ncrypto_fc,
481 txq_info->ntxq = rounddown(i, adap->params.nports);
483 dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
490 txq_info->ntxq = roundup(i, adap->params.nports);
499 if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
506 adap->sge.uld_txq_info[tx_uld_type] = txq_info;
510 static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
513 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
515 struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
524 int t4_uld_mem_alloc(struct adapter *adap)
526 struct sge *s = &adap->sge;
528 adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
529 if (!adap->uld)
548 kfree(adap->uld);
552 void t4_uld_mem_free(struct adapter *adap)
554 struct sge *s = &adap->sge;
558 kfree(adap->uld);
562 static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
564 if (adap->uld[type].handle) {
565 adap->uld[type].handle = NULL;
566 adap->uld[type].add = NULL;
567 release_sge_txq_uld(adap, type);
569 if (adap->flags & CXGB4_FULL_INIT_DONE)
570 quiesce_rx_uld(adap, type);
572 if (adap->flags & CXGB4_USING_MSIX)
573 free_msix_queue_irqs_uld(adap, type);
575 free_sge_queues_uld(adap, type);
576 free_queues_uld(adap, type);
580 void t4_uld_clean_up(struct adapter *adap)
584 if (!is_uld(adap))
589 if (!adap->uld[i].handle)
592 cxgb4_shutdown_uld_adapter(adap, i);
597 static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
601 lld->pdev = adap->pdev;
602 lld->pf = adap->pf;
603 lld->l2t = adap->l2t;
604 lld->tids = &adap->tids;
605 lld->ports = adap->port;
606 lld->vr = &adap->vres;
607 lld->mtus = adap->params.mtus;
608 lld->nchan = adap->params.nports;
609 lld->nports = adap->params.nports;
610 lld->wr_cred = adap->params.ofldq_wr_cred;
611 lld->crypto = adap->params.crypto;
612 lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
613 lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
614 lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
615 lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
616 lld->iscsi_ppm = &adap->iscsi_ppm;
617 lld->adapter_type = adap->params.chip;
618 lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
619 lld->udb_density = 1 << adap->params.sge.eq_qpp;
620 lld->ucq_density = 1 << adap->params.sge.iq_qpp;
621 lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
622 lld->filt_mode = adap->params.tp.vlan_pri_map;
626 lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
627 lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
628 lld->fw_vers = adap->params.fw_vers;
630 lld->sge_ingpadboundary = adap->sge.fl_align;
631 lld->sge_egrstatuspagesize = adap->sge.stat_len;
632 lld->sge_pktshift = adap->sge.pktshift;
633 lld->ulp_crypto = adap->params.crypto;
634 lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
635 lld->max_ordird_qp = adap->params.max_ordird_qp;
636 lld->max_ird_adapter = adap->params.max_ird_adapter;
637 lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
638 lld->nodeid = dev_to_node(adap->pdev_dev);
639 lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
640 lld->write_w_imm_support = adap->params.write_w_imm_support;
641 lld->write_cmpl_support = adap->params.write_cmpl_support;
644 static int uld_attach(struct adapter *adap, unsigned int uld)
649 uld_init(adap, &lli);
650 uld_queue_init(adap, uld, &lli);
652 handle = adap->uld[uld].add(&lli);
654 dev_warn(adap->pdev_dev,
656 adap->uld[uld].name, PTR_ERR(handle));
660 adap->uld[uld].handle = handle;
663 if (adap->flags & CXGB4_FULL_INIT_DONE)
664 adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
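
The uld_attach() matches at 644-664 show the ->add() callback handing back either a usable handle or an encoded error, tested with IS_ERR() and decoded with PTR_ERR(). A userspace sketch of that convention; MAX_ERRNO and the helpers follow the kernel's pattern but are re-declared here only for illustration, and demo_add() is hypothetical:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical ->add() callback that fails to attach. */
static void *demo_add(void *lld_info)
{
	(void)lld_info;
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *handle = demo_add(NULL);

	if (IS_ERR(handle)) {
		fprintf(stderr, "could not attach, error %ld\n", PTR_ERR(handle));
		return 1;
	}
	return 0;
}
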
670 static bool cxgb4_uld_in_use(struct adapter *adap)
672 const struct tid_info *t = &adap->tids;
678 * @adap: adapter info
681 int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
691 if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
695 if (cxgb4_uld_in_use(adap)) {
696 dev_dbg(adap->pdev_dev,
700 ret = t4_set_params(adap, adap->mbox, adap->pf,
704 refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
708 refcount_inc(&adap->chcr_ktls.ktls_refcount);
712 if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
717 if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
718 ret = t4_set_params(adap, adap->mbox, adap->pf,
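
The cxgb4_set_ktls_feature() matches at 681-718 gate the hardware on/off switch on a reference count: only the first enable and the last disable actually program the device. A single-threaded sketch of that first/last gating (the driver itself uses refcount_t under a lock and t4_set_params() to program the firmware; hw_set_ktls() here is a hypothetical stand-in):

#include <stdio.h>

static unsigned int ktls_refcount;

static int hw_set_ktls(int enable)
{
	printf("programming hardware: ktls %s\n", enable ? "on" : "off");
	return 0;
}

int set_ktls_feature_sketch(int enable)
{
	if (enable) {
		if (ktls_refcount == 0) {	/* first user programs the hardware */
			int ret = hw_set_ktls(1);

			if (ret)
				return ret;
		}
		ktls_refcount++;
		return 0;
	}

	if (ktls_refcount == 0)
		return 0;			/* nothing to disable */
	if (--ktls_refcount == 0)		/* last user turns it back off */
		return hw_set_ktls(0);
	return 0;
}

int main(void)
{
	set_ktls_feature_sketch(1);	/* programs hardware on */
	set_ktls_feature_sketch(1);	/* refcount only */
	set_ktls_feature_sketch(0);	/* refcount only */
	set_ktls_feature_sketch(0);	/* programs hardware off */
	return 0;
}
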
730 static void cxgb4_uld_alloc_resources(struct adapter *adap,
736 if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
737 (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
739 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
741 ret = cfg_queues_uld(adap, type, p);
744 ret = setup_sge_queues_uld(adap, type, p->lro);
747 if (adap->flags & CXGB4_USING_MSIX) {
748 ret = request_msix_queue_irqs_uld(adap, type);
752 if (adap->flags & CXGB4_FULL_INIT_DONE)
753 enable_rx_uld(adap, type);
754 if (adap->uld[type].add)
756 ret = setup_sge_txq_uld(adap, type, p);
759 adap->uld[type] = *p;
760 ret = uld_attach(adap, type);
765 release_sge_txq_uld(adap, type);
767 if (adap->flags & CXGB4_FULL_INIT_DONE)
768 quiesce_rx_uld(adap, type);
769 if (adap->flags & CXGB4_USING_MSIX)
770 free_msix_queue_irqs_uld(adap, type);
772 free_sge_queues_uld(adap, type);
774 free_queues_uld(adap, type);
776 dev_warn(adap->pdev_dev,
780 void cxgb4_uld_enable(struct adapter *adap)
785 list_add_tail(&adap->list_node, &adapter_list);
787 cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
803 struct adapter *adap;
814 list_for_each_entry(adap, &adapter_list, list_node)
815 cxgb4_uld_alloc_resources(adap, type, p);
833 struct adapter *adap;
839 list_for_each_entry(adap, &adapter_list, list_node) {
840 if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
841 (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
843 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
846 cxgb4_shutdown_uld_adapter(adap, type);
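
The register/unregister matches at 780-846 walk a global adapter_list and allocate or shut down per-adapter resources for the given ULD type. A sketch of that walk using a hand-rolled singly linked list in place of the kernel's list_head iteration; every name below is hypothetical:

#include <stdio.h>

struct adapter {
	const char *name;
	struct adapter *next;
};

static struct adapter *adapter_list;

static void uld_alloc_resources(struct adapter *adap, int type)
{
	printf("%s: allocating queues/irqs for ULD type %d\n", adap->name, type);
}

static void uld_shutdown(struct adapter *adap, int type)
{
	printf("%s: shutting down ULD type %d\n", adap->name, type);
}

/* Apply a ULD register/unregister across every adapter currently known. */
static void register_uld_sketch(int type)
{
	for (struct adapter *adap = adapter_list; adap; adap = adap->next)
		uld_alloc_resources(adap, type);
}

static void unregister_uld_sketch(int type)
{
	for (struct adapter *adap = adapter_list; adap; adap = adap->next)
		uld_shutdown(adap, type);
}

int main(void)
{
	struct adapter a1 = { .name = "adap1", .next = NULL };
	struct adapter a0 = { .name = "adap0", .next = &a1 };

	adapter_list = &a0;
	register_uld_sketch(0);
	unregister_uld_sketch(0);
	return 0;
}
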