Lines Matching defs:ctxp (occurrences of struct lpfc_async_xchg_ctx *ctxp in the lpfc NVMe target code; the leading number on each entry is the source line of the match)

219 struct lpfc_async_xchg_ctx *ctxp;
224 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
225 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
233 return ctxp;
241 struct lpfc_async_xchg_ctx *ctxp;
246 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
247 if (ctxp->oxid != oxid || ctxp->sid != sid)
255 return ctxp;
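
The two lookups above (source lines 219-255) walk the active-exchange list and match either on the adapter's exchange tag (sli4_xritag) or on the OXID/SID pair carried in the received frame. Below is a minimal sketch of that walk-and-match pattern in plain C; the types and names (xchg_ctx, find_ctx_by_xri, find_ctx_by_oxid) are illustrative, not the driver's, and the host lock the driver holds around the walk is omitted.

    #include <stddef.h>
    #include <stdint.h>

    struct xchg_ctx {
        uint16_t xri;            /* exchange tag assigned by the adapter  */
        uint16_t oxid;           /* originator exchange id from the frame */
        uint32_t sid;            /* source (initiator) FC address         */
        struct xchg_ctx *next;   /* link in the active-context list       */
    };

    /* Return the active context that owns this xri, or NULL. */
    struct xchg_ctx *find_ctx_by_xri(struct xchg_ctx *head, uint16_t xri)
    {
        for (struct xchg_ctx *c = head; c; c = c->next)
            if (c->xri == xri)
                return c;
        return NULL;
    }

    /* Same walk, but match on the oxid/sid pair from the FC header. */
    struct xchg_ctx *find_ctx_by_oxid(struct xchg_ctx *head,
                                      uint16_t oxid, uint32_t sid)
    {
        for (struct xchg_ctx *c = head; c; c = c->next)
            if (c->oxid == oxid && c->sid == sid)
                return c;
        return NULL;
    }
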
263 struct lpfc_async_xchg_ctx *ctxp)
265 lockdep_assert_held(&ctxp->ctxlock);
269 ctxp->oxid, ctxp->flag);
271 if (ctxp->flag & LPFC_NVME_CTX_RLS)
274 ctxp->flag |= LPFC_NVME_CTX_RLS;
276 list_del(&ctxp->list);
279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
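
lpfc_nvmet_defer_release (source lines 263-279) runs with ctxlock held, uses LPFC_NVME_CTX_RLS as a once-only guard, and moves the context from the active list onto the aborted-context list so it can be released later. A simplified userspace sketch of that flag-and-move pattern follows, assuming a circular doubly linked list and hypothetical names (defer_release, CTX_RLS); as in the driver, locking is the caller's job.

    #define CTX_RLS 0x1                      /* release already deferred */

    struct list_node { struct list_node *prev, *next; };

    struct xchg_ctx {
        unsigned int flag;
        struct list_node list;
    };

    /* Unlink a node from its current list and append it before head. */
    void list_move_tail(struct list_node *n, struct list_node *head)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    /* Called with the context lock held; a second call is a no-op. */
    void defer_release(struct xchg_ctx *ctx, struct list_node *abts_list)
    {
        if (ctx->flag & CTX_RLS)
            return;                          /* already queued for release */
        ctx->flag |= CTX_RLS;
        list_move_tail(&ctx->list, abts_list);
    }

Testing and setting the flag and moving the list entry under one lock is what keeps two racing paths from queueing the same context for release twice.
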
389 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
398 if (ctxp->state == LPFC_NVME_STE_FREE) {
401 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
404 if (ctxp->rqb_buffer) {
405 spin_lock_irqsave(&ctxp->ctxlock, iflag);
406 nvmebuf = ctxp->rqb_buffer;
409 ctxp->rqb_buffer = NULL;
410 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
411 ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
412 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
416 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
421 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
424 ctxp->state = LPFC_NVME_STE_FREE;
441 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
442 ctxp->wqeq = NULL;
443 ctxp->offset = 0;
444 ctxp->phba = phba;
445 ctxp->size = size;
446 ctxp->oxid = oxid;
447 ctxp->sid = sid;
448 ctxp->state = LPFC_NVME_STE_RCV;
449 ctxp->entry_cnt = 1;
450 ctxp->flag = 0;
451 ctxp->ctxbuf = ctx_buf;
452 ctxp->rqb_buffer = (void *)nvmebuf;
453 spin_lock_init(&ctxp->ctxlock);
457 if (ctxp->ts_isr_cmd) {
458 ctxp->ts_cmd_nvme = 0;
459 ctxp->ts_nvme_data = 0;
460 ctxp->ts_data_wqput = 0;
461 ctxp->ts_isr_data = 0;
462 ctxp->ts_data_nvme = 0;
463 ctxp->ts_nvme_status = 0;
464 ctxp->ts_status_wqput = 0;
465 ctxp->ts_isr_status = 0;
466 ctxp->ts_status_nvme = 0;
472 spin_lock_irqsave(&ctxp->ctxlock, iflag);
473 ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
474 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
482 ctxp->oxid,
487 spin_lock_irqsave(&ctxp->ctxlock, iflag);
488 lpfc_nvmet_defer_release(phba, ctxp);
489 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
490 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
498 * (ctxp->idx), to save context structure.
501 list_del_init(&ctxp->list);
504 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
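
When a context buffer is posted back for reuse (source lines 389-504), the driver first drains any deferred receive buffer under ctxlock and then resets the per-exchange fields so the recycled context looks freshly allocated before it is parked on a per-CPU free list. A short sketch of just the re-initialization step, with illustrative types (ctx_reinit_for_reuse is not a driver function):

    enum ctx_state { STE_FREE, STE_RCV };

    struct xchg_ctx {
        enum ctx_state state;
        unsigned int flag, entry_cnt, offset;
        unsigned int size, oxid, sid;
        void *rqb_buffer;        /* deferred receive buffer, if any */
        void *wqeq;              /* cached work-queue entry         */
    };

    /* Reset a recycled context so it looks like a freshly allocated one. */
    void ctx_reinit_for_reuse(struct xchg_ctx *ctx, unsigned int oxid,
                              unsigned int sid, unsigned int size)
    {
        ctx->wqeq = NULL;
        ctx->offset = 0;
        ctx->size = size;
        ctx->oxid = oxid;
        ctx->sid = sid;
        ctx->state = STE_RCV;    /* ready to take the next command */
        ctx->entry_cnt = 1;
        ctx->flag = 0;
        ctx->rqb_buffer = NULL;  /* attached separately by the caller */
    }
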
515 struct lpfc_async_xchg_ctx *ctxp)
521 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
522 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
523 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
524 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
525 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
528 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
530 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
532 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
534 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
536 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
538 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
540 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
542 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
544 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
546 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
573 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
576 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
582 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
588 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
594 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
602 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
603 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
609 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
615 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
621 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
627 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
629 seg10 = (ctxp->ts_isr_status -
630 ctxp->ts_isr_cmd);
632 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
638 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
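
The ktime accounting (source lines 515-638) refuses to record statistics unless all ten timestamps along the command -> data -> status path are non-zero and never run backwards, and it then expresses each stage as an offset from the first command interrupt. A condensed sketch of that validation follows; the struct and field names are illustrative and the driver's version is more detailed.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Timestamps (ns) taken along the command -> data -> status path. */
    struct io_times {
        uint64_t isr_cmd, cmd_nvme, nvme_data, data_wqput, isr_data,
                 data_nvme, nvme_status, status_wqput, isr_status,
                 status_nvme;
    };

    /* Fill seg[] with per-stage offsets from the first interrupt. */
    bool stage_latencies(const struct io_times *t, uint64_t seg[10])
    {
        const uint64_t s[10] = {
            t->isr_cmd, t->cmd_nvme, t->nvme_data, t->data_wqput,
            t->isr_data, t->data_nvme, t->nvme_status, t->status_wqput,
            t->isr_status, t->status_nvme,
        };

        for (size_t i = 0; i < 10; i++) {
            if (!s[i])
                return false;             /* a stage was never stamped    */
            if (i && s[i] < s[i - 1])
                return false;             /* time appears to go backwards */
        }
        for (size_t i = 0; i < 10; i++)
            seg[i] = s[i] - s[0];
        return true;
    }
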
724 struct lpfc_async_xchg_ctx *ctxp;
731 ctxp = cmdwqe->context_un.axchg;
732 ctxp->flag &= ~LPFC_NVME_IO_INP;
734 rsp = &ctxp->hdlrctx.fcp_req;
746 ctxp->oxid, op, status);
761 ctxp->flag |= LPFC_NVME_XBUSY;
767 ctxp->flag &= ~LPFC_NVME_XBUSY;
773 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
774 status, result, ctxp->flag);
789 ctxp->state = LPFC_NVME_STE_DONE;
790 ctxp->entry_cnt++;
793 if (ctxp->ts_cmd_nvme) {
795 ctxp->ts_isr_data =
797 ctxp->ts_data_nvme =
799 ctxp->ts_nvme_status =
800 ctxp->ts_data_nvme;
801 ctxp->ts_status_wqput =
802 ctxp->ts_data_nvme;
803 ctxp->ts_isr_status =
804 ctxp->ts_data_nvme;
805 ctxp->ts_status_nvme =
806 ctxp->ts_data_nvme;
808 ctxp->ts_isr_status =
810 ctxp->ts_status_nvme =
817 if (ctxp->ts_cmd_nvme)
818 lpfc_nvmet_ktime(phba, ctxp);
822 ctxp->entry_cnt++;
825 if (ctxp->ts_cmd_nvme) {
826 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
827 ctxp->ts_data_nvme = ktime_get_ns();
836 if (ctxp->cpu != id)
840 id, ctxp->cpu);
931 * before freeing ctxp and iocbq.
1013 struct lpfc_async_xchg_ctx *ctxp =
1015 struct lpfc_hba *phba = ctxp->phba;
1031 if (ctxp->ts_cmd_nvme) {
1033 ctxp->ts_nvme_status = ktime_get_ns();
1035 ctxp->ts_nvme_data = ktime_get_ns();
1039 if (!ctxp->hdwq)
1040 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1050 ctxp->cpu = id; /* Setup cpu for cmpl check */
1055 if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1056 (ctxp->state == LPFC_NVME_STE_ABORT)) {
1060 ctxp->oxid);
1065 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1070 ctxp->oxid);
1076 nvmewqeq->context_un.axchg = ctxp;
1078 ctxp->wqeq->hba_wqidx = rsp->hwqid;
1081 ctxp->oxid, rsp->op, rsp->rsplen);
1083 ctxp->flag |= LPFC_NVME_IO_INP;
1084 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1087 if (!ctxp->ts_cmd_nvme)
1090 ctxp->ts_status_wqput = ktime_get_ns();
1092 ctxp->ts_data_wqput = ktime_get_ns();
1102 ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1103 wq = ctxp->hdwq->io_wq;
1117 ctxp->oxid, rc);
1119 ctxp->wqeq->hba_wqidx = 0;
1142 struct lpfc_async_xchg_ctx *ctxp =
1144 struct lpfc_hba *phba = ctxp->phba;
1151 if (!ctxp->hdwq)
1152 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1156 ctxp->oxid, ctxp->flag, ctxp->state);
1159 ctxp->oxid, ctxp->flag, ctxp->state);
1163 spin_lock_irqsave(&ctxp->ctxlock, flags);
1168 if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1169 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1172 ctxp->flag |= LPFC_NVME_ABORT_OP;
1174 if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1175 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1176 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1177 ctxp->oxid);
1178 wq = ctxp->hdwq->io_wq;
1179 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1182 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1188 if (ctxp->state == LPFC_NVME_STE_RCV)
1189 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1190 ctxp->oxid);
1192 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1193 ctxp->oxid);
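
The FCP abort entry point (source lines 1142-1193) takes ctxlock and only proceeds if neither LPFC_NVME_XBUSY nor LPFC_NVME_ABORT_OP is already set; setting LPFC_NVME_ABORT_OP before dropping the lock is what makes the abort single-shot. A small sketch of that claim-the-abort pattern, using a pthread mutex in place of the driver's IRQ-safe spinlock (claim_abort and the flag names here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    #define F_XBUSY    0x1           /* exchange still owned by hardware */
    #define F_ABORT_OP 0x2           /* an abort has already been issued */

    struct xchg_ctx {
        pthread_mutex_t lock;
        unsigned int flag;
    };

    /* Return true only if this caller won the right to issue the ABTS. */
    bool claim_abort(struct xchg_ctx *ctx)
    {
        bool issue = false;

        pthread_mutex_lock(&ctx->lock);
        if (!(ctx->flag & (F_XBUSY | F_ABORT_OP))) {
            ctx->flag |= F_ABORT_OP; /* mark the abort as taken */
            issue = true;
        }
        pthread_mutex_unlock(&ctx->lock);
        return issue;
    }

The sketch covers only the claim step; which abort WQE is then built (solicited vs. unsolicited, and the WQ-full flush case) depends on the context state and flags shown in the listing.
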
1201 struct lpfc_async_xchg_ctx *ctxp =
1203 struct lpfc_hba *phba = ctxp->phba;
1207 spin_lock_irqsave(&ctxp->ctxlock, flags);
1208 if (ctxp->flag & LPFC_NVME_XBUSY)
1212 ctxp->flag, ctxp->oxid);
1213 else if (ctxp->state != LPFC_NVME_STE_DONE &&
1214 ctxp->state != LPFC_NVME_STE_ABORT)
1217 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1219 if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1220 (ctxp->flag & LPFC_NVME_XBUSY)) {
1223 lpfc_nvmet_defer_release(phba, ctxp);
1225 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1227 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1228 ctxp->state, aborting);
1231 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1236 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1244 struct lpfc_async_xchg_ctx *ctxp =
1246 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1247 struct lpfc_hba *phba = ctxp->phba;
1252 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1258 ctxp->oxid, ctxp->flag, ctxp->state);
1268 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1269 ctxp->rqb_buffer = NULL;
1270 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1771 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1791 list_for_each_entry_safe(ctxp, next_ctxp,
1794 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1800 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1804 if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1805 !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1807 list_del_init(&ctxp->list);
1811 ctxp->flag &= ~LPFC_NVME_XBUSY;
1812 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1815 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1820 ctxp->ctxbuf->sglq->sli4_lxritag,
1827 ctxp->oxid, ctxp->flag, released);
1829 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1836 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1837 if (ctxp) {
1843 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1845 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1848 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1849 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1850 ctxp->state = LPFC_NVME_STE_ABORT;
1851 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1857 req = &ctxp->hdlrctx.fcp_req;
1870 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1880 list_for_each_entry_safe(ctxp, next_ctxp,
1883 if (ctxp->oxid != oxid || ctxp->sid != sid)
1886 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1890 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1891 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1892 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1901 rsp = &ctxp->hdlrctx.fcp_req;
1932 "is waiting for a ctxp\n",
1953 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1954 if (ctxp) {
1955 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1957 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1958 ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1959 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1968 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1970 if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1973 &ctxp->hdlrctx.fcp_req);
1975 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1976 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1977 lpfc_nvmet_defer_release(phba, ctxp);
1978 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1980 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1981 ctxp->oxid);
2001 struct lpfc_async_xchg_ctx *ctxp)
2021 if (ctxp) {
2023 if (nvmewqeq->context_un.axchg == ctxp) {
2043 if (!ctxp)
2055 struct lpfc_async_xchg_ctx *ctxp;
2069 ctxp = nvmewqeq->context_un.axchg;
2070 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2080 if (ctxp->ts_cmd_nvme) {
2081 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2082 ctxp->ts_status_wqput = ktime_get_ns();
2084 ctxp->ts_data_wqput = ktime_get_ns();
2182 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2183 struct lpfc_hba *phba = ctxp->phba;
2184 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2194 ctxp->oxid, ctxp->flag, ctxp->state);
2195 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2196 lpfc_nvmet_defer_release(phba, ctxp);
2197 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2198 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2199 ctxp->oxid);
2203 if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2206 ctxp->oxid);
2212 ctxp->flag |= LPFC_NVME_TNOTIFY;
2214 if (ctxp->ts_isr_cmd)
2215 ctxp->ts_cmd_nvme = ktime_get_ns();
2220 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2226 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2227 payload, ctxp->size);
2231 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2232 if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2233 (nvmebuf != ctxp->rqb_buffer)) {
2234 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2237 ctxp->rqb_buffer = NULL;
2238 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2247 ctxp->oxid, ctxp->size, ctxp->sid);
2250 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2251 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2252 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2255 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2266 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2270 ctxp->oxid, rc,
2275 ctxp->oxid, ctxp->size, ctxp->sid);
2276 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2277 lpfc_nvmet_defer_release(phba, ctxp);
2278 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2279 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
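
In the receive path above (source lines 2182-2279) the command is handed up with nvmet_fc_rcv_fcp_req() and the driver branches on the return code: 0 lets it free or repost the receive buffer right away (unless the buffer was already handed off for work-queue reuse), -EOVERFLOW means the transport kept the buffer and will give it back later through the defer_rcv callback, and any other value ends with the exchange being deferred for release and aborted. A condensed, hedged sketch of that dispatch with stubbed helpers (handle_rcv_rc, repost_buffer, defer_release_and_abort are illustrative names, not driver functions):

    #include <errno.h>

    struct ctx;                               /* opaque exchange context */
    struct buf;                               /* opaque receive buffer   */

    /* Stubs standing in for the real buffer and abort helpers. */
    void repost_buffer(struct buf *b)            { (void)b; }
    void defer_release_and_abort(struct ctx *c)  { (void)c; }

    /* Act on the return code of the "receive FCP request" upcall. */
    void handle_rcv_rc(struct ctx *c, struct buf *b, int rc)
    {
        if (rc == 0) {
            repost_buffer(b);       /* payload consumed, buffer is free  */
            return;
        }
        if (rc == -EOVERFLOW)
            return;                 /* transport kept the buffer for now */
        defer_release_and_abort(c); /* rejected: abort the exchange      */
    }
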
2373 struct lpfc_async_xchg_ctx *ctxp;
2455 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2457 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2459 if (ctxp->state != LPFC_NVME_STE_FREE) {
2462 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2464 ctxp->wqeq = NULL;
2465 ctxp->offset = 0;
2466 ctxp->phba = phba;
2467 ctxp->size = size;
2468 ctxp->oxid = oxid;
2469 ctxp->sid = sid;
2470 ctxp->idx = idx;
2471 ctxp->state = LPFC_NVME_STE_RCV;
2472 ctxp->entry_cnt = 1;
2473 ctxp->flag = 0;
2474 ctxp->ctxbuf = ctx_buf;
2475 ctxp->rqb_buffer = (void *)nvmebuf;
2476 ctxp->hdwq = NULL;
2477 spin_lock_init(&ctxp->ctxlock);
2481 ctxp->ts_isr_cmd = isr_timestamp;
2482 ctxp->ts_cmd_nvme = 0;
2483 ctxp->ts_nvme_data = 0;
2484 ctxp->ts_data_wqput = 0;
2485 ctxp->ts_isr_data = 0;
2486 ctxp->ts_data_nvme = 0;
2487 ctxp->ts_nvme_status = 0;
2488 ctxp->ts_status_wqput = 0;
2489 ctxp->ts_isr_status = 0;
2490 ctxp->ts_status_nvme = 0;
2505 ctxp->oxid,
2510 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2511 lpfc_nvmet_defer_release(phba, ctxp);
2512 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2513 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2553 * @ctxp: Context info for NVME LS Request
2577 struct lpfc_async_xchg_ctx *ctxp,
2588 ctxp->sid, ctxp->oxid, ctxp->state);
2598 ctxp->sid, ctxp->oxid, ctxp->state);
2602 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2609 ctxp->sid, ctxp->oxid, ctxp->state);
2612 ctxp->wqeq = nvmewqe;
2618 nvmewqe->context_un.axchg = ctxp;
2658 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2685 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2700 struct lpfc_async_xchg_ctx *ctxp)
2702 struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2719 ctxp->sid, ctxp->oxid, ctxp->state);
2723 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2730 ctxp->sid, ctxp->oxid, ctxp->state);
2738 ctxp->sid, ctxp->oxid, ctxp->state,
2745 nvmewqe = ctxp->wqeq;
2748 nvmewqe = ctxp->ctxbuf->iocbq;
2753 ctxp->sid, ctxp->oxid, ctxp->state);
2756 ctxp->wqeq = nvmewqe;
2763 if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2764 (ctxp->entry_cnt == 1)) ||
2765 (ctxp->state == LPFC_NVME_STE_DATA)) {
2770 ctxp->state, ctxp->entry_cnt);
2774 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2796 wqe->fcp_tsend.relative_offset = ctxp->offset;
2814 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2876 wqe->fcp_treceive.relative_offset = ctxp->offset;
2891 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2958 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
3003 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3009 ctxp->offset += cnt;
3026 ctxp->state = LPFC_NVME_STE_DATA;
3027 ctxp->entry_cnt++;
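
While the data WQE is being built (source lines 2700-3027), each scatter/gather entry is stamped with the running relative offset into the transfer and ctxp->offset then advances by that entry's length, so a later WQE for the same exchange picks up where this one stopped. A minimal sketch of that accumulation, with illustrative types:

    #include <stddef.h>
    #include <stdint.h>

    struct sge {
        uint64_t addr;               /* DMA address of this segment   */
        uint32_t length;             /* bytes carried by this segment */
        uint32_t offset;             /* running offset within the I/O */
    };

    /* Assign per-segment offsets; return the bytes covered by this WQE. */
    uint32_t assign_sge_offsets(struct sge *sgl, size_t nsegs,
                                uint32_t start_offset)
    {
        uint32_t off = start_offset;

        for (size_t i = 0; i < nsegs; i++) {
            sgl[i].offset = off;     /* where this segment starts */
            off += sgl[i].length;    /* advance past it           */
        }
        return off - start_offset;
    }
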
3045 struct lpfc_async_xchg_ctx *ctxp;
3052 ctxp = cmdwqe->context_un.axchg;
3056 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3059 spin_lock_irqsave(&ctxp->ctxlock, flags);
3060 ctxp->state = LPFC_NVME_STE_DONE;
3065 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3066 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3068 list_del_init(&ctxp->list);
3072 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3073 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3079 ctxp->oxid, ctxp->flag, released,
3090 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3115 struct lpfc_async_xchg_ctx *ctxp;
3122 ctxp = cmdwqe->context_un.axchg;
3125 if (!ctxp) {
3135 spin_lock_irqsave(&ctxp->ctxlock, flags);
3136 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3140 if (ctxp->state != LPFC_NVME_STE_ABORT) {
3143 ctxp->state, ctxp->oxid);
3149 ctxp->state = LPFC_NVME_STE_DONE;
3150 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3151 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3153 list_del_init(&ctxp->list);
3157 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3158 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3164 ctxp->oxid, ctxp->flag, released,
3175 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3197 struct lpfc_async_xchg_ctx *ctxp;
3202 ctxp = cmdwqe->context_un.axchg;
3212 ctxp, wcqe->word0, wcqe->total_data_placed,
3215 if (!ctxp) {
3226 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3230 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3236 kfree(ctxp);
3241 struct lpfc_async_xchg_ctx *ctxp,
3251 sid, xri, ctxp->wqeq->sli4_xritag);
3270 abts_wqeq = ctxp->wqeq;
3322 abts_wqeq->context_un.axchg = ctxp;
3337 struct lpfc_async_xchg_ctx *ctxp,
3348 if (!ctxp->wqeq) {
3349 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3350 ctxp->wqeq->hba_wqidx = 0;
3363 spin_lock_irqsave(&ctxp->ctxlock, flags);
3364 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3365 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3370 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3371 spin_lock_irqsave(&ctxp->ctxlock, flags);
3372 if (!ctxp->abort_wqeq) {
3376 "xri: x%x\n", ctxp->oxid);
3378 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3379 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3382 abts_wqeq = ctxp->abort_wqeq;
3383 ctxp->state = LPFC_NVME_STE_ABORT;
3384 ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
3385 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3391 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3404 phba->hba_flag, ctxp->oxid);
3406 spin_lock_irqsave(&ctxp->ctxlock, flags);
3407 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3408 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3419 ctxp->oxid);
3421 spin_lock_irqsave(&ctxp->ctxlock, flags);
3422 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3423 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3430 lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
3435 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3438 abts_wqeq->context_un.axchg = ctxp;
3440 if (!ctxp->hdwq)
3441 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3443 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3451 spin_lock_irqsave(&ctxp->ctxlock, flags);
3452 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3453 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3458 rc, ctxp->oxid);
3464 struct lpfc_async_xchg_ctx *ctxp,
3474 if (!ctxp->wqeq) {
3475 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3476 ctxp->wqeq->hba_wqidx = 0;
3479 if (ctxp->state == LPFC_NVME_STE_FREE) {
3482 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3486 ctxp->state = LPFC_NVME_STE_ABORT;
3487 ctxp->entry_cnt++;
3488 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3493 abts_wqeq = ctxp->wqeq;
3496 if (!ctxp->hdwq)
3497 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3499 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3506 spin_lock_irqsave(&ctxp->ctxlock, flags);
3507 if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3509 list_del_init(&ctxp->list);
3513 ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
3514 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3520 ctxp->oxid, rc, released);
3522 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3530 * @ctxp: pointer to the asynchronously received sequence
3536 struct lpfc_async_xchg_ctx *ctxp,
3544 if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3545 (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3546 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3547 ctxp->entry_cnt++;
3552 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3553 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3559 if (!ctxp->wqeq) {
3561 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3562 if (!ctxp->wqeq) {
3567 kfree(ctxp);
3571 abts_wqeq = ctxp->wqeq;
3573 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3581 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
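
The LS abort path at the end of the listing (source lines 3536-3581) treats the transition as legitimate only from "request received" (entry_cnt 1) or "response built" (entry_cnt 2), but it forces the context into the abort state either way before allocating a WQE and issuing the ABTS. A small sketch of that state check, assuming hypothetical enum and function names:

    #include <stdbool.h>

    enum ls_state { LS_RCV, LS_RSP, LS_ABORT };

    struct ls_ctx {
        enum ls_state state;
        unsigned int entry_cnt;      /* handling steps completed so far */
    };

    /* Move to the abort state; report whether the transition was expected. */
    bool enter_ls_abort(struct ls_ctx *c)
    {
        bool expected = (c->state == LS_RCV && c->entry_cnt == 1) ||
                        (c->state == LS_RSP && c->entry_cnt == 2);

        c->state = LS_ABORT;         /* forced in both branches */
        if (expected)
            c->entry_cnt++;
        return expected;
    }
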