Lines matching references to `ch` (struct srp_rdma_ch *) in the SRP initiator driver (ib_srp.c):

303 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
305 struct srp_target_port *target = ch->target;
309 srp_ib_cm_handler, ch);
313 if (ch->ib_cm.cm_id)
314 ib_destroy_cm_id(ch->ib_cm.cm_id);
315 ch->ib_cm.cm_id = new_cm_id;
318 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
320 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
321 ch->ib_cm.path.sgid = target->sgid;
322 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
323 ch->ib_cm.path.pkey = target->ib_cm.pkey;
324 ch->ib_cm.path.service_id = target->ib_cm.service_id;
329 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
331 struct srp_target_port *target = ch->target;
335 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
343 init_completion(&ch->done);
353 ret = wait_for_completion_interruptible(&ch->done);
357 ret = ch->status;
364 swap(ch->rdma_cm.cm_id, new_cm_id);
373 static int srp_new_cm_id(struct srp_rdma_ch *ch)
375 struct srp_target_port *target = ch->target;
377 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
378 srp_new_ib_cm_id(ch);
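The srp_new_rdma_cm_id() excerpt above shows the driver's completion-driven setup pattern: initialize ch->done, kick off an asynchronous CM operation, wait interruptibly, and then read the status that the CM event handler stored before calling complete(). A minimal sketch of that pattern, using a hypothetical stand-in channel type and an invented start_async_op() helper rather than the real SRP/RDMA-CM calls:

#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical, simplified stand-in for struct srp_rdma_ch. */
struct wait_ch {
	struct completion done;	/* signalled by the async event handler */
	int status;		/* result filled in by that handler     */
};

/* Invented helper: starts an asynchronous operation whose event handler
 * eventually sets ch->status and calls complete(&ch->done).
 */
int start_async_op(struct wait_ch *ch);

static int wait_for_async_op(struct wait_ch *ch)
{
	int ret;

	init_completion(&ch->done);	/* must precede the async kick-off */

	ret = start_async_op(ch);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;	/* interrupted by a signal */

	return ch->status;	/* set by the handler before complete() */
}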
507 * @ch: SRP RDMA channel.
513 static void srp_destroy_qp(struct srp_rdma_ch *ch)
515 spin_lock_irq(&ch->lock);
516 ib_process_cq_direct(ch->send_cq, -1);
517 spin_unlock_irq(&ch->lock);
519 ib_drain_qp(ch->qp);
520 ib_destroy_qp(ch->qp);
523 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
525 struct srp_target_port *target = ch->target;
540 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
541 ch->comp_vector, IB_POLL_SOFTIRQ);
547 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
548 ch->comp_vector, IB_POLL_DIRECT);
564 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
567 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
568 qp = ch->rdma_cm.cm_id->qp;
595 if (ch->qp)
596 srp_destroy_qp(ch);
597 if (ch->recv_cq)
598 ib_free_cq(ch->recv_cq);
599 if (ch->send_cq)
600 ib_free_cq(ch->send_cq);
602 ch->qp = qp;
603 ch->recv_cq = recv_cq;
604 ch->send_cq = send_cq;
607 if (ch->fr_pool)
608 srp_destroy_fr_pool(ch->fr_pool);
609 ch->fr_pool = fr_pool;
617 rdma_destroy_qp(ch->rdma_cm.cm_id);
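The srp_create_ch_ib() excerpt follows an allocate-then-swap discipline: the new CQs, QP and FR pool are created first, and only once they all exist are the old ch->qp / ch->recv_cq / ch->send_cq torn down and the new objects installed, so a failed reallocation leaves the channel's existing resources untouched. A compressed sketch of that discipline with hypothetical resource types and invented helpers (not the actual IB verbs calls):

#include <linux/errno.h>

/* Hypothetical stand-ins for the channel's IB resources. */
struct swap_res;

struct swap_ch {
	struct swap_res *qp;
	struct swap_res *recv_cq;
	struct swap_res *send_cq;
};

/* Invented allocation/teardown helpers, for illustration only. */
struct swap_res *swap_res_alloc(void);
void swap_res_free(struct swap_res *res);

static int swap_recreate_resources(struct swap_ch *ch)
{
	struct swap_res *qp, *recv_cq, *send_cq;

	/* 1. Build the complete replacement set first. */
	recv_cq = swap_res_alloc();
	if (!recv_cq)
		return -ENOMEM;
	send_cq = swap_res_alloc();
	if (!send_cq)
		goto err_recv_cq;
	qp = swap_res_alloc();
	if (!qp)
		goto err_send_cq;

	/* 2. Only now retire the old resources, if any ... */
	if (ch->qp)
		swap_res_free(ch->qp);
	if (ch->recv_cq)
		swap_res_free(ch->recv_cq);
	if (ch->send_cq)
		swap_res_free(ch->send_cq);

	/* 3. ... and install the new ones. */
	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;
	return 0;

err_send_cq:
	swap_res_free(send_cq);
err_recv_cq:
	swap_res_free(recv_cq);
	return -ENOMEM;
}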
634 * invoked. Hence the ch->[rt]x_ring checks.
637 struct srp_rdma_ch *ch)
642 if (!ch->target)
646 if (ch->rdma_cm.cm_id) {
647 rdma_destroy_id(ch->rdma_cm.cm_id);
648 ch->rdma_cm.cm_id = NULL;
651 if (ch->ib_cm.cm_id) {
652 ib_destroy_cm_id(ch->ib_cm.cm_id);
653 ch->ib_cm.cm_id = NULL;
658 if (!ch->qp)
662 if (ch->fr_pool)
663 srp_destroy_fr_pool(ch->fr_pool);
666 srp_destroy_qp(ch);
667 ib_free_cq(ch->send_cq);
668 ib_free_cq(ch->recv_cq);
676 ch->target = NULL;
678 ch->qp = NULL;
679 ch->send_cq = ch->recv_cq = NULL;
681 if (ch->rx_ring) {
683 srp_free_iu(target->srp_host, ch->rx_ring[i]);
684 kfree(ch->rx_ring);
685 ch->rx_ring = NULL;
687 if (ch->tx_ring) {
689 srp_free_iu(target->srp_host, ch->tx_ring[i]);
690 kfree(ch->tx_ring);
691 ch->tx_ring = NULL;
699 struct srp_rdma_ch *ch = ch_ptr;
700 struct srp_target_port *target = ch->target;
702 ch->status = status;
707 ch->ib_cm.path = *pathrec;
708 complete(&ch->done);
711 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
713 struct srp_target_port *target = ch->target;
716 ch->ib_cm.path.numb_path = 1;
718 init_completion(&ch->done);
720 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
723 &ch->ib_cm.path,
732 ch, &ch->ib_cm.path_query);
733 if (ch->ib_cm.path_query_id < 0)
734 return ch->ib_cm.path_query_id;
736 ret = wait_for_completion_interruptible(&ch->done);
740 if (ch->status < 0)
743 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
747 return ch->status;
750 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
752 struct srp_target_port *target = ch->target;
755 init_completion(&ch->done);
757 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
761 wait_for_completion_interruptible(&ch->done);
763 if (ch->status != 0)
767 return ch->status;
770 static int srp_lookup_path(struct srp_rdma_ch *ch)
772 struct srp_target_port *target = ch->target;
774 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
775 srp_ib_lookup_path(ch);
795 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
798 struct srp_target_port *target = ch->target;
859 req->ib_param.primary_path = &ch->ib_cm.path;
864 req->ib_param.qp_num = ch->qp->qp_num;
865 req->ib_param.qp_type = ch->qp->qp_type;
911 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
913 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
939 struct srp_rdma_ch *ch;
945 ch = &target->ch[i];
946 ch->connected = false;
949 if (ch->rdma_cm.cm_id)
950 rdma_disconnect(ch->rdma_cm.cm_id);
952 if (ch->ib_cm.cm_id)
953 ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
1039 struct srp_rdma_ch *ch;
1052 ch = &target->ch[i];
1053 srp_free_ch_ib(target, ch);
1057 kfree(target->ch);
1058 target->ch = NULL;
1093 c += target->ch[i].connected;
1098 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1101 struct srp_target_port *target = ch->target;
1106 ret = srp_lookup_path(ch);
1111 init_completion(&ch->done);
1112 ret = srp_send_req(ch, max_iu_len, multich);
1115 ret = wait_for_completion_interruptible(&ch->done);
1125 ret = ch->status;
1128 ch->connected = true;
1132 ret = srp_lookup_path(ch);
1160 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1173 return ib_post_send(ch->qp, &wr, NULL);
1177 struct srp_rdma_ch *ch,
1180 struct srp_target_port *target = ch->target;
1194 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1204 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1214 * @ch: SRP RDMA channel.
1223 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1230 spin_lock_irqsave(&ch->lock, flags);
1239 spin_unlock_irqrestore(&ch->lock, flags);
1245 * srp_free_req() - Unmap data and adjust ch->req_lim.
1246 * @ch: SRP RDMA channel.
1251 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1256 srp_unmap_data(scmnd, ch, req);
1258 spin_lock_irqsave(&ch->lock, flags);
1259 ch->req_lim += req_lim_delta;
1260 spin_unlock_irqrestore(&ch->lock, flags);
1263 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1266 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1269 srp_free_req(ch, req, scmnd, 0);
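srp_claim_req() and srp_free_req() above split command teardown into two lock-protected steps: first atomically take ownership of the scsi_cmnd pointer stored in the request so no other context can complete it, then unmap the data and give the request-limit delta back under ch->lock. A simplified sketch of that claim/release idea with hypothetical types (the real srp_claim_req() also matches the expected command before detaching it):

#include <linux/spinlock.h>

/* Hypothetical, simplified request/channel types for illustration. */
struct claimed_cmd;

struct claim_req {
	struct claimed_cmd *cmd;	/* owned command, NULL once claimed */
};

struct claim_ch {
	spinlock_t lock;
	int req_lim;			/* request credits towards the target */
};

/*
 * Claim the command attached to @req. Returns the command if the caller
 * now owns it, or NULL if another context already claimed (completed) it.
 */
static struct claimed_cmd *claim_req(struct claim_ch *ch, struct claim_req *req)
{
	struct claimed_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	cmd = req->cmd;
	req->cmd = NULL;	/* detach so nobody else can claim it */
	spin_unlock_irqrestore(&ch->lock, flags);

	return cmd;
}

/* Return the credits granted by the target's response to the channel. */
static void release_req(struct claim_ch *ch, int req_lim_delta)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}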
1285 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1288 srp_finish_req(ch, req, NULL, context->scsi_result);
1334 struct srp_rdma_ch *ch;
1352 ch = &target->ch[i];
1353 ret += srp_new_cm_id(ch);
1363 ch = &target->ch[i];
1369 ret += srp_create_ch_ib(ch);
1371 INIT_LIST_HEAD(&ch->free_tx);
1373 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1379 ch = &target->ch[i];
1382 ret = srp_connect_ch(ch, max_iu_len, multich);
1422 struct srp_rdma_ch *ch, int sg_nents,
1425 struct srp_target_port *target = ch->target;
1433 shost_printk(KERN_ERR, ch->target->scsi_host,
1435 ch->target->mr_per_cmd);
1452 desc = srp_fr_pool_get(ch->fr_pool);
1462 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1490 err = ib_post_send(ch->qp, &wr.wr, NULL);
1499 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1506 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1515 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1527 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1531 struct srp_target_port *target = ch->target;
1550 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1554 struct srp_target_port *target = ch->target;
1576 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1590 struct srp_rdma_ch *ch, struct srp_request *req,
1593 struct srp_device *dev = ch->target->srp_host->srp_dev;
1613 * @ch: SRP RDMA channel
1620 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1623 struct srp_target_port *target = ch->target;
1660 if (ch->use_imm_data &&
1661 count <= ch->max_imm_sge &&
1662 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1716 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1718 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1727 srp_check_mapping(&state, ch, req, scat, count);
1769 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1800 srp_unmap_data(scmnd, ch, req);
1809 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1814 spin_lock_irqsave(&ch->lock, flags);
1815 list_add(&iu->list, &ch->free_tx);
1817 ++ch->req_lim;
1818 spin_unlock_irqrestore(&ch->lock, flags);
1822 * Must be called with ch->lock held to protect req_lim and free_tx.
1834 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1837 struct srp_target_port *target = ch->target;
1841 lockdep_assert_held(&ch->lock);
1843 ib_process_cq_direct(ch->send_cq, -1);
1845 if (list_empty(&ch->free_tx))
1850 if (ch->req_lim <= rsv) {
1855 --ch->req_lim;
1858 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1865 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1871 struct srp_rdma_ch *ch = cq->cq_context;
1878 lockdep_assert_held(&ch->lock);
1880 list_add(&iu->list, &ch->free_tx);
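__srp_get_tx_iu() and the surrounding lines manage TX information units as a free list guarded by ch->lock, coupled with the req_lim credit count: an IU is handed out only when free_tx is non-empty and enough credits remain (a reserve is kept for task-management requests), and completed or unused IUs go back on free_tx (srp_send_done(), srp_put_tx_iu()). A simplified sketch of that free-list/credit pattern with hypothetical types (the real function distinguishes several IU types):

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-ins for struct srp_iu / struct srp_rdma_ch. */
struct tx_iu {
	struct list_head list;
};

struct tx_ch {
	spinlock_t lock;
	struct list_head free_tx;	/* idle TX information units     */
	s32 req_lim;			/* credits granted by the target */
};

#define TX_TSK_MGMT_RESERVE 1		/* credits held back for task mgmt */

/* Caller must hold ch->lock, like __srp_get_tx_iu(). */
static struct tx_iu *tx_get_iu(struct tx_ch *ch, bool tsk_mgmt)
{
	s32 rsv = tsk_mgmt ? 0 : TX_TSK_MGMT_RESERVE;
	struct tx_iu *iu;

	lockdep_assert_held(&ch->lock);

	if (list_empty(&ch->free_tx))
		return NULL;		/* no idle IU                      */
	if (ch->req_lim <= rsv)
		return NULL;		/* out of (unreserved) credits     */

	ch->req_lim--;			/* consume one request credit      */
	iu = list_first_entry(&ch->free_tx, struct tx_iu, list);
	list_del(&iu->list);
	return iu;
}

/* Once the IU is no longer in flight it becomes reusable again. */
static void tx_put_iu(struct tx_ch *ch, struct tx_iu *iu)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	spin_unlock_irqrestore(&ch->lock, flags);
}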
1885 * @ch: RDMA channel over which to send the information unit.
1889 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1891 struct srp_target_port *target = ch->target;
1910 return ib_post_send(ch->qp, &wr, NULL);
1913 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1915 struct srp_target_port *target = ch->target;
1930 return ib_post_recv(ch->qp, &wr, NULL);
1933 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1935 struct srp_target_port *target = ch->target;
1941 spin_lock_irqsave(&ch->lock, flags);
1942 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1943 if (rsp->tag == ch->tsk_mgmt_tag) {
1944 ch->tsk_mgmt_status = -1;
1946 ch->tsk_mgmt_status = rsp->data[3];
1947 complete(&ch->tsk_mgmt_done);
1953 spin_unlock_irqrestore(&ch->lock, flags);
1958 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1962 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1963 rsp->tag, ch - target->ch, ch->qp->qp_num);
1965 spin_lock_irqsave(&ch->lock, flags);
1966 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1967 spin_unlock_irqrestore(&ch->lock, flags);
1985 srp_free_req(ch, req, scmnd,
1992 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1995 struct srp_target_port *target = ch->target;
2001 spin_lock_irqsave(&ch->lock, flags);
2002 ch->req_lim += req_delta;
2003 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2004 spin_unlock_irqrestore(&ch->lock, flags);
2017 err = srp_post_send(ch, iu, len);
2021 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2027 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2036 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2037 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2041 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2044 struct srp_target_port *target = ch->target;
2054 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2062 struct srp_rdma_ch *ch = cq->cq_context;
2063 struct srp_target_port *target = ch->target;
2073 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2087 srp_process_rsp(ch, iu->buf);
2091 srp_process_cred_req(ch, iu->buf);
2095 srp_process_aer_req(ch, iu->buf);
2110 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2113 res = srp_post_recv(ch, iu);
2138 struct srp_rdma_ch *ch = cq->cq_context;
2139 struct srp_target_port *target = ch->target;
2141 if (ch->connected && !target->qp_in_error) {
2155 struct srp_rdma_ch *ch;
2170 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2172 spin_lock_irqsave(&ch->lock, flags);
2173 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2174 spin_unlock_irqrestore(&ch->lock, flags);
2180 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2200 len = srp_map_data(scmnd, ch, req);
2215 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2218 if (srp_post_send(ch, iu, len)) {
2227 srp_unmap_data(scmnd, ch, req);
2230 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2253 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2255 struct srp_target_port *target = ch->target;
2258 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2260 if (!ch->rx_ring)
2262 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2264 if (!ch->tx_ring)
2268 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2269 ch->max_ti_iu_len,
2271 if (!ch->rx_ring[i])
2276 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2277 ch->max_it_iu_len,
2279 if (!ch->tx_ring[i])
2282 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2289 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2290 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2295 kfree(ch->tx_ring);
2296 ch->tx_ring = NULL;
2297 kfree(ch->rx_ring);
2298 ch->rx_ring = NULL;
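srp_alloc_iu_bufs() above allocates pointer arrays for the RX and TX rings with kcalloc(), fills every slot, and on failure frees whatever was already allocated before clearing ch->rx_ring / ch->tx_ring again. A stripped-down sketch of that allocate-and-unwind shape, with a hypothetical buffer type, an illustrative ring size and invented alloc/free helpers:

#include <linux/errno.h>
#include <linux/slab.h>

#define RING_SIZE 32			/* stand-in for target->queue_size */

/* Hypothetical information-unit buffer and helpers (illustration only). */
struct ring_iu {
	void *buf;
};

struct ring_iu *ring_iu_alloc(void);
void ring_iu_free(struct ring_iu *iu);

struct ring_ch {
	struct ring_iu **rx_ring;
};

static int ring_alloc_bufs(struct ring_ch *ch)
{
	int i;

	ch->rx_ring = kcalloc(RING_SIZE, sizeof(*ch->rx_ring), GFP_KERNEL);
	if (!ch->rx_ring)
		return -ENOMEM;

	for (i = 0; i < RING_SIZE; i++) {
		ch->rx_ring[i] = ring_iu_alloc();
		if (!ch->rx_ring[i])
			goto err;
	}
	return 0;

err:
	/* Unwind: free only the slots that were actually populated. */
	while (--i >= 0)
		ring_iu_free(ch->rx_ring[i]);
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;
	return -ENOMEM;
}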
2332 struct srp_rdma_ch *ch)
2334 struct srp_target_port *target = ch->target;
2341 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2342 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2343 ch->use_imm_data = srp_use_imm_data &&
2345 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2346 ch->use_imm_data,
2348 WARN_ON_ONCE(ch->max_it_iu_len >
2351 if (ch->use_imm_data)
2360 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2372 if (!ch->rx_ring) {
2373 ret = srp_alloc_iu_bufs(ch);
2379 struct srp_iu *iu = ch->rx_ring[i];
2381 ret = srp_post_recv(ch, iu);
2397 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2408 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2419 ch->status = ret;
2424 struct srp_rdma_ch *ch)
2426 struct srp_target_port *target = ch->target;
2436 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2437 ch->ib_cm.path.pkey = cpi->redirect_pkey;
2439 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2441 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2446 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2460 ch->status = SRP_PORT_REDIRECT;
2464 ch->status = -ECONNRESET;
2471 ch->status = -ECONNRESET;
2493 ch->status = -ECONNRESET;
2498 ch->status = SRP_STALE_CONN;
2504 ch->status = -ECONNRESET;
2511 struct srp_rdma_ch *ch = cm_id->context;
2512 struct srp_target_port *target = ch->target;
2520 ch->status = -ECONNRESET;
2525 srp_cm_rep_handler(cm_id, event->private_data, ch);
2532 srp_ib_cm_rej_handler(cm_id, event, ch);
2538 ch->connected = false;
2550 ch->status = 0;
2565 complete(&ch->done);
2570 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2573 struct srp_target_port *target = ch->target;
2581 ch->status = -ECONNRESET;
2603 ch->status = -ECONNRESET;
2609 ch->status = SRP_STALE_CONN;
2615 ch->status = -ECONNRESET;
2623 struct srp_rdma_ch *ch = cm_id->context;
2624 struct srp_target_port *target = ch->target;
2629 ch->status = 0;
2634 ch->status = -ENXIO;
2639 ch->status = 0;
2645 ch->status = -EHOSTUNREACH;
2653 ch->status = -ECONNRESET;
2658 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2665 srp_rdma_cm_rej_handler(ch, event);
2669 if (ch->connected) {
2672 rdma_disconnect(ch->rdma_cm.cm_id);
2674 ch->status = 0;
2684 ch->status = 0;
2694 complete(&ch->done);
2714 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2717 struct srp_target_port *target = ch->target;
2724 if (!ch->connected || target->qp_in_error)
2732 spin_lock_irq(&ch->lock);
2733 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2734 spin_unlock_irq(&ch->lock);
2754 spin_lock_irq(&ch->lock);
2755 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2756 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2757 spin_unlock_irq(&ch->lock);
2759 init_completion(&ch->tsk_mgmt_done);
2763 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2764 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2769 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2772 *status = ch->tsk_mgmt_status;
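srp_send_tsk_mgmt() above differs from normal command submission in that it waits synchronously: a tag carrying the SRP_TAG_TSK_MGMT bit is generated under ch->lock, ch->tsk_mgmt_done is (re)initialized before the request is posted, and the caller then blocks in wait_for_completion_timeout() and reads ch->tsk_mgmt_status, which srp_process_rsp() filled in before completing. A minimal sketch of the timed-wait half of that flow, with hypothetical types, an invented post_tmf() helper and an illustrative timeout:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define TMF_TIMEOUT_MS 5000	/* illustrative value, not the driver's */

/* Hypothetical, simplified channel state for a task-management request. */
struct tmf_ch {
	struct completion tsk_mgmt_done;
	int tsk_mgmt_status;	/* written by the response handler */
};

/* Invented helper: posts the task-management IU to the target. */
int post_tmf(struct tmf_ch *ch, u64 tag);

static int send_tmf(struct tmf_ch *ch, u64 tag, int *status)
{
	int ret;

	init_completion(&ch->tsk_mgmt_done);	/* before posting the request */

	ret = post_tmf(ch, tag);
	if (ret)
		return ret;

	/* The response handler calls complete(&ch->tsk_mgmt_done) when the
	 * matching RSP arrives; give up after a fixed timeout.
	 */
	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(TMF_TIMEOUT_MS)))
		return -ETIMEDOUT;

	*status = ch->tsk_mgmt_status;
	return 0;
}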
2786 struct srp_rdma_ch *ch;
2794 ch = &target->ch[ch_idx];
2795 if (!srp_claim_req(ch, req, NULL, scmnd))
2799 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2801 srp_free_req(ch, req, scmnd, 0);
2813 struct srp_rdma_ch *ch;
2818 ch = &target->ch[0];
2819 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2922 struct srp_rdma_ch *ch = &target->ch[0];
2927 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2949 struct srp_rdma_ch *ch;
2953 ch = &target->ch[i];
2954 req_lim = min(req_lim, ch->req_lim);
3689 struct srp_rdma_ch *ch;
3823 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3825 if (!target->ch)
3829 ch = &target->ch[ch_idx];
3830 ch->target = target;
3831 ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3832 spin_lock_init(&ch->lock);
3833 INIT_LIST_HEAD(&ch->free_tx);
3834 ret = srp_new_cm_id(ch);
3838 ret = srp_create_ch_ib(ch);
3842 ret = srp_connect_ch(ch, max_iu_len, multich);
3859 srp_free_ch_ib(target, ch);
3860 target->ch_count = ch - target->ch;
3918 ch = &target->ch[i];
3919 srp_free_ch_ib(target, ch);
3922 kfree(target->ch);