Lines matching refs: ch
Cross-reference listing for the SRPT RDMA channel pointer (struct srpt_rdma_ch *ch) in the SRP target driver (ib_srpt.c). The number at the start of each entry is the source line the fragment was taken from.

106 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
112 static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
118 spin_lock_irqsave(&ch->spinlock, flags);
119 prev = ch->state;
121 ch->state = new;
124 spin_unlock_irqrestore(&ch->spinlock, flags);
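
Lines 112-124 are the channel state machine: transitions are forward-only and each one is made atomic by ch->spinlock, so exactly one caller "wins" any given transition. A minimal reconstruction of the pattern from the visible fragments, assuming the usual forward-only check on the lines the listing elides:

/* Forward-only state transition under a spinlock. The enum order
 * defines the allowed direction; a "backwards" transition is refused,
 * which makes the close and disconnect paths idempotent. */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (new > prev) {
		ch->state = new;
		changed = true;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return changed;	/* callers branch on whether they won the race */
}
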
221 struct srpt_rdma_ch *ch = ptr;
223 pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
224 event->event, ch, ch->sess_name, ch->qp->qp_num,
225 get_ch_state_name(ch->state));
229 if (ch->using_rdma_cm)
230 rdma_notify(ch->rdma_cm.cm_id, event->event);
232 ib_cm_notify(ch->ib_cm.cm_id, event->event);
236 ch->sess_name, ch->qp->qp_num,
237 get_ch_state_name(ch->state));
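
Lines 221-237 come from the asynchronous QP event handler: a channel may be owned either by the RDMA CM or by the legacy IB CM, and QP events (typically IB_EVENT_COMM_EST) are forwarded to whichever connection manager holds it, so the CM can stop retransmitting connection-setup packets. A sketch of the dispatch; the helper name is illustrative, the driver open-codes this inside its event handler:

/* Forward an async QP event to whichever CM owns this channel. */
static void srpt_notify_cm(struct srpt_rdma_ch *ch, struct ib_event *event)
{
	if (ch->using_rdma_cm)
		rdma_notify(ch->rdma_cm.cm_id, event->event);
	else
		ib_cm_notify(ch->ib_cm.cm_id, event->event);
}
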
825 * @ch: SRPT RDMA channel.
828 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
848 return ib_post_recv(ch->qp, &wr, NULL);
853 * @ch: SRPT RDMA channel.
860 static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
865 { .wr_cqe = &ch->zw_cqe, },
871 pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
872 ch->qp->qp_num);
874 return ib_post_send(ch->qp, &wr.wr, NULL);
879 struct srpt_rdma_ch *ch = wc->qp->qp_context;
881 pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
885 srpt_process_wait_list(ch);
887 if (srpt_set_ch_state(ch, CH_DISCONNECTED))
888 schedule_work(&ch->release_work);
891 ch->sess_name, ch->qp->qp_num);
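
Lines 860-891 show the zero-length RDMA write the driver uses as a probe: a successful completion proves the QP has reached RTS and the deferred command list can be replayed, while a flush-status completion (after the QP was moved to the error state) means all preceding work requests have drained. Reconstructed from the fragments:

/* Post a zero-length RDMA WRITE whose completion acts as a marker;
 * ch->zw_cqe.done points at srpt_zerolength_write_done(). */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	struct ib_rdma_wr wr = {
		.wr = {
			.next		= NULL,
			{ .wr_cqe	= &ch->zw_cqe, },
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,	/* we want the CQE */
		}
	};

	return ib_post_send(ch->qp, &wr.wr, NULL);
}

static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = wc->qp->qp_context;

	if (wc->status == IB_WC_SUCCESS) {
		srpt_process_wait_list(ch);	/* channel is live */
	} else {
		/* flush error: the QP has drained; finish teardown once */
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
	}
}
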
900 struct srpt_rdma_ch *ch = ioctx->ch;
925 ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
954 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
963 static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
972 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
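
Lines 900-972: srpt_alloc_rw_ctxs() maps the initiator's buffer descriptors with rdma_rw_ctx_init() (925), and both the error-path unwind (954) and srpt_free_rw_ctxs() (972) undo it with rdma_rw_ctx_destroy() using the same QP, port and SG list. A hedged sketch of the pairing; the helper names are illustrative, and remote_addr/rkey/dir come from the SRP buffer descriptor:

static int map_one_desc(struct srpt_rdma_ch *ch, struct srpt_rw_ctx *ctx,
			struct scatterlist *sg, u32 sg_cnt,
			u64 remote_addr, u32 rkey,
			enum dma_data_direction dir)
{
	/* returns the number of send WRs the transfer will need (>= 0),
	 * or a negative errno; the driver sums these into ioctx->n_rdma */
	return rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
				sg, sg_cnt, 0, remote_addr, rkey, dir);
}

static void unmap_one_desc(struct srpt_rdma_ch *ch, struct srpt_rw_ctx *ctx,
			   struct scatterlist *sg, u32 sg_cnt,
			   enum dma_data_direction dir)
{
	/* must mirror the init call: same QP, same port, same SG list */
	rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
			    sg, sg_cnt, dir);
}
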
1111 * @ch: SRPT RDMA channel.
1117 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1122 WARN_ON_ONCE(ch->using_rdma_cm);
1130 attr->port_num = ch->sport->port;
1132 ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
1133 ch->pkey, &attr->pkey_index);
1136 ch->pkey, ret);
1148 * @ch: channel of the queue pair.
1157 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1163 WARN_ON_ONCE(ch->using_rdma_cm);
1166 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1180 * @ch: channel of the queue pair.
1189 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1196 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1210 * @ch: SRPT RDMA channel.
1212 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1217 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
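
Lines 1111-1217 only matter on the legacy ib_cm path (hence the WARN_ON_ONCE(ch->using_rdma_cm) at 1122 and 1163; rdma_cm drives the QP state machine internally): the driver walks the QP through INIT, RTR and RTS by asking ib_cm_init_qp_attr() for the attributes of each next state, and drops it into the error state on close. A simplified sketch of the RTR step and the error transition, reconstructed from the fragments (the real RTR step also caps max_dest_rd_atomic):

static int srpt_ch_qp_rtr_sketch(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	/* the CM knows the path parameters; it fills qp_attr/attr_mask */
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, attr_mask);
}

/* Moving the QP to the error state flushes every outstanding work
 * request with a flush status, which is how draining is detected. */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}
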
1222 * @ch: SRPT RDMA channel.
1224 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1229 BUG_ON(!ch);
1231 tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
1235 ioctx = ch->ioctx_ring[tag];
1236 BUG_ON(ioctx->ch != ch);
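
Lines 1224-1236: send contexts are not kept on a free list. The session's sbitmap tag pool hands out a free tag, and that tag doubles as the index into ch->ioctx_ring. A reconstruction (the ioctx state initialization that follows in the driver is elided):

static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned int cpu;
	int tag;

	tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;	/* all tags in use: out of send contexts */

	ioctx = ch->ioctx_ring[tag];	/* tag doubles as the ring index */
	BUG_ON(ioctx->ch != ch);

	return ioctx;
}
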
1332 struct srpt_rdma_ch *ch = wc->qp->qp_context;
1337 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1357 * @ch: RDMA channel through which the request has been received.
1370 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1396 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1424 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1441 * @ch: RDMA channel through which the request has been received.
1452 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1469 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
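
Both response builders (1396 and 1469) hand SRP flow-control credits back to the initiator the same way: the response itself is worth one credit, plus whatever has accumulated in ch->req_lim_delta since the last response. atomic_xchg() drains the delta exactly once even when responses are built concurrently. As a standalone helper (the name is illustrative; the driver open-codes the expression):

static __be32 srpt_take_req_lim_delta(struct srpt_rdma_ch *ch)
{
	/* 1 credit for this response plus everything accumulated since
	 * the previous one; xchg makes the hand-back race-free */
	return cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
}
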
1489 * @ch: SRPT RDMA channel.
1493 static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
1528 &sg, &sg_cnt, &data_len, ch->imm_data_offset);
1537 rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
1577 * @ch: SRPT RDMA channel.
1586 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1592 struct se_session *sess = ch->sess;
1601 pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
1602 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
1603 ch->sess);
1621 * @ch: RDMA channel through which the information unit has been received.
1625 srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
1632 BUG_ON(!ch);
1635 if (unlikely(ch->state == CH_CONNECTING))
1638 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1646 send_ioctx = srpt_get_send_ioctx(ch);
1652 WARN_ON_ONCE(!ch->processing_wait_list);
1658 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1661 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1681 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
1689 WARN_ON_ONCE(ch->processing_wait_list);
1690 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1697 struct srpt_rdma_ch *ch = wc->qp->qp_context;
1704 req_lim = atomic_dec_return(&ch->req_lim);
1708 srpt_handle_new_iu(ch, ioctx);
1720 static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
1724 WARN_ON_ONCE(ch->state == CH_CONNECTING);
1726 if (list_empty(&ch->cmd_wait_list))
1729 WARN_ON_ONCE(ch->processing_wait_list);
1730 ch->processing_wait_list = true;
1731 list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
1733 if (!srpt_handle_new_iu(ch, recv_ioctx))
1736 ch->processing_wait_list = false;
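
Lines 1625-1736 implement deferral for information units that arrive while the channel is still CH_CONNECTING: srpt_handle_new_iu() parks them on ch->cmd_wait_list (1690), and srpt_process_wait_list() replays them once the channel is live. The processing_wait_list flag is what the WARN_ON_ONCEs at 1652, 1689 and 1729 check: an IU being handled off the wait list must not be re-queued onto the list that is being drained. Reconstructed from the fragments:

static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
	struct srpt_recv_ioctx *recv_ioctx, *tmp;

	WARN_ON_ONCE(ch->state == CH_CONNECTING);

	if (list_empty(&ch->cmd_wait_list))
		return;

	WARN_ON_ONCE(ch->processing_wait_list);
	ch->processing_wait_list = true;
	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
				 wait_list) {
		if (!srpt_handle_new_iu(ch, recv_ioctx))
			break;	/* out of send contexts; retry later */
	}
	ch->processing_wait_list = false;
}
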
1758 struct srpt_rdma_ch *ch = wc->qp->qp_context;
1768 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
1781 srpt_process_wait_list(ch);
1786 * @ch: SRPT RDMA channel.
1788 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1791 struct srpt_port *sport = ch->sport;
1797 WARN_ON(ch->rq_size < 1);
1805 ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
1807 if (IS_ERR(ch->cq)) {
1808 ret = PTR_ERR(ch->cq);
1810 ch->rq_size + sq_size, ret);
1813 ch->cq_size = ch->rq_size + sq_size;
1815 qp_init->qp_context = (void *)ch;
1817 qp_init->send_cq = ch->cq;
1818 qp_init->recv_cq = ch->cq;
1832 qp_init->port_num = ch->sport->port;
1836 qp_init->cap.max_recv_wr = ch->rq_size;
1838 if (ch->using_rdma_cm) {
1839 ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
1840 ch->qp = ch->rdma_cm.cm_id->qp;
1842 ch->qp = ib_create_qp(sdev->pd, qp_init);
1843 if (!IS_ERR(ch->qp)) {
1844 ret = srpt_init_ch_qp(ch, ch->qp);
1846 ib_destroy_qp(ch->qp);
1848 ret = PTR_ERR(ch->qp);
1857 ib_cq_pool_put(ch->cq, ch->cq_size);
1867 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
1869 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
1870 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1871 qp_init->cap.max_send_wr, ch);
1874 for (i = 0; i < ch->rq_size; i++)
1875 srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
1882 ch->qp = NULL;
1883 ib_cq_pool_put(ch->cq, ch->cq_size);
1887 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
1889 ib_destroy_qp(ch->qp);
1890 ib_cq_pool_put(ch->cq, ch->cq_size);
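
Lines 1788-1890: channel setup takes one completion queue from the shared CQ pool, sized for both the send and receive queues, then binds the QP to it; teardown (1887-1890) runs strictly in reverse so no completion can target a CQ already returned to the pool. A simplified sketch of the setup half, assuming a precomputed sq_size and omitting the rdma_cm branch (1839-1840):

static int srpt_create_ch_ib_sketch(struct srpt_rdma_ch *ch, int sq_size)
{
	struct srpt_device *sdev = ch->sport->sdev;
	struct ib_qp_init_attr qp_init = {};
	int ret;

	/* one CQ from the shared pool serves both SQ and RQ */
	ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
				IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq))
		return PTR_ERR(ch->cq);
	ch->cq_size = ch->rq_size + sq_size;

	qp_init.qp_context = ch;	/* recovered via wc->qp->qp_context */
	qp_init.send_cq = ch->cq;
	qp_init.recv_cq = ch->cq;
	qp_init.qp_type = IB_QPT_RC;
	qp_init.cap.max_send_wr = sq_size;
	qp_init.cap.max_recv_wr = ch->rq_size;
	qp_init.cap.max_send_sge = 1;	/* the driver derives real limits */
	qp_init.cap.max_recv_sge = 1;

	ch->qp = ib_create_qp(sdev->pd, &qp_init);
	if (IS_ERR(ch->qp)) {
		ret = PTR_ERR(ch->qp);
		ib_cq_pool_put(ch->cq, ch->cq_size);	/* undo in reverse */
		return ret;
	}
	return 0;
}
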
1895 * @ch: SRPT RDMA channel.
1903 static bool srpt_close_ch(struct srpt_rdma_ch *ch)
1907 if (!srpt_set_ch_state(ch, CH_DRAINING)) {
1908 pr_debug("%s: already closed\n", ch->sess_name);
1912 kref_get(&ch->kref);
1914 ret = srpt_ch_qp_err(ch);
1917 ch->sess_name, ch->qp->qp_num, ret);
1919 ret = srpt_zerolength_write(ch);
1922 ch->sess_name, ch->qp->qp_num, ret);
1923 if (srpt_set_ch_state(ch, CH_DISCONNECTED))
1924 schedule_work(&ch->release_work);
1929 kref_put(&ch->kref, srpt_free_ch);
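
Lines 1903-1929: srpt_close_ch() is made idempotent by the state machine (only the caller that wins the CH_DRAINING transition proceeds), and the kref_get/kref_put pair pins the channel while the QP is forced into the error state and the zero-length drain marker is posted. Reconstructed from the fragments, with the error logging collapsed:

static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DRAINING))
		return false;	/* somebody else already closed it */

	kref_get(&ch->kref);	/* pin ch across the drain */

	srpt_ch_qp_err(ch);	/* flush all outstanding WRs */

	ret = srpt_zerolength_write(ch);
	if (ret < 0) {
		/* could not even post the drain marker: finish now */
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
	}

	kref_put(&ch->kref, srpt_free_ch);
	return true;
}
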
1943 static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
1947 if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
1950 if (ch->using_rdma_cm) {
1951 ret = rdma_disconnect(ch->rdma_cm.cm_id);
1953 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
1955 ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
1958 if (ret < 0 && srpt_close_ch(ch))
1965 static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
1968 struct srpt_port *sport = ch->sport;
1970 pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
1971 ch->state);
1973 ch->closed = &closed;
1976 srpt_disconnect_ch(ch);
1981 ch->sess_name, ch->qp->qp_num, ch->state);
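
Lines 1965-1981 show the synchronous flavor of disconnect: an on-stack completion is parked in ch->closed, the asynchronous teardown is kicked off, and the caller blocks until the release work fires complete() on it (lines 2113-2114). Simplified sketch; the real function additionally holds sport->mutex around the assignment:

static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
	DECLARE_COMPLETION_ONSTACK(closed);

	ch->closed = &closed;	/* release_work will complete() this */
	srpt_disconnect_ch(ch);

	while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
		pr_info("%s-%d: still waiting for disconnect\n",
			ch->sess_name, ch->qp->qp_num);
}
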
1988 struct srpt_rdma_ch *ch;
1993 list_for_each_entry(ch, &nexus->ch_list, list) {
1994 if (srpt_disconnect_ch(ch) >= 0)
1996 ch->sess_name, ch->qp->qp_num,
1999 srpt_close_ch(ch);
2067 struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
2069 srpt_drop_sport_ref(ch->sport);
2070 kfree_rcu(ch, rcu);
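
Lines 2067-2070 are the kref release function. Because readers walk the nexus channel list locklessly (the channel is unlinked with list_del_rcu() at 2110), the final free must wait out an RCU grace period, hence kfree_rcu() rather than kfree(). Reconstructed:

static void srpt_free_ch(struct kref *kref)
{
	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);

	srpt_drop_sport_ref(ch->sport);
	/* lockless ch_list walkers may still hold an RCU reference;
	 * defer the actual kfree() past the grace period */
	kfree_rcu(ch, rcu);
}
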
2083 struct srpt_rdma_ch *ch;
2088 ch = container_of(w, struct srpt_rdma_ch, release_work);
2089 pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
2091 sdev = ch->sport->sdev;
2094 se_sess = ch->sess;
2101 ch->sess = NULL;
2103 if (ch->using_rdma_cm)
2104 rdma_destroy_id(ch->rdma_cm.cm_id);
2106 ib_destroy_cm_id(ch->ib_cm.cm_id);
2108 sport = ch->sport;
2110 list_del_rcu(&ch->list);
2113 if (ch->closed)
2114 complete(ch->closed);
2116 srpt_destroy_ch_ib(ch);
2118 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2119 ch->sport->sdev, ch->rq_size,
2120 ch->rsp_buf_cache, DMA_TO_DEVICE);
2122 kmem_cache_destroy(ch->rsp_buf_cache);
2124 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2125 sdev, ch->rq_size,
2126 ch->req_buf_cache, DMA_FROM_DEVICE);
2128 kmem_cache_destroy(ch->req_buf_cache);
2130 kref_put(&ch->kref, srpt_free_ch);
2162 struct srpt_rdma_ch *ch = NULL;
2216 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
2217 if (!ch) {
2223 kref_init(&ch->kref);
2224 ch->pkey = be16_to_cpu(pkey);
2225 ch->nexus = nexus;
2226 ch->zw_cqe.done = srpt_zerolength_write_done;
2227 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2228 ch->sport = sport;
2230 ch->using_rdma_cm = true;
2231 ch->rdma_cm.cm_id = rdma_cm_id;
2232 rdma_cm_id->context = ch;
2234 ch->ib_cm.cm_id = ib_cm_id;
2235 ib_cm_id->context = ch;
2238 * ch->rq_size should be at least as large as the initiator queue
2242 ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
2243 spin_lock_init(&ch->spinlock);
2244 ch->state = CH_CONNECTING;
2245 INIT_LIST_HEAD(&ch->cmd_wait_list);
2246 ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2248 ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
2250 if (!ch->rsp_buf_cache)
2253 ch->ioctx_ring = (struct srpt_send_ioctx **)
2254 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2255 sizeof(*ch->ioctx_ring[0]),
2256 ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
2257 if (!ch->ioctx_ring) {
2263 for (i = 0; i < ch->rq_size; i++)
2264 ch->ioctx_ring[i]->ch = ch;
2275 ch->imm_data_offset = imm_data_offset;
2278 ch->imm_data_offset = 0;
2283 ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
2285 if (!ch->req_buf_cache)
2288 ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
2289 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2290 sizeof(*ch->ioctx_recv_ring[0]),
2291 ch->req_buf_cache,
2294 if (!ch->ioctx_recv_ring) {
2300 for (i = 0; i < ch->rq_size; i++)
2301 INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
2304 ret = srpt_create_ch_ib(ch);
2311 strscpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
2316 pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
2319 tag_num = ch->rq_size;
2325 if (!IS_ERR_OR_NULL(ch->sess))
2327 ch->sess = target_setup_session(&stpg->tpg, tag_num,
2329 ch->sess_name, ch, NULL);
2337 if (!IS_ERR_OR_NULL(ch->sess))
2339 ch->sess = target_setup_session(&stpg->tpg, tag_num,
2341 ch, NULL);
2342 if (!IS_ERR_OR_NULL(ch->sess))
2345 ch->sess = target_setup_session(&stpg->tpg, tag_num,
2347 i_port_id + 2, ch, NULL);
2352 if (IS_ERR_OR_NULL(ch->sess)) {
2353 WARN_ON_ONCE(ch->sess == NULL);
2354 ret = PTR_ERR(ch->sess);
2355 ch->sess = NULL;
2357 ch->sess_name, ret);
2386 list_add_tail_rcu(&ch->list, &nexus->ch_list);
2400 ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
2408 pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
2409 ch->sess_name, ch);
2416 ch->max_ti_iu_len = it_iu_len;
2419 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2420 atomic_set(&ch->req_lim, ch->rq_size);
2421 atomic_set(&ch->req_lim_delta, 0);
2424 if (ch->using_rdma_cm) {
2432 rep_param->ib_cm.qp_num = ch->qp->qp_num;
2448 if (sport->enabled && ch->state == CH_CONNECTING) {
2449 if (ch->using_rdma_cm)
2473 srpt_destroy_ch_ib(ch);
2476 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2477 ch->sport->sdev, ch->rq_size,
2478 ch->req_buf_cache, DMA_FROM_DEVICE);
2481 kmem_cache_destroy(ch->req_buf_cache);
2484 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2485 ch->sport->sdev, ch->rq_size,
2486 ch->rsp_buf_cache, DMA_TO_DEVICE);
2489 kmem_cache_destroy(ch->rsp_buf_cache);
2496 kfree(ch);
2497 ch = NULL;
2515 if (ch && ch->sess) {
2516 srpt_close_ch(ch);
2581 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2594 pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2595 ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2602 * @ch: SRPT RDMA channel.
2607 static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
2611 ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
2613 pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
2614 ch->qp->qp_num);
2615 srpt_close_ch(ch);
2624 if (!srpt_set_ch_state(ch, CH_LIVE)) {
2626 ch->sess_name, ch->qp->qp_num);
2631 ret = srpt_zerolength_write(ch);
2650 struct srpt_rdma_ch *ch = cm_id->context;
2660 srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
2666 srpt_cm_rtu_recv(ch);
2669 srpt_disconnect_ch(ch);
2672 pr_info("Received CM DREP message for ch %s-%d.\n",
2673 ch->sess_name, ch->qp->qp_num);
2674 srpt_close_ch(ch);
2677 pr_info("Received CM TimeWait exit for ch %s-%d.\n",
2678 ch->sess_name, ch->qp->qp_num);
2679 srpt_close_ch(ch);
2682 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2683 ch->qp->qp_num);
2702 struct srpt_rdma_ch *ch = cm_id->context;
2710 srpt_cm_rej_recv(ch, event->status,
2715 srpt_cm_rtu_recv(ch);
2718 if (ch->state < CH_DISCONNECTING)
2719 srpt_disconnect_ch(ch);
2721 srpt_close_ch(ch);
2724 srpt_close_ch(ch);
2727 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2728 ch->qp->qp_num);
2749 struct srpt_rdma_ch *ch = ioctx->ch;
2764 if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2775 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2780 ret = ib_post_send(ch->qp, first_wr, NULL);
2784 atomic_read(&ch->sq_wr_avail));
2790 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
2816 struct srpt_rdma_ch *ch = ioctx->ch;
2817 struct srpt_device *sdev = ch->sport->sdev;
2834 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2835 ch, ioctx->ioctx.index, ioctx->state);
2849 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
2850 ch->sport->port, NULL, first_wr);
2855 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2860 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2864 atomic_inc(&ch->req_lim);
2867 &ch->sq_wr_avail) < 0)) {
2888 ret = ib_post_send(ch->qp, first_wr, NULL);
2898 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2899 atomic_dec(&ch->req_lim);
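
Lines 2864-2899 show the accounting around posting a response: the response consumes one initiator credit (req_lim) and 1 + n_rdma send-queue slots (sq_wr_avail); if the reservation goes negative or ib_post_send() fails, both are rolled back, because no completion will ever arrive to return them. A hedged sketch; the helper name and the -ENOMEM return are illustrative:

static int srpt_post_response_sketch(struct srpt_rdma_ch *ch,
				     struct srpt_send_ioctx *ioctx,
				     struct ib_send_wr *first_wr)
{
	int ret;

	atomic_inc(&ch->req_lim);	/* this response carries a credit */

	/* reserve SQ slots for the RDMA WRs plus the send itself */
	if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
				       &ch->sq_wr_avail) < 0)) {
		atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
		return -ENOMEM;
	}

	ret = ib_post_send(ch->qp, first_wr, NULL);
	if (ret < 0) {
		/* undo both reservations; no completion will run */
		atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}
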
2919 * ch->req_lim_delta.
2925 struct srpt_rdma_ch *ch = ioctx->ch;
2927 atomic_inc(&ch->req_lim_delta);
2959 struct srpt_rdma_ch *ch;
2976 list_for_each_entry(ch, &nexus->ch_list, list) {
2978 ch->sess_name, ch->qp->qp_num,
2979 get_ch_state_name(ch->state));
3343 struct srpt_rdma_ch *ch = ioctx->ch;
3352 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
3356 srpt_free_rw_ctxs(ch, ioctx);
3373 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
3375 srpt_disconnect_ch_sync(ch);