Lines Matching refs:queue

66  * queue before determining it to be idle.  This optional module behavior
110 struct nvmet_tcp_queue *queue;
221 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
224 if (unlikely(!queue->nr_cmds)) {
229 return cmd - queue->cmds;
257 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
261 cmd = list_first_entry_or_null(&queue->free_list,
277 if (unlikely(cmd == &cmd->queue->connect))
280 list_add_tail(&cmd->entry, &cmd->queue->free_list);
283 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
285 return queue->sock->sk->sk_incoming_cpu;
288 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
290 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
293 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
295 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
308 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
316 pr_err("queue %d: header digest enabled but no header digest\n",
317 queue->idx);
322 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
325 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
326 queue->idx, le32_to_cpu(recv_digest),
334 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
337 u8 digest_len = nvmet_tcp_hdgst_len(queue);
344 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
389 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
391 queue->rcv_state = NVMET_TCP_RECV_ERR;
392 if (queue->nvme_sq.ctrl)
393 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
395 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
398 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
400 queue->rcv_state = NVMET_TCP_RECV_ERR;
402 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
404 nvmet_tcp_fatal_error(queue);
455 struct nvmet_tcp_queue *queue = cmd->queue;
456 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
457 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
463 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
474 if (queue->data_digest) {
476 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
479 if (cmd->queue->hdr_digest) {
481 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
488 struct nvmet_tcp_queue *queue = cmd->queue;
489 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
501 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
504 if (cmd->queue->hdr_digest) {
506 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
513 struct nvmet_tcp_queue *queue = cmd->queue;
514 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
524 if (cmd->queue->hdr_digest) {
526 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
530 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
535 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
537 list_add(&cmd->entry, &queue->resp_send_list);
538 queue->send_list_len++;
542 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
544 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
546 if (!queue->snd_cmd) {
547 nvmet_tcp_process_resp_list(queue);
548 queue->snd_cmd =
549 list_first_entry_or_null(&queue->resp_send_list,
551 if (unlikely(!queue->snd_cmd))
555 list_del_init(&queue->snd_cmd->entry);
556 queue->send_list_len--;
558 if (nvmet_tcp_need_data_out(queue->snd_cmd))
559 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
560 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
561 nvmet_setup_r2t_pdu(queue->snd_cmd);
563 nvmet_setup_response_pdu(queue->snd_cmd);
565 return queue->snd_cmd;
572 struct nvmet_tcp_queue *queue = cmd->queue;
576 if (unlikely(cmd == queue->cmd)) {
585 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
591 llist_add(&cmd->lentry, &queue->resp_list);
592 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
609 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
615 ret = sock_sendmsg(cmd->queue->sock, &msg);
632 struct nvmet_tcp_queue *queue = cmd->queue;
643 if ((!last_in_batch && cmd->queue->send_list_len) ||
645 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
650 ret = sock_sendmsg(cmd->queue->sock, &msg);
664 if (queue->data_digest) {
668 if (queue->nvme_sq.sqhd_disabled) {
669 cmd->queue->snd_cmd = NULL;
676 if (queue->nvme_sq.sqhd_disabled)
688 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
692 if (!last_in_batch && cmd->queue->send_list_len)
699 ret = sock_sendmsg(cmd->queue->sock, &msg);
709 cmd->queue->snd_cmd = NULL;
718 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
722 if (!last_in_batch && cmd->queue->send_list_len)
729 ret = sock_sendmsg(cmd->queue->sock, &msg);
738 cmd->queue->snd_cmd = NULL;
744 struct nvmet_tcp_queue *queue = cmd->queue;
753 if (!last_in_batch && cmd->queue->send_list_len)
758 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
768 if (queue->nvme_sq.sqhd_disabled) {
769 cmd->queue->snd_cmd = NULL;
777 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
780 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
783 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
784 cmd = nvmet_tcp_fetch_cmd(queue);
826 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
832 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
834 nvmet_tcp_socket_error(queue, ret);
845 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
847 queue->offset = 0;
848 queue->left = sizeof(struct nvme_tcp_hdr);
849 queue->cmd = NULL;
850 queue->rcv_state = NVMET_TCP_RECV_PDU;
853 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
855 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
857 ahash_request_free(queue->rcv_hash);
858 ahash_request_free(queue->snd_hash);
862 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
870 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
871 if (!queue->snd_hash)
873 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
875 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
876 if (!queue->rcv_hash)
878 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
882 ahash_request_free(queue->snd_hash);
889 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
891 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
892 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
900 nvmet_tcp_fatal_error(queue);
905 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
910 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
915 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
916 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
917 if (queue->hdr_digest || queue->data_digest) {
918 ret = nvmet_tcp_alloc_crypto(queue);
931 if (queue->hdr_digest)
933 if (queue->data_digest)
938 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
940 queue->state = NVMET_TCP_Q_FAILED;
941 return ret; /* queue removal will cleanup */
944 queue->state = NVMET_TCP_Q_LIVE;
945 nvmet_prepare_receive_pdu(queue);
949 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
964 nvmet_prepare_receive_pdu(queue);
970 pr_err("queue %d: failed to map data\n", queue->idx);
971 nvmet_tcp_fatal_error(queue);
975 queue->rcv_state = NVMET_TCP_RECV_DATA;
980 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
982 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
986 if (likely(queue->nr_cmds)) {
987 if (unlikely(data->ttag >= queue->nr_cmds)) {
988 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
989 queue->idx, data->ttag, queue->nr_cmds);
992 cmd = &queue->cmds[data->ttag];
994 cmd = &queue->connect;
1005 nvmet_tcp_hdgst_len(queue) -
1006 nvmet_tcp_ddgst_len(queue) -
1018 queue->cmd = cmd;
1019 queue->rcv_state = NVMET_TCP_RECV_DATA;
1025 nvmet_tcp_fatal_error(queue);
1029 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
1031 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1032 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1036 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1040 nvmet_tcp_fatal_error(queue);
1043 return nvmet_tcp_handle_icreq(queue);
1047 pr_err("queue %d: received icreq pdu in state %d\n",
1048 queue->idx, queue->state);
1049 nvmet_tcp_fatal_error(queue);
1054 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1060 queue->cmd = nvmet_tcp_get_cmd(queue);
1061 if (unlikely(!queue->cmd)) {
1063 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1064 queue->idx, queue->nr_cmds, queue->send_list_len,
1066 nvmet_tcp_fatal_error(queue);
1070 req = &queue->cmd->req;
1073 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1074 &queue->nvme_sq, &nvmet_tcp_ops))) {
1080 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1084 ret = nvmet_tcp_map_data(queue->cmd);
1086 pr_err("queue %d: failed to map data\n", queue->idx);
1087 if (nvmet_tcp_has_inline_data(queue->cmd))
1088 nvmet_tcp_fatal_error(queue);
1095 if (nvmet_tcp_need_data_in(queue->cmd)) {
1096 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1097 queue->rcv_state = NVMET_TCP_RECV_DATA;
1098 nvmet_tcp_build_pdu_iovec(queue->cmd);
1102 nvmet_tcp_queue_response(&queue->cmd->req);
1106 queue->cmd->req.execute(&queue->cmd->req);
1108 nvmet_prepare_receive_pdu(queue);
1140 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
1147 ctype = tls_get_record_type(queue->sock->sk, cmsg);
1154 tls_alert_recv(queue->sock->sk, msg, &level, &description);
1156 pr_err("queue %d: TLS Alert desc %u\n",
1157 queue->idx, description);
1160 pr_warn("queue %d: TLS Alert desc %u\n",
1161 queue->idx, description);
1167 pr_err("queue %d: TLS record %d unhandled\n",
1168 queue->idx, ctype);
1175 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1177 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1184 iov.iov_base = (void *)&queue->pdu + queue->offset;
1185 iov.iov_len = queue->left;
1186 if (queue->tls_pskid) {
1190 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1194 if (queue->tls_pskid) {
1195 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1200 queue->offset += len;
1201 queue->left -= len;
1202 if (queue->left)
1205 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1206 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1210 nvmet_tcp_fatal_error(queue);
1219 queue->left = hdr->hlen - queue->offset + hdgst;
1223 if (queue->hdr_digest &&
1224 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1225 nvmet_tcp_fatal_error(queue); /* fatal */
1229 if (queue->data_digest &&
1230 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1231 nvmet_tcp_fatal_error(queue); /* fatal */
1235 return nvmet_tcp_done_recv_pdu(queue);
1240 struct nvmet_tcp_queue *queue = cmd->queue;
1242 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1243 queue->offset = 0;
1244 queue->left = NVME_TCP_DIGEST_LENGTH;
1245 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1248 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1250 struct nvmet_tcp_cmd *cmd = queue->cmd;
1254 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1258 if (queue->tls_pskid) {
1259 ret = nvmet_tcp_tls_record_ok(cmd->queue,
1269 if (queue->data_digest) {
1277 nvmet_prepare_receive_pdu(queue);
1281 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1283 struct nvmet_tcp_cmd *cmd = queue->cmd;
1288 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1289 .iov_len = queue->left
1292 if (queue->tls_pskid) {
1296 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1300 if (queue->tls_pskid) {
1301 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1306 queue->offset += len;
1307 queue->left -= len;
1308 if (queue->left)
1311 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1312 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1313 queue->idx, cmd->req.cmd->common.command_id,
1314 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1318 nvmet_tcp_fatal_error(queue);
1328 nvmet_prepare_receive_pdu(queue);
1332 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1336 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1339 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1340 result = nvmet_tcp_try_recv_pdu(queue);
1345 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1346 result = nvmet_tcp_try_recv_data(queue);
1351 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1352 result = nvmet_tcp_try_recv_ddgst(queue);
1366 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1372 ret = nvmet_tcp_try_recv_one(queue);
1374 nvmet_tcp_socket_error(queue, ret);
1387 struct nvmet_tcp_queue *queue =
1390 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
1391 queue_work(nvmet_wq, &queue->release_work);
1394 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1396 spin_lock_bh(&queue->state_lock);
1397 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1399 tls_handshake_cancel(queue->sock->sk);
1401 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1402 queue->state = NVMET_TCP_Q_DISCONNECTING;
1403 kref_put(&queue->kref, nvmet_tcp_release_queue);
1405 spin_unlock_bh(&queue->state_lock);
1408 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1410 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1413 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1420 nvmet_tcp_arm_queue_deadline(queue);
1422 return !time_after(jiffies, queue->poll_end);
1427 struct nvmet_tcp_queue *queue =
1435 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1441 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1453 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1454 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1457 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1460 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1462 c->queue = queue;
1463 c->req.port = queue->port->nport;
1465 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1471 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1477 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1482 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1487 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1493 list_add_tail(&c->entry, &queue->free_list);
1513 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1516 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1523 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1528 queue->cmds = cmds;
1539 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1541 struct nvmet_tcp_cmd *cmds = queue->cmds;
1544 for (i = 0; i < queue->nr_cmds; i++)
1547 nvmet_tcp_free_cmd(&queue->connect);
1551 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1553 struct socket *sock = queue->sock;
1556 sock->sk->sk_data_ready = queue->data_ready;
1557 sock->sk->sk_state_change = queue->state_change;
1558 sock->sk->sk_write_space = queue->write_space;
1563 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1565 struct nvmet_tcp_cmd *cmd = queue->cmds;
1568 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1573 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1575 nvmet_req_uninit(&queue->connect.req);
1579 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1581 struct nvmet_tcp_cmd *cmd = queue->cmds;
1584 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1589 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
1590 nvmet_tcp_free_cmd_buffers(&queue->connect);
1595 struct nvmet_tcp_queue *queue =
1599 list_del_init(&queue->queue_list);
1602 nvmet_tcp_restore_socket_callbacks(queue);
1603 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1604 cancel_work_sync(&queue->io_work);
1606 queue->rcv_state = NVMET_TCP_RECV_ERR;
1608 nvmet_tcp_uninit_data_in_cmds(queue);
1609 nvmet_sq_destroy(&queue->nvme_sq);
1610 cancel_work_sync(&queue->io_work);
1611 nvmet_tcp_free_cmd_data_in_buffers(queue);
1613 fput(queue->sock->file);
1614 nvmet_tcp_free_cmds(queue);
1615 if (queue->hdr_digest || queue->data_digest)
1616 nvmet_tcp_free_crypto(queue);
1617 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1618 page_frag_cache_drain(&queue->pf_cache);
1619 kfree(queue);
1624 struct nvmet_tcp_queue *queue;
1629 queue = sk->sk_user_data;
1630 if (likely(queue)) {
1631 if (queue->data_ready)
1632 queue->data_ready(sk);
1633 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
1634 queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
1635 &queue->io_work);
1642 struct nvmet_tcp_queue *queue;
1645 queue = sk->sk_user_data;
1646 if (unlikely(!queue))
1649 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1650 queue->write_space(sk);
1656 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1664 struct nvmet_tcp_queue *queue;
1667 queue = sk->sk_user_data;
1668 if (!queue)
1679 nvmet_tcp_schedule_release_queue(queue);
1682 pr_warn("queue %d unhandled state %d\n",
1683 queue->idx, sk->sk_state);
1689 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1691 struct socket *sock = queue->sock;
1696 (struct sockaddr *)&queue->sockaddr);
1701 (struct sockaddr *)&queue->sockaddr_peer);
1706 * Cleanup whatever is sitting in the TCP transmit queue on socket
1728 sock->sk->sk_user_data = queue;
1729 queue->data_ready = sock->sk->sk_data_ready;
1731 queue->state_change = sock->sk->sk_state_change;
1733 queue->write_space = sock->sk->sk_write_space;
1736 nvmet_tcp_arm_queue_deadline(queue);
1737 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1745 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
1747 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1750 .iov_base = (u8 *)&queue->pdu + queue->offset,
1760 if (nvmet_port_secure_channel_required(queue->port->nport))
1763 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1766 pr_debug("queue %d: peek error %d\n",
1767 queue->idx, len);
1771 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1776 pr_debug("queue %d: short read, %d bytes missing\n",
1777 queue->idx, (int)iov.iov_len - len);
1780 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
1781 queue->idx, hdr->type, hdr->hlen, hdr->plen,
1786 pr_debug("queue %d: icreq detected\n",
1787 queue->idx);
1796 struct nvmet_tcp_queue *queue = data;
1798 pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
1799 queue->idx, peerid, status);
1800 spin_lock_bh(&queue->state_lock);
1801 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1802 spin_unlock_bh(&queue->state_lock);
1806 queue->tls_pskid = peerid;
1807 queue->state = NVMET_TCP_Q_CONNECTING;
1809 queue->state = NVMET_TCP_Q_FAILED;
1810 spin_unlock_bh(&queue->state_lock);
1812 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1814 nvmet_tcp_schedule_release_queue(queue);
1816 nvmet_tcp_set_queue_sock(queue);
1817 kref_put(&queue->kref, nvmet_tcp_release_queue);
1822 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
1825 pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
1829 if (!tls_handshake_cancel(queue->sock->sk))
1831 spin_lock_bh(&queue->state_lock);
1832 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1833 spin_unlock_bh(&queue->state_lock);
1836 queue->state = NVMET_TCP_Q_FAILED;
1837 spin_unlock_bh(&queue->state_lock);
1838 nvmet_tcp_schedule_release_queue(queue);
1839 kref_put(&queue->kref, nvmet_tcp_release_queue);
1842 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
1847 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
1848 pr_warn("cannot start TLS in state %d\n", queue->state);
1852 kref_get(&queue->kref);
1853 pr_debug("queue %d: TLS ServerHello\n", queue->idx);
1855 args.ta_sock = queue->sock;
1857 args.ta_data = queue;
1858 args.ta_keyring = key_serial(queue->port->nport->keyring);
1863 kref_put(&queue->kref, nvmet_tcp_release_queue);
1866 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
1878 struct nvmet_tcp_queue *queue;
1882 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1883 if (!queue) {
1888 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1889 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1890 kref_init(&queue->kref);
1891 queue->sock = newsock;
1892 queue->port = port;
1893 queue->nr_cmds = 0;
1894 spin_lock_init(&queue->state_lock);
1895 if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
1897 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
1899 queue->state = NVMET_TCP_Q_CONNECTING;
1900 INIT_LIST_HEAD(&queue->free_list);
1901 init_llist_head(&queue->resp_list);
1902 INIT_LIST_HEAD(&queue->resp_send_list);
1904 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1910 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1911 if (queue->idx < 0) {
1912 ret = queue->idx;
1916 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1920 ret = nvmet_sq_init(&queue->nvme_sq);
1924 nvmet_prepare_receive_pdu(queue);
1927 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1930 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
1933 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1934 struct sock *sk = queue->sock->sk;
1941 if (!nvmet_tcp_try_peek_pdu(queue)) {
1942 if (!nvmet_tcp_tls_handshake(queue))
1948 queue->state = NVMET_TCP_Q_CONNECTING;
1952 ret = nvmet_tcp_set_queue_sock(queue);
1959 list_del_init(&queue->queue_list);
1961 nvmet_sq_destroy(&queue->nvme_sq);
1963 nvmet_tcp_free_cmd(&queue->connect);
1965 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1967 fput(queue->sock->file);
1969 kfree(queue);
1971 pr_err("failed to allocate queue, error %d\n", ret);
2091 struct nvmet_tcp_queue *queue;
2094 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2095 if (queue->port == port)
2096 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2121 struct nvmet_tcp_queue *queue;
2124 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2125 if (queue->nvme_sq.ctrl == ctrl)
2126 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2132 struct nvmet_tcp_queue *queue =
2151 queue->nr_cmds = sq->size * 2;
2152 if (nvmet_tcp_alloc_cmds(queue))
2165 struct nvmet_tcp_queue *queue = cmd->queue;
2167 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
2206 struct nvmet_tcp_queue *queue;
2212 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2213 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
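
The hits at source lines 1332-1366 above show the shape of the receive path: nvmet_tcp_try_recv_one() dispatches on queue->rcv_state and nvmet_tcp_try_recv() calls it up to a budget. The standalone sketch below models only that dispatch shape; the RECV_* names mirror the NVMET_TCP_RECV_* states seen in the listing, while the handler bodies are placeholders of my own (they do no socket I/O) and the whole program is an illustrative user-space model, not the driver code.

/*
 * Simplified, standalone model of the receive state machine whose
 * dispatch appears in the listing above (nvmet_tcp_try_recv_one()).
 * Handler bodies are placeholders; real handlers read from the socket
 * and only advance the state once a full PDU/data/digest is consumed.
 */
#include <stdio.h>

enum rcv_state { RECV_PDU, RECV_DATA, RECV_DDGST, RECV_ERR };

struct queue_model {
	enum rcv_state rcv_state;
};

/* Placeholder handlers: return "bytes consumed" and advance the state. */
static int recv_pdu(struct queue_model *q)   { q->rcv_state = RECV_DATA;  return 1; }
static int recv_data(struct queue_model *q)  { q->rcv_state = RECV_DDGST; return 1; }
static int recv_ddgst(struct queue_model *q) { q->rcv_state = RECV_PDU;   return 1; }

/*
 * Mirrors the sequential if-checks seen at lines 1336-1352 above:
 * within one call the machine may fall through PDU -> DATA -> DDGST
 * when each stage completes, and an error state short-circuits.
 */
static int try_recv_one(struct queue_model *q)
{
	int result = 0;

	if (q->rcv_state == RECV_ERR)
		return -1;
	if (q->rcv_state == RECV_PDU)
		result = recv_pdu(q);
	if (q->rcv_state == RECV_DATA)
		result = recv_data(q);
	if (q->rcv_state == RECV_DDGST)
		result = recv_ddgst(q);
	return result;
}

int main(void)
{
	struct queue_model q = { .rcv_state = RECV_PDU };
	int i;

	/* Budgeted loop, analogous to nvmet_tcp_try_recv() iterating
	 * try_recv_one() up to its receive budget. */
	for (i = 0; i < 3; i++)
		printf("iteration %d: result %d\n", i, try_recv_one(&q));
	return 0;
}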