Lines matching refs:pdu in drivers/nvme/target/tcp.c (NVMe/TCP target)

171 	union nvme_tcp_pdu	pdu;
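Line 171 is the per-queue receive buffer: a union of every PDU type the target can receive, so one allocation holds whichever PDU arrives next and can always be inspected through the common 8-byte header. A trimmed userspace re-declaration for orientation (field names follow include/linux/nvme-tcp.h; the kernel's union also overlays cmd, icreq and icresp views):

#include <stdint.h>

/* Common 8-byte header that starts every NVMe/TCP PDU. */
struct nvme_tcp_hdr {
	uint8_t  type;		/* nvme_tcp_cmd, nvme_tcp_h2c_data, ... */
	uint8_t  flags;		/* NVME_TCP_F_HDGST / _DDGST / _DATA_LAST */
	uint8_t  hlen;		/* PDU header length */
	uint8_t  pdo;		/* PDU data offset; 0 when there is no data */
	uint32_t plen;		/* total PDU length, little-endian on the wire */
};

struct nvme_tcp_data_pdu {	/* C2H/H2C DATA */
	struct nvme_tcp_hdr hdr;
	uint16_t command_id, rsvd;
	uint32_t data_offset, data_length;
	uint8_t  rsvd2[4];
};

struct nvme_tcp_r2t_pdu {	/* Ready-to-Transfer */
	struct nvme_tcp_hdr hdr;
	uint16_t command_id, ttag;
	uint32_t r2t_offset, r2t_length;
	uint8_t  rsvd[4];
};

/* One buffer, many views: receive into it, then read pdu.hdr.type
 * to decide which member is live. */
union nvme_tcp_pdu {
	struct nvme_tcp_hdr      hdr;
	struct nvme_tcp_data_pdu data;
	struct nvme_tcp_r2t_pdu  r2t;
};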
299 void *pdu, size_t len)
303 sg_init_one(&sg, pdu, len);
304 ahash_request_set_crypt(hash, &sg, pdu + len, len);
309 void *pdu, size_t len)
311 struct nvme_tcp_hdr *hdr = pdu;
321 recv_digest = *(__le32 *)(pdu + hdr->hlen);
322 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
323 exp_digest = *(__le32 *)(pdu + hdr->hlen);
334 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
336 struct nvme_tcp_hdr *hdr = pdu;
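Lines 299-336 are the digest helpers. nvmet_tcp_hdgst() wraps the header in a one-entry scatterlist and points the ahash result at pdu + len, so the digest is written directly into its wire position just past the header. nvmet_tcp_verify_hdgst() leans on that: it saves the received digest from pdu + hdr->hlen, recomputes in place (overwriting those four bytes), and reads the expected value back from the same offset. NVMe/TCP digests are CRC32C, so the whole dance can be modeled in userspace without the crypto API; a minimal sketch (endianness glossed over, helper names mine):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli), the digest NVMe/TCP uses; the kernel gets
 * it from the crypto API ("crc32c") rather than open-coding it. */
static uint32_t crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return ~crc;
}

/* Mirrors nvmet_tcp_hdgst(): the digest lands at pdu + len, exactly its
 * wire position right behind the header. */
static void pdu_hdgst(void *pdu, size_t len)
{
	uint32_t d = crc32c(pdu, len);

	memcpy((uint8_t *)pdu + len, &d, sizeof(d));
}

/* Mirrors nvmet_tcp_verify_hdgst(): save what arrived, recompute in place
 * (clobbering the same four bytes), read the expected value back. */
static int pdu_verify_hdgst(void *pdu, size_t hlen)
{
	uint32_t recv_digest, exp_digest;

	memcpy(&recv_digest, (uint8_t *)pdu + hlen, sizeof(recv_digest));
	pdu_hdgst(pdu, hlen);
	memcpy(&exp_digest, (uint8_t *)pdu + hlen, sizeof(exp_digest));
	return recv_digest == exp_digest ? 0 : -1;
}

int main(void)
{
	uint8_t pdu[8 + 4] = { 4, 1, 8, 0 };	/* fake 8-byte header + digest slot */

	pdu_hdgst(pdu, 8);
	printf("intact:    %d\n", pdu_verify_hdgst(pdu, 8));	/* 0  */
	pdu[1] ^= 0x40;						/* flip a header bit */
	printf("corrupted: %d\n", pdu_verify_hdgst(pdu, 8));	/* -1 */
	return 0;
}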
455 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
463 pdu->hdr.type = nvme_tcp_c2h_data;
464 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
465 NVME_TCP_F_DATA_SUCCESS : 0);
466 pdu->hdr.hlen = sizeof(*pdu);
467 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
468 pdu->hdr.plen =
469 cpu_to_le32(pdu->hdr.hlen + hdgst +
470 cmd->req.transfer_len + ddgst);
471 pdu->command_id = cmd->req.cqe->command_id;
472 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
473 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
476 pdu->hdr.flags |= NVME_TCP_F_DDGST;
481 pdu->hdr.flags |= NVME_TCP_F_HDGST;
482 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
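Lines 455-482 (nvmet_tcp_setup_c2h_data_pdu) encode the C2H DATA framing: pdo tells the host where the payload starts (header plus header digest, when negotiated) and plen covers header, header digest, payload and data digest. A hypothetical helper showing just the arithmetic (names mine; 24 stands in for sizeof(struct nvme_tcp_data_pdu)):

#include <stdint.h>
#include <stdio.h>

#define NVME_TCP_DIGEST_LENGTH 4

/* Mirrors the arithmetic at lines 466-470: hdr_len would be
 * sizeof(struct nvme_tcp_data_pdu), i.e. 24, in the kernel. */
static void c2h_lengths(uint32_t hdr_len, uint32_t data_len,
			int hdgst_on, int ddgst_on,
			uint32_t *pdo, uint32_t *plen)
{
	uint32_t hdgst = hdgst_on ? NVME_TCP_DIGEST_LENGTH : 0;
	uint32_t ddgst = ddgst_on ? NVME_TCP_DIGEST_LENGTH : 0;

	*pdo  = hdr_len + hdgst;			/* payload starts here    */
	*plen = hdr_len + hdgst + data_len + ddgst;	/* everything on the wire */
}

int main(void)
{
	uint32_t pdo, plen;

	c2h_lengths(24, 4096, 1, 1, &pdo, &plen);
	printf("pdo=%u plen=%u\n", pdo, plen);	/* pdo=28 plen=4128 */
	return 0;
}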
488 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
495 pdu->hdr.type = nvme_tcp_r2t;
496 pdu->hdr.flags = 0;
497 pdu->hdr.hlen = sizeof(*pdu);
498 pdu->hdr.pdo = 0;
499 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
501 pdu->command_id = cmd->req.cmd->common.command_id;
502 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
503 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
504 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
506 pdu->hdr.flags |= NVME_TCP_F_HDGST;
507 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
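Lines 488-507 (nvmet_tcp_setup_r2t_pdu) build an R2T, which carries no payload: pdo is 0 and plen is header plus optional header digest only. The window it requests is simply "everything not yet received", and ttag lets the target match the host's later H2C DATA PDUs back to this command. A sketch of the window math at lines 503-504 (struct and names mine):

#include <stdint.h>
#include <stdio.h>

struct r2t_window {
	uint32_t offset;	/* pdu->r2t_offset: bytes already received */
	uint32_t length;	/* pdu->r2t_length: bytes still expected   */
};

static struct r2t_window next_r2t(uint32_t transfer_len, uint32_t rbytes_done)
{
	struct r2t_window w = {
		.offset = rbytes_done,
		.length = transfer_len - rbytes_done,	/* ask for the rest */
	};

	return w;
}

int main(void)
{
	struct r2t_window w = next_r2t(128 * 1024, 32 * 1024);

	printf("R2T: offset=%u length=%u\n", w.offset, w.length);
	return 0;
}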
513 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
520 pdu->hdr.type = nvme_tcp_rsp;
521 pdu->hdr.flags = 0;
522 pdu->hdr.hlen = sizeof(*pdu);
523 pdu->hdr.pdo = 0;
524 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
526 pdu->hdr.flags |= NVME_TCP_F_HDGST;
527 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
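Lines 513-527 (nvmet_tcp_setup_response_pdu) frame the capsule response, which is all header: the 16-byte NVMe completion entry sits inside hlen, pdo stays 0, and plen adds only the optional header digest. A size-check sketch (layouts re-declared from include/linux/nvme-tcp.h, trimmed):

#include <stdint.h>
#include <stdio.h>

struct nvme_tcp_hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };

struct nvme_completion {	/* the 16-byte NVMe CQE */
	uint64_t result;
	uint16_t sq_head, sq_id, command_id, status;
};

struct nvme_tcp_rsp_pdu {
	struct nvme_tcp_hdr hdr;
	struct nvme_completion cqe;	/* completion rides inside the header */
};

int main(void)
{
	unsigned hdgst = 4;	/* assume header digest was negotiated */

	printf("hlen=%zu plen=%zu\n",
	       sizeof(struct nvme_tcp_rsp_pdu),
	       sizeof(struct nvme_tcp_rsp_pdu) + hdgst);	/* 24 and 28 */
	return 0;
}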
892 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
893 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
899 pr_err("bad nvme-tcp pdu length (%d)\n",
900 le32_to_cpu(icreq->hdr.plen));
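Lines 892-899 open nvmet_tcp_handle_icreq(). Note that icreq and icresp point into the same queue->pdu union, so the response is built in place over the request; any request field still needed (notably the digest bits) has to be read before the response is zeroed. The plen check at 899 rejects an ICReq whose advertised length is not exactly sizeof(struct nvme_tcp_icreq_pdu). A compressed userspace model (fields trimmed; the real PDUs pad to 128 bytes):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Compressed ICReq/ICResp layouts; the real PDUs pad to 128 bytes. */
struct ic_hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };
struct icreq  { struct ic_hdr hdr; uint16_t pfv; uint8_t hpda, digest; uint32_t maxr2t;  };
struct icresp { struct ic_hdr hdr; uint16_t pfv; uint8_t cpda, digest; uint32_t maxdata; };

/* Like nvmet_tcp_handle_icreq(), request and response share one buffer,
 * so every request field still needed is read before the memset. */
static int handle_icreq(void *pdu_buf, uint32_t maxh2cdata)
{
	struct icreq *req = pdu_buf;
	struct icresp *rsp = pdu_buf;
	uint8_t digest = req->digest;	/* save before clobbering */

	if (req->pfv != 0)		/* NVME_TCP_PFV_1_0 == 0 */
		return -1;

	memset(rsp, 0, sizeof(*rsp));
	rsp->hdr.type = 0x01;		/* nvme_tcp_icresp */
	rsp->hdr.hlen = sizeof(*rsp);	/* 128 in the real protocol */
	rsp->hdr.plen = rsp->hdr.hlen;	/* all header, no data section */
	rsp->digest   = digest;		/* echo the negotiated digest bits */
	rsp->maxdata  = maxh2cdata;	/* H2C data we accept per PDU */
	return 0;
}

int main(void)
{
	unsigned char buf[sizeof(struct icreq)] = { 0 };

	((struct icreq *)buf)->digest = 0x3;	/* host asks for hdgst + ddgst */
	if (handle_icreq(buf, 0x20000) == 0)
		printf("icresp: digest=%#x maxdata=%u\n",
		       (unsigned)((struct icresp *)buf)->digest,
		       ((struct icresp *)buf)->maxdata);
	return 0;
}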
959 * we don't, we can simply prepare for the next pdu and bail out,
983 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
1032 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1033 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1039 pr_err("unexpected pdu type (%d) before icreq\n",
1048 pr_err("queue %d: received icreq pdu in state %d\n",
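Lines 959-1048 cover the receive path: the comment at 959 concerns preparing to receive the next PDU after a failed request, nvmet_tcp_handle_h2c_data_pdu() (983) takes inbound host data, and nvmet_tcp_done_recv_pdu() (1032-1048) gatekeeps by queue state. Before the initialization handshake only an ICReq is legal, and an ICReq on an already-connected queue is a protocol error; the pr_err strings at 1039 and 1048 are exactly those two rejections. Sketched:

#include <stdio.h>

enum q_state  { Q_CONNECTING, Q_CONNECTED, Q_DISCONNECTING };
enum pdu_type { PDU_ICREQ = 0x00, PDU_CMD = 0x04, PDU_H2C_DATA = 0x06 };

/* Sketch of the gatekeeping in nvmet_tcp_done_recv_pdu(): before the
 * handshake only ICReq is legal; after it, ICReq is a protocol error.
 * The kernel follows each rejection with a fatal error and -EPROTO. */
static int check_pdu_vs_state(enum q_state state, enum pdu_type type)
{
	if (state == Q_CONNECTING && type != PDU_ICREQ) {
		fprintf(stderr, "unexpected pdu type (%d) before icreq\n", type);
		return -1;
	}
	if (state != Q_CONNECTING && type == PDU_ICREQ) {
		fprintf(stderr, "received icreq pdu in state %d\n", state);
		return -1;
	}
	return 0;	/* dispatch on type: command, data, ... */
}

int main(void)
{
	check_pdu_vs_state(Q_CONNECTING, PDU_CMD);	/* rejected */
	check_pdu_vs_state(Q_CONNECTED, PDU_ICREQ);	/* rejected */
	return check_pdu_vs_state(Q_CONNECTED, PDU_CMD);/* accepted */
}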
1178 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1185 iov.iov_base = (void *)&queue->pdu + queue->offset;
1210 pr_err("unexpected pdu type %d\n", hdr->type);
1216 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1225 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1231 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
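Lines 1178-1231 (nvmet_tcp_try_recv_pdu) do a resumable, two-phase header read on a nonblocking socket: first the 8-byte common header, then, once hlen is known, the rest of the header into &queue->pdu at queue->offset, returning early whenever the socket runs dry. Bad type or hlen values are rejected (1210, 1216), the header digest is verified (1225), and the data-digest flag is sanity-checked (1231) before any payload is touched. A self-contained userspace model of the resumable read (error handling simplified):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

struct nvme_tcp_hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };

/* Userspace model of nvmet_tcp_try_recv_pdu(): 'offset' and 'left'
 * persist across calls, so a short read just parks the state until the
 * socket is readable again. Returns 1 when the full hlen-byte header is
 * in buf, 0 to retry later, -1 on error. The kernel additionally extends
 * 'left' by 4 when a header digest was negotiated. */
static int try_recv_pdu(int fd, void *buf, size_t buflen,
			size_t *offset, size_t *left)
{
	struct nvme_tcp_hdr *hdr = buf;

	if (*offset == 0)
		*left = sizeof(*hdr);		/* phase 1: common header */

	while (*left) {
		ssize_t n = recv(fd, (uint8_t *)buf + *offset, *left,
				 MSG_DONTWAIT);

		if (n == 0)
			return -1;		/* peer closed */
		if (n < 0)
			return errno == EAGAIN ? 0 : -1;
		*offset += n;
		*left -= n;

		/* phase 2: the common header is in; extend the read to the
		 * full header length the peer advertised, bounds-checked
		 * ("pdu %d bad hlen %d" in the kernel). */
		if (*offset == sizeof(*hdr)) {
			if (hdr->hlen < sizeof(*hdr) || hdr->hlen > buflen)
				return -1;
			*left = hdr->hlen - sizeof(*hdr);
		}
	}
	return 1;	/* header complete: verify hdgst, then fetch payload */
}

int main(void)
{
	int sv[2];
	uint8_t wire[24] = { 4, 0, 24, 0 };	/* type=cmd, hlen=24 */
	uint8_t buf[128];
	size_t offset = 0, left = 0;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[1], wire, 5, 0);		/* header arrives in pieces */
	printf("pass 1: %d\n", try_recv_pdu(sv[0], buf, sizeof(buf), &offset, &left));
	send(sv[1], wire + 5, sizeof(wire) - 5, 0);
	printf("pass 2: %d\n", try_recv_pdu(sv[0], buf, sizeof(buf), &offset, &left));
	return 0;
}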
1313 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1314 queue->idx, cmd->req.cmd->common.command_id,
1315 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1316 le32_to_cpu(cmd->exp_ddgst));
1744 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1747 .iov_base = (u8 *)&queue->pdu + queue->offset,
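Lines 1744-1747 are from nvmet_tcp_try_peek_pdu(), which examines the first bytes without consuming them; upstream added this with the TLS work so the target can tell a cleartext ICReq from a TLS handshake record before deciding who owns the stream. A userspace equivalent of the probe:

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

struct nvme_tcp_hdr { uint8_t type, flags, hlen, pdo; uint32_t plen; };

/* MSG_PEEK leaves the bytes queued, so whoever handles the stream next
 * (cleartext NVMe/TCP or the TLS handshake code) still sees them from
 * the start. */
static int peek_first_pdu(int fd, struct nvme_tcp_hdr *hdr)
{
	struct iovec iov = { .iov_base = hdr, .iov_len = sizeof(*hdr) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n = recvmsg(fd, &msg, MSG_PEEK | MSG_DONTWAIT);

	if (n < (ssize_t)sizeof(*hdr))
		return -1;	/* not enough buffered yet; try again */

	/* 0x00 is an ICReq; 0x16 would be a TLS handshake record. */
	printf("first byte on the wire: %#x\n", hdr->type);
	return 0;
}

The kernel does the same with kernel_recvmsg() into the queue->pdu buffer shown at 1747.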