Lines matching refs: pdu (NVMe/TCP host driver, drivers/nvme/host/tcp.c)

104 	void			*pdu;
150 void *pdu;
245 return req->pdu;
250 /* use the pdu space in the back for the data pdu */
251 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
252 	sizeof(struct nvme_tcp_data_pdu);
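
These lines carry a small allocation trick: the per-request buffer is sized for the larger command PDU, and the data (H2CData) PDU header is carved out of its tail, so a single allocation serves both. A minimal userspace sketch of the same pointer arithmetic, with simplified stand-in structs (the real layouts live in include/linux/nvme-tcp.h):

/* stand-in layouts; only the size relationship matters here */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_pdu  { char hdr[24]; char sqe[64]; };	/* the larger PDU */
struct data_pdu { char hdr[24]; };			/* fits in its tail */

static void *req_data_pdu(void *cmd_pdu_mem)
{
	/* use the pdu space in the back for the data pdu */
	return (char *)cmd_pdu_mem + sizeof(struct cmd_pdu)
				   - sizeof(struct data_pdu);
}

int main(void)
{
	void *mem = malloc(sizeof(struct cmd_pdu));
	struct data_pdu *d;

	if (!mem)
		return 1;
	d = req_data_pdu(mem);
	/* the data PDU ends exactly where the allocation ends */
	assert((char *)d + sizeof(*d) == (char *)mem + sizeof(struct cmd_pdu));
	printf("data pdu at offset %td\n", (char *)d - (char *)mem);
	free(mem);
	return 0;
}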
448 void *pdu, size_t len)
452 sg_init_one(&sg, pdu, len);
453 ahash_request_set_crypt(hash, &sg, pdu + len, len);
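
nvme_tcp_hdgst() maps the header into a one-entry scatterlist and has the kernel crypto API write the digest directly after the header bytes (pdu + len). NVMe/TCP's header digest is CRC32C, which the driver obtains as a "crc32c" ahash transform. A userspace sketch of the same "digest appended after the header" layout, using a bitwise CRC32C purely for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bitwise CRC32C (Castagnoli); stand-in for the kernel's "crc32c" ahash */
static uint32_t crc32c(const uint8_t *buf, size_t len)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x82f63b78 : crc >> 1;
	}
	return ~crc;
}

/* mirrors the nvme_tcp_hdgst() layout: digest lands at pdu + len */
static void append_hdgst(void *pdu, size_t len)
{
	uint32_t digest = crc32c(pdu, len);

	/* the on-wire digest is little-endian; a plain copy matches on LE hosts */
	memcpy((uint8_t *)pdu + len, &digest, sizeof(digest));
}

int main(void)
{
	uint8_t pdu[24 + 4] = { 0x04, 0x01, 24 };	/* toy header + digest room */
	uint32_t d;

	append_hdgst(pdu, 24);
	memcpy(&d, pdu + 24, 4);
	printf("hdgst = 0x%08x\n", d);
	return 0;
}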
458 void *pdu, size_t pdu_len)
460 struct nvme_tcp_hdr *hdr = pdu;
471 recv_digest = *(__le32 *)(pdu + hdr->hlen);
472 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
473 exp_digest = *(__le32 *)(pdu + hdr->hlen);
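
The verify path is a save/recompute/compare: the digest the target sent sits at pdu + hdr->hlen, it is stashed in recv_digest, nvme_tcp_hdgst() then overwrites that same slot with the locally computed value, and the two are compared. A compact sketch of that control flow, with a hypothetical digest_fn callback standing in for the queue's ahash request:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for nvme_tcp_hdgst(): writes a 4-byte digest at pdu + hlen */
typedef void (*digest_fn)(void *pdu, size_t hlen);

static bool verify_hdgst(void *pdu, size_t hlen, digest_fn recompute)
{
	uint32_t recv_digest, exp_digest;

	memcpy(&recv_digest, (uint8_t *)pdu + hlen, 4);	/* as received */
	recompute(pdu, hlen);				/* overwrite in place */
	memcpy(&exp_digest, (uint8_t *)pdu + hlen, 4);	/* expected value */
	return recv_digest == exp_digest;
}

/* toy digest for the demo only: XOR-fold of the header bytes */
static void demo_digest(void *pdu, size_t hlen)
{
	uint32_t d = 0;

	for (size_t i = 0; i < hlen; i++)
		d ^= (uint32_t)((uint8_t *)pdu)[i] << (8 * (i % 4));
	memcpy((uint8_t *)pdu + hlen, &d, 4);
}

int main(void)
{
	uint8_t pdu[8 + 4] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	demo_digest(pdu, 8);	/* the "sender" appends its digest */
	printf("%s\n", verify_hdgst(pdu, 8, demo_digest) ? "ok" : "corrupt");
	return 0;
}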
484 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
486 struct nvme_tcp_hdr *hdr = pdu;
509 page_frag_free(req->pdu);
518 struct nvme_tcp_cmd_pdu *pdu;
523 req->pdu = page_frag_alloc(&queue->pf_cache,
526 if (!req->pdu)
529 pdu = req->pdu;
532 nvme_req(rq)->cmd = &pdu->cmd;
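
Per-request command PDUs come from a per-queue page-fragment cache rather than individual kmallocs, sized with extra room for the optional header digest; nvme_req(rq)->cmd is pointed at the PDU's embedded SQE so the NVMe core fills the command in place, and page_frag_free() (line 509) releases it at teardown. A hedged, kernel-context reconstruction of the allocation step, not compilable on its own (surrounding declarations elided; details vary by kernel version):

/* inside the request init path; hdgst is nvme_tcp_hdgst_len(queue),
 * i.e. 4 when header digest is negotiated, else 0 */
req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
if (!req->pdu)
	return -ENOMEM;

pdu = req->pdu;
req->queue = queue;
nvme_req(rq)->cmd = &pdu->cmd;	/* the core writes the SQE here directly */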
610 struct nvme_tcp_data_pdu *pdu)
614 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
618 pdu->command_id, nvme_tcp_queue_id(queue));
629 queue->data_remaining = le32_to_cpu(pdu->data_length);
631 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
632 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
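
The C2HData handler resolves command_id back to a request, latches data_length into data_remaining, and rejects one protocol violation up front: SUCCESS (meaning the target will elide the response capsule) is only coherent on the final data PDU, so SUCCESS without LAST is an error. A self-contained check using the NVMe/TCP flag bits (values mirrored from include/linux/nvme-tcp.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* PDU header flag bits as defined by NVMe/TCP */
enum {
	NVME_TCP_F_HDGST	= 1 << 0,
	NVME_TCP_F_DDGST	= 1 << 1,
	NVME_TCP_F_DATA_LAST	= 1 << 2,
	NVME_TCP_F_DATA_SUCCESS	= 1 << 3,
};

/* SUCCESS elides the response capsule, so it only makes sense on the
 * final C2HData PDU of a transfer */
static bool c2h_flags_valid(uint8_t flags)
{
	if ((flags & NVME_TCP_F_DATA_SUCCESS) &&
	    !(flags & NVME_TCP_F_DATA_LAST))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", c2h_flags_valid(NVME_TCP_F_DATA_SUCCESS));	/* 0 */
	printf("%d\n", c2h_flags_valid(NVME_TCP_F_DATA_SUCCESS |
				       NVME_TCP_F_DATA_LAST));		/* 1 */
	return 0;
}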
644 struct nvme_tcp_rsp_pdu *pdu)
646 struct nvme_completion *cqe = &pdu->cqe;
700 struct nvme_tcp_r2t_pdu *pdu)
704 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
705 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
707 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
711 pdu->command_id, nvme_tcp_queue_id(queue));
740 req->ttag = pdu->ttag;
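
For R2T, the target grants a send window (r2t_offset, r2t_length) plus a transfer tag that the host must echo in its H2CData PDUs, hence req->ttag = pdu->ttag. The driver validates the grant before building the data PDU; a sketch of the kind of bounds checks involved (field names here are stand-ins for the driver's request-tracking state, not its exact logic):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical names: data_sent = bytes already transmitted,
 * data_len = total payload length of the request */
static bool r2t_window_valid(uint32_t r2t_offset, uint32_t r2t_length,
			     uint32_t data_sent, uint32_t data_len)
{
	if (!r2t_length)
		return false;		/* empty grant makes no progress */
	if (r2t_offset < data_sent)
		return false;		/* overlaps data already sent */
	if (r2t_offset + r2t_length > data_len)
		return false;		/* grant exceeds the request */
	return true;
}

int main(void)
{
	printf("%d\n", r2t_window_valid(0, 4096, 0, 8192));		/* 1 */
	printf("%d\n", r2t_window_valid(4096, 8192, 4096, 8192));	/* 0 */
	return 0;
}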
752 char *pdu = queue->pdu;
757 &pdu[queue->pdu_offset], rcv_len);
768 hdr = queue->pdu;
770 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
777 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
784 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
787 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
790 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
793 "unsupported pdu type (%d)\n", hdr->type);
809 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
811 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
865 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
880 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
898 pdu->command_id);
909 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
911 pdu->command_id);
1097 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1102 int len = sizeof(*pdu) + hdgst - req->offset;
1111 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1113 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1138 struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1142 int len = sizeof(*pdu) - req->offset + hdgst;
1146 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1151 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
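
Both header send paths (the command PDU at 1097–1113 and the H2CData PDU at 1138–1151) share one resume pattern: req->offset counts the header bytes already pushed into the socket, so the remaining length is sizeof(*pdu) + hdgst - req->offset and the bvec is aimed at (void *)pdu + req->offset; a short write simply leaves the offset advanced for the next attempt. A self-contained sketch of that arithmetic against a simulated short-writing socket:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PDU_LEN 72	/* stand-in for sizeof(*pdu) + hdgst */

/* pretend socket that accepts at most 32 bytes per call */
static int short_send(const uint8_t *buf, int len)
{
	(void)buf;
	return len > 32 ? 32 : len;
}

int main(void)
{
	uint8_t pdu[PDU_LEN];
	int offset = 0;		/* plays the role of req->offset */

	memset(pdu, 0xab, sizeof(pdu));
	while (offset < PDU_LEN) {
		int len = PDU_LEN - offset;	/* remaining header bytes */
		int sent = short_send(pdu + offset, len);

		offset += sent;	/* resume point for the next attempt */
		printf("sent %d, offset now %d\n", sent, offset);
	}
	return 0;
}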
1340 page_frag_free(async->pdu);
1349 async->pdu = page_frag_alloc(&queue->pf_cache,
1352 if (!async->pdu)
1379 kfree(queue->pdu);
1459 pr_err("queue %d: bad pdu length returned %d\n",
1753 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1754 if (!queue->pdu) {
1788 kfree(queue->pdu);
1879 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1882 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
2405 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2406 struct nvme_command *cmd = &pdu->cmd;
2409 memset(pdu, 0, sizeof(*pdu));
2410 pdu->hdr.type = nvme_tcp_cmd;
2412 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2413 pdu->hdr.hlen = sizeof(*pdu);
2414 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2442 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2443 struct nvme_command *cmd = &pdu->cmd;
2448 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
2481 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2482 struct nvme_command *c = &pdu->cmd;
2501 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2527 pdu->hdr.type = nvme_tcp_cmd;
2528 pdu->hdr.flags = 0;
2530 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2532 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2535 pdu->hdr.hlen = sizeof(*pdu);
2536 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2537 pdu->hdr.plen =
2538 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
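
These last lines fill in the command PDU header: hlen is the bare header size, pdo (PDU data offset) points past the header and its digest only when in-capsule data follows, and plen is the full on-wire length including both digests. The async-event setup at 2405–2414 is the degenerate case with no data, so its plen is just hlen + hdgst. A self-contained sketch of the arithmetic (parameter names mirror the driver's; 72 below assumes the common 8-byte header plus a 64-byte SQE, and each digest contributes 4 bytes only when negotiated):

#include <stdint.h>
#include <stdio.h>

struct pdu_lens {
	uint8_t  hlen;	/* header length */
	uint8_t  pdo;	/* data offset; 0 if no in-capsule data */
	uint32_t plen;	/* total PDU length on the wire */
};

static struct pdu_lens compute_lens(uint8_t hlen, uint8_t hdgst,
				    uint32_t data_len, uint8_t ddgst)
{
	struct pdu_lens l = {
		.hlen = hlen,
		.pdo  = data_len ? hlen + hdgst : 0,
		/* the data digest only exists when there is data */
		.plen = hlen + hdgst + data_len + (data_len ? ddgst : 0),
	};
	return l;
}

int main(void)
{
	/* write with 4 KiB of in-capsule data, both digests enabled */
	struct pdu_lens w = compute_lens(72, 4, 4096, 4);
	/* async event command: no data, header digest only */
	struct pdu_lens a = compute_lens(72, 4, 0, 4);

	printf("write: hlen=%u pdo=%u plen=%u\n", w.hlen, w.pdo, w.plen);
	printf("aer:   hlen=%u pdo=%u plen=%u\n", a.hlen, a.pdo, a.plen);
	return 0;
}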