Lines Matching refs:srx
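The hits below appear to index every use of the receive-stream context in the siw (SoftiWARP) receive path, apparently drivers/infiniband/sw/siw/siw_qp_rx.c; the number at the start of each hit is the file's own line number. Taken together, the matched fields outline the shape of the context structure. The following is a reduced sketch reconstructed from these hits only; types, field order and the array bound are assumptions, not the driver's actual header:

/* Reconstructed sketch, not the driver's definition: names from the hits, types assumed. */
struct siw_rx_stream {
        struct sk_buff  *skb;           /* current TCP segment being drained */
        int             skb_new;        /* unread bytes left in skb */
        int             skb_offset;     /* current read position within skb */
        int             skb_copied;     /* bytes consumed from skb so far */

        union iwarp_hdr hdr;            /* header of the FPDU under assembly */
        struct mpa_trailer trailer;     /* pad bytes + CRC32c trailer */
        struct shash_desc *mpa_crc_hd;  /* running CRC state, NULL if CRC is off */

        int             state;          /* SIW_GET_HDR / _DATA_START / _DATA_MORE / _TRAILER */
        int             fpdu_part_rem;  /* bytes still owed for the current FPDU part */
        int             fpdu_part_rcvd; /* bytes of the current part already received */
        int             pad;            /* trailer pad length, 0..3 */

        u64             ddp_to;         /* tagged placement offset */
        u32             ddp_stag;       /* tagged placement STag */
        u32             ddp_msn[RDMAP_UNTAGGED_QN_COUNT]; /* expected MSN per untagged queue */
        u32             inval_stag;     /* STag carried by SEND with Invalidate */
        u8              rx_suspend : 1; /* discard remaining data while the QP winds down */
};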

24  * @srx:	Receive Context
29 static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
42 __func__, qp_id(rx_qp(srx)),
46 srx->skb_copied += copied;
47 srx->skb_new -= copied;
54 siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
57 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
62 srx->skb_copied += copied;
63 srx->skb_new -= copied;
66 qp_id(rx_qp(srx)), __func__, len, p, rv);
70 if (srx->mpa_crc_hd) {
71 if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
72 crypto_shash_update(srx->mpa_crc_hd,
87 siw_crc_skb(srx, bytes);
92 srx->skb_offset += bytes;
98 srx->skb_copied += copied;
99 srx->skb_new -= copied;
104 static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
108 siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
110 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
113 qp_id(rx_qp(srx)), __func__, len, kva, rv);
117 if (srx->mpa_crc_hd)
118 crypto_shash_update(srx->mpa_crc_hd, (u8 *)kva, len);
120 srx->skb_offset += len;
121 srx->skb_copied += len;
122 srx->skb_new -= len;
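The siw_rx_kva() hits (file lines 104-122) show the simplest placement path: one skb_copy_bits() into a kernel virtual address, an optional CRC update over the placed bytes, and all three stream counters advanced by the same length. A minimal sketch of that pattern, with the debug print and warning of the real function reduced to comments:

static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
        int rv;

        /* Pull 'len' payload bytes out of the current TCP segment. */
        rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
        if (unlikely(rv))
                return rv;      /* real code warns with qp_id()/__func__ first */

        /* Fold the placed bytes into the running MPA CRC, if enabled. */
        if (srx->mpa_crc_hd)
                crypto_shash_update(srx->mpa_crc_hd, (u8 *)kva, len);

        /* Account for the consumed segment bytes. */
        srx->skb_offset += len;
        srx->skb_copied += len;
        srx->skb_new -= len;

        return len;
}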
127 static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
142 if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
165 static int siw_rresp_check_ntoh(struct siw_rx_stream *srx,
168 struct iwarp_rdma_rresp *rresp = &srx->hdr.rresp;
176 srx->ddp_stag = wqe->sqe.sge[0].lkey;
177 srx->ddp_to = wqe->sqe.sge[0].laddr;
190 if (unlikely(srx->ddp_stag != sink_stag)) {
192 qp_id(rx_qp(srx)), sink_stag, srx->ddp_stag);
196 if (unlikely(srx->ddp_to != sink_to)) {
198 qp_id(rx_qp(srx)), (unsigned long long)sink_to,
199 (unsigned long long)srx->ddp_to);
204 (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {
206 qp_id(rx_qp(srx)),
207 wqe->processed + srx->fpdu_part_rem, wqe->bytes);
213 siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
229 static int siw_write_check_ntoh(struct siw_rx_stream *srx,
232 struct iwarp_rdma_write *write = &srx->hdr.rwrite;
239 srx->ddp_stag = sink_stag;
240 srx->ddp_to = sink_to;
243 if (unlikely(srx->ddp_stag != sink_stag)) {
245 qp_id(rx_qp(srx)), sink_stag,
246 srx->ddp_stag);
250 if (unlikely(srx->ddp_to != sink_to)) {
252 qp_id(rx_qp(srx)),
254 (unsigned long long)srx->ddp_to);
261 siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
277 static int siw_send_check_ntoh(struct siw_rx_stream *srx,
280 struct iwarp_send_inv *send = &srx->hdr.send_inv;
290 qp_id(rx_qp(srx)), ddp_qn);
294 if (unlikely(ddp_msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND])) {
296 qp_id(rx_qp(srx)), ddp_msn,
297 srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
303 qp_id(rx_qp(srx)), ddp_mo, wqe->processed);
314 srx->inval_stag = be32_to_cpu(send->inval_stag);
316 if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) {
317 siw_dbg_qp(rx_qp(srx), "receive space short: %d - %d < %d\n",
318 wqe->bytes, wqe->processed, srx->fpdu_part_rem);
325 siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
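The three *_check_ntoh() helpers (file lines 165-325) share one job: on the first fragment of a message, latch or verify the byte-swapped DDP/RDMAP header fields against local state, and arm a DDP-layer TERMINATE via siw_init_terminate() on any mismatch. The checks made for an inbound SEND, condensed into a hypothetical helper (name and wrapper are illustrative; the individual tests mirror the hits):

static int send_untagged_checks(struct siw_rx_stream *srx, struct siw_wqe *wqe,
                                u32 ddp_qn, u32 ddp_msn, u32 ddp_mo)
{
        if (ddp_qn != RDMAP_UNTAGGED_QN_SEND)
                return -EINVAL; /* SEND must target the SEND queue number */
        if (ddp_msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND])
                return -EINVAL; /* message sequence number out of order */
        if (ddp_mo != wqe->processed)
                return -EINVAL; /* message offset must equal bytes already placed */
        if (wqe->bytes < wqe->processed + srx->fpdu_part_rem)
                return -EINVAL; /* posted receive too small for this fragment */
        return 0;               /* SEND with Invalidate also latches srx->inval_stag here */
}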
408 static int siw_rx_data(struct siw_mem *mem_p, struct siw_rx_stream *srx,
414 rv = siw_rx_kva(srx, ib_virt_dma_to_ptr(addr), bytes);
416 rv = siw_rx_umem(srx, mem_p->umem, addr, bytes);
418 rv = siw_rx_pbl(srx, pbl_idx, mem_p, addr, bytes);
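siw_rx_data() (file lines 408-418) is only a dispatcher: the memory object backing the placement target selects the copy helper. A condensed sketch, assuming the usual three-way split; the guard expressions are assumptions, only the three calls appear in the hits:

/* Placement dispatch: kernel VA, pinned user memory, or physical buffer list. */
if (mem_p->mem_obj == NULL)
        rv = siw_rx_kva(srx, ib_virt_dma_to_ptr(addr), bytes);
else if (mem_p->umem)
        rv = siw_rx_umem(srx, mem_p->umem, addr, bytes);
else
        rv = siw_rx_pbl(srx, pbl_idx, mem_p, addr, bytes);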
437 struct siw_rx_stream *srx = &qp->rx_stream;
455 if (srx->state == SIW_GET_DATA_START) {
456 rv = siw_send_check_ntoh(srx, frx);
461 if (!srx->fpdu_part_rem) /* zero length SEND */
464 data_bytes = min(srx->fpdu_part_rem, srx->skb_new);
502 rv = siw_rx_data(mem_p, srx, &frx->pbl_idx,
522 srx->fpdu_part_rem -= rv;
523 srx->fpdu_part_rcvd += rv;
527 if (!srx->fpdu_part_rem)
547 struct siw_rx_stream *srx = &qp->rx_stream;
552 if (srx->state == SIW_GET_DATA_START) {
553 if (!srx->fpdu_part_rem) /* zero length WRITE */
556 rv = siw_write_check_ntoh(srx, frx);
562 bytes = min(srx->fpdu_part_rem, srx->skb_new);
567 rx_mem(frx) = siw_mem_id2obj(qp->sdev, srx->ddp_stag >> 8);
571 srx->ddp_stag);
588 if (unlikely(mem->stag != srx->ddp_stag)) {
594 rv = siw_check_mem(qp->pd, mem, srx->ddp_to + srx->fpdu_part_rcvd,
606 rv = siw_rx_data(mem, srx, &frx->pbl_idx,
607 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
614 srx->fpdu_part_rem -= rv;
615 srx->fpdu_part_rcvd += rv;
617 if (!srx->fpdu_part_rem) {
618 srx->ddp_to += srx->fpdu_part_rcvd;
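The siw_proc_write() hits (file lines 547-618) outline tagged placement: resolve the target memory from the STag carried in the header (STag index = stag >> 8), verify ownership and access rights, place at ddp_to plus whatever has already arrived, and advance ddp_to only once the FPDU part completes. A hedged outline; the first-segment guard, the access-rights argument and the plain error returns are assumptions, the rest follows the hits:

if (srx->state == SIW_GET_DATA_START) {
        if (!srx->fpdu_part_rem)
                return 0;                       /* zero length WRITE */
        rv = siw_write_check_ntoh(srx, frx);    /* latch/verify sink stag and offset */
        if (unlikely(rv))
                return rv;
}
bytes = min(srx->fpdu_part_rem, srx->skb_new);

if (!rx_mem(frx))                               /* first segment: resolve the target MR */
        rx_mem(frx) = siw_mem_id2obj(qp->sdev, srx->ddp_stag >> 8);
mem = rx_mem(frx);
if (unlikely(!mem || mem->stag != srx->ddp_stag))
        return -EINVAL;                         /* real code arms a TERMINATE here */

rv = siw_check_mem(qp->pd, mem, srx->ddp_to + srx->fpdu_part_rcvd,
                   IB_ACCESS_REMOTE_WRITE, bytes);
if (unlikely(rv))
        return rv;

rv = siw_rx_data(mem, srx, &frx->pbl_idx,
                 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
if (rv < 0)
        return rv;

srx->fpdu_part_rem -= rv;
srx->fpdu_part_rcvd += rv;
if (!srx->fpdu_part_rem)
        srx->ddp_to += srx->fpdu_part_rcvd;     /* the next FPDU of this WRITE continues here */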
629 struct siw_rx_stream *srx = &qp->rx_stream;
631 if (!srx->fpdu_part_rem)
635 be16_to_cpu(srx->hdr.ctrl.mpa_len));
656 static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
661 uint64_t raddr = be64_to_cpu(srx->hdr.rreq.sink_to),
662 laddr = be64_to_cpu(srx->hdr.rreq.source_to);
663 uint32_t length = be32_to_cpu(srx->hdr.rreq.read_size),
664 lkey = be32_to_cpu(srx->hdr.rreq.source_stag),
665 rkey = be32_to_cpu(srx->hdr.rreq.sink_stag),
666 msn = be32_to_cpu(srx->hdr.rreq.ddp_msn);
671 if (unlikely(msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ])) {
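siw_init_rresp() (file lines 656-671) turns an inbound RDMA READ REQUEST into a locally generated READ RESPONSE work element. Every parameter is lifted straight from the byte-swapped RREQ header, and the request is only honoured if its MSN matches the expected untagged read-queue sequence number; the error return below is a placeholder for the real rejection path:

uint64_t raddr  = be64_to_cpu(srx->hdr.rreq.sink_to);           /* peer's sink buffer */
uint64_t laddr  = be64_to_cpu(srx->hdr.rreq.source_to);         /* local source buffer */
uint32_t length = be32_to_cpu(srx->hdr.rreq.read_size);
uint32_t lkey   = be32_to_cpu(srx->hdr.rreq.source_stag);
uint32_t rkey   = be32_to_cpu(srx->hdr.rreq.sink_stag);
uint32_t msn    = be32_to_cpu(srx->hdr.rreq.ddp_msn);

if (unlikely(msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]))
        return -EPROTO;         /* placeholder: real code rejects the request here */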
784 struct siw_rx_stream *srx = &qp->rx_stream;
807 rv = siw_rresp_check_ntoh(srx, frx);
820 if (!srx->fpdu_part_rem) /* zero length RRESPONSE */
847 bytes = min(srx->fpdu_part_rem, srx->skb_new);
848 rv = siw_rx_data(mem_p, srx, &frx->pbl_idx,
855 srx->fpdu_part_rem -= rv;
856 srx->fpdu_part_rcvd += rv;
859 if (!srx->fpdu_part_rem) {
860 srx->ddp_to += srx->fpdu_part_rcvd;
871 static void siw_update_skb_rcvd(struct siw_rx_stream *srx, u16 length)
873 srx->skb_offset += length;
874 srx->skb_new -= length;
875 srx->skb_copied += length;
880 struct siw_rx_stream *srx = &qp->rx_stream;
881 struct sk_buff *skb = srx->skb;
882 struct iwarp_terminate *term = &srx->hdr.terminate;
910 if (srx->skb_new < sizeof(struct iwarp_ctrl_tagged))
915 skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
922 siw_update_skb_rcvd(srx, to_copy);
923 srx->fpdu_part_rcvd += to_copy;
924 srx->fpdu_part_rem -= to_copy;
929 if (to_copy + MPA_CRC_SIZE > srx->skb_new)
932 skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
944 siw_update_skb_rcvd(srx, to_copy);
945 srx->fpdu_part_rcvd += to_copy;
946 srx->fpdu_part_rem -= to_copy;
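The terminate-processing hits (file lines 871-946) repeat one idiom: never consume part of an embedded header unless the whole thing (and, for the trailing piece, the CRC behind it) is already in the segment; once copied, siw_update_skb_rcvd() moves all three skb counters and the fpdu_part counters follow. A fragment sketch of the second bounded copy (skb, infop and to_copy are the local names visible in the hits; the early return is an approximation):

/* Only consume the embedded header if it and the CRC behind it are present. */
if (to_copy + MPA_CRC_SIZE > srx->skb_new)
        return -EAGAIN;                 /* sketch: wait for more segment data */

skb_copy_bits(skb, srx->skb_offset, infop, to_copy);

siw_update_skb_rcvd(srx, to_copy);      /* skb_offset += / skb_new -= / skb_copied += */
srx->fpdu_part_rcvd += to_copy;
srx->fpdu_part_rem -= to_copy;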
951 static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
953 struct sk_buff *skb = srx->skb;
954 int avail = min(srx->skb_new, srx->fpdu_part_rem);
955 u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
959 srx->fpdu_part_rem, srx->skb_new, srx->pad);
961 skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
963 siw_update_skb_rcvd(srx, avail);
964 srx->fpdu_part_rem -= avail;
966 if (srx->fpdu_part_rem)
969 if (!srx->mpa_crc_hd)
972 if (srx->pad)
973 crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
978 crypto_shash_final(srx->mpa_crc_hd, (u8 *)&crc_own);
979 crc_in = (__force __wsum)srx->trailer.crc;
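siw_get_trailer() (file lines 951-979) collects the pad bytes and the 4-byte MPA CRC in one pass: because the pad precedes the CRC on the wire, the copy target starts srx->pad bytes in front of trailer.crc. When the trailer is complete, the pad is folded into the running digest, the digest is finalized and compared with the received CRC. A sketch with local __wsum temporaries; the error returns stand in for the warning and LLP-layer TERMINATE of the real code:

int avail = min(srx->skb_new, srx->fpdu_part_rem);
u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;  /* pad lands just in front of the CRC */
__wsum crc_own = 0, crc_in = 0;

skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
siw_update_skb_rcvd(srx, avail);
srx->fpdu_part_rem -= avail;

if (srx->fpdu_part_rem)
        return -EAGAIN;                 /* trailer not complete yet */
if (!srx->mpa_crc_hd)
        return 0;                       /* CRC negotiated off: FPDU done */

if (srx->pad)
        crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);

crypto_shash_final(srx->mpa_crc_hd, (u8 *)&crc_own);
crc_in = (__force __wsum)srx->trailer.crc;

if (crc_in != crc_own)
        return -EINVAL;                 /* sketch: real code warns and terminates */
return 0;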
995 static int siw_get_hdr(struct siw_rx_stream *srx)
997 struct sk_buff *skb = srx->skb;
998 struct siw_qp *qp = rx_qp(srx);
999 struct iwarp_ctrl *c_hdr = &srx->hdr.ctrl;
1004 if (srx->fpdu_part_rcvd < MIN_DDP_HDR) {
1008 bytes = min_t(int, srx->skb_new,
1009 MIN_DDP_HDR - srx->fpdu_part_rcvd);
1011 skb_copy_bits(skb, srx->skb_offset,
1012 (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
1014 siw_update_skb_rcvd(srx, bytes);
1015 srx->fpdu_part_rcvd += bytes;
1016 if (srx->fpdu_part_rcvd < MIN_DDP_HDR)
1033 siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_DDP,
1041 siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_RDMAP,
1052 siw_init_terminate(rx_qp(srx), TERM_ERROR_LAYER_RDMAP,
1057 siw_dbg_qp(rx_qp(srx), "new header, opcode %u\n", opcode);
1073 bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
1075 skb_copy_bits(skb, srx->skb_offset,
1076 (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
1078 siw_update_skb_rcvd(srx, bytes);
1079 srx->fpdu_part_rcvd += bytes;
1080 if (srx->fpdu_part_rcvd < hdrlen)
1096 if (srx->mpa_crc_hd) {
1100 crypto_shash_init(srx->mpa_crc_hd);
1101 crypto_shash_update(srx->mpa_crc_hd, (u8 *)c_hdr,
1102 srx->fpdu_part_rcvd);
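siw_get_hdr() (file lines 995-1102) assembles the iWARP header in two bounded steps: first the minimum DDP header, just enough to learn the opcode, then the opcode-specific remainder; each step copies at most skb_new bytes and reports "not yet" until fpdu_part_rcvd reaches the required length. With CRC enabled, the digest is restarted and seeded with the finished header so the payload copies above can extend it. A compressed sketch; validation of DDP/RDMAP version and opcode, which arms a TERMINATE, is reduced to a comment:

/* Step 1: collect the minimum DDP header to learn the opcode. */
if (srx->fpdu_part_rcvd < MIN_DDP_HDR) {
        bytes = min_t(int, srx->skb_new, MIN_DDP_HDR - srx->fpdu_part_rcvd);
        skb_copy_bits(skb, srx->skb_offset,
                      (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
        siw_update_skb_rcvd(srx, bytes);
        srx->fpdu_part_rcvd += bytes;
        if (srx->fpdu_part_rcvd < MIN_DDP_HDR)
                return -EAGAIN;
}
/* (DDP/RDMAP version and opcode checks live here; bad headers arm a TERMINATE) */

/* Step 2: collect the rest of the opcode-specific header. */
bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
skb_copy_bits(skb, srx->skb_offset,
              (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
siw_update_skb_rcvd(srx, bytes);
srx->fpdu_part_rcvd += bytes;
if (srx->fpdu_part_rcvd < hdrlen)
        return -EAGAIN;

/* Seed the per-FPDU CRC with the header just assembled. */
if (srx->mpa_crc_hd) {
        crypto_shash_init(srx->mpa_crc_hd);
        crypto_shash_update(srx->mpa_crc_hd, (u8 *)c_hdr, srx->fpdu_part_rcvd);
}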
1201 struct siw_rx_stream *srx = &qp->rx_stream;
1204 u8 opcode = __rdmap_get_opcode(&srx->hdr.ctrl);
1218 srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]++;
1228 rv = siw_invalidate_stag(qp->pd, srx->inval_stag);
1240 rv ? 0 : srx->inval_stag,
1254 if ((srx->state == SIW_GET_HDR &&
1297 rv = siw_init_rresp(qp, srx);
1298 srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]++;
1340 struct siw_rx_stream *srx = &qp->rx_stream;
1343 srx->skb = skb;
1344 srx->skb_new = skb->len - off;
1345 srx->skb_offset = off;
1346 srx->skb_copied = 0;
1348 siw_dbg_qp(qp, "new data, len %d\n", srx->skb_new);
1350 while (srx->skb_new) {
1353 if (unlikely(srx->rx_suspend)) {
1355 srx->skb_copied += srx->skb_new;
1358 switch (srx->state) {
1360 rv = siw_get_hdr(srx);
1362 srx->fpdu_part_rem =
1363 be16_to_cpu(srx->hdr.ctrl.mpa_len) -
1364 srx->fpdu_part_rcvd + MPA_HDR_SIZE;
1366 if (srx->fpdu_part_rem)
1367 srx->pad = -srx->fpdu_part_rem & 0x3;
1369 srx->pad = 0;
1371 srx->state = SIW_GET_DATA_START;
1372 srx->fpdu_part_rcvd = 0;
1394 be16_to_cpu(srx->hdr.ctrl.mpa_len)
1397 srx->fpdu_part_rem = (-mpa_len & 0x3)
1399 srx->fpdu_part_rcvd = 0;
1400 srx->state = SIW_GET_TRAILER;
1405 srx->state = SIW_GET_DATA_MORE;
1413 rv = siw_get_trailer(qp, srx);
1419 srx->state = SIW_GET_HDR;
1420 srx->fpdu_part_rcvd = 0;
1422 if (!(srx->hdr.ctrl.ddp_rdmap_ctrl &
1438 if ((srx->state > SIW_GET_HDR ||
1443 srx->state);
1451 srx->state, srx->fpdu_part_rem);
1455 return srx->skb_copied;
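siw_tcp_rx_data() (file lines 1340-1455) is the per-segment receive callback (invoked from tcp_read_sock()) that drives all of the above: it latches the new skb into the stream, then loops over the SIW_GET_HDR, SIW_GET_DATA_START/_MORE and SIW_GET_TRAILER states until the segment is drained (or rx_suspend discards the rest), finally returning skb_copied so the socket layer advances by exactly what was consumed. The remainder/pad arithmetic visible at file lines 1362-1369 works as sketched below; MPA_HDR_SIZE is the 2-byte MPA length field itself:

/* Header complete: how much FPDU payload is still owed? */
srx->fpdu_part_rem = be16_to_cpu(srx->hdr.ctrl.mpa_len) -
                     srx->fpdu_part_rcvd + MPA_HDR_SIZE;
/*
 * mpa_len does not count the 2-byte length field, so it is added back,
 * while header bytes already consumed are subtracted. The trailer pad
 * then rounds the payload up to a 4-byte boundary, e.g.:
 *   fpdu_part_rem = 7  ->  pad = (-7) & 0x3 = 1
 *   fpdu_part_rem = 8  ->  pad = (-8) & 0x3 = 0
 */
srx->pad = srx->fpdu_part_rem ? -srx->fpdu_part_rem & 0x3 : 0;

srx->state = SIW_GET_DATA_START;
srx->fpdu_part_rcvd = 0;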