Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/qib/

Lines Matching refs:wqe

43 static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
48 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
49 ss->sge = wqe->sg_list[0];
50 ss->sg_list = wqe->sg_list + 1;
51 ss->num_sge = wqe->wr.num_sge;
52 ss->total_len = wqe->length;
54 return wqe->length - len;
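
The hits at lines 43-54 cover nearly the whole of restart_sge(), which rewinds a send work request's scatter/gather state to the point a retransmit must resume from. A minimal reconstruction, assuming the unmatched lines hold only the parameter list, a local declaration, and a qib_skip_sge() call that consumes the already-sent bytes:

static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	/* PSNs are 24-bit and advance once per pmtu-sized packet, so the
	 * masked PSN distance times pmtu is the byte offset into the message.
	 */
	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	qib_skip_sge(ss, len, 0);	/* assumed helper: skip 'len' sent bytes */
	return wqe->length - len;	/* bytes remaining to (re)send */
}
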
237 struct qib_swqe *wqe;
274 wqe = get_swqe_ptr(qp, qp->s_last);
276 qib_send_complete(qp, wqe, IB_WC_SUCCESS);
279 wqe = get_swqe_ptr(qp, qp->s_last);
281 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
302 wqe = get_swqe_ptr(qp, qp->s_cur);
323 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
328 wqe->psn = qp->s_next_psn;
336 len = wqe->length;
339 switch (wqe->wr.opcode) {
344 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
348 wqe->lpsn = wqe->psn;
350 wqe->lpsn += (len - 1) / pmtu;
355 if (wqe->wr.opcode == IB_WR_SEND)
360 ohdr->u.imm_data = wqe->wr.ex.imm_data;
363 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
377 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
382 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
384 cpu_to_be32(wqe->wr.wr.rdma.rkey);
387 wqe->lpsn = wqe->psn;
389 wqe->lpsn += (len - 1) / pmtu;
394 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
400 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
402 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
430 wqe->lpsn = qp->s_next_psn++;
433 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
435 cpu_to_be32(wqe->wr.wr.rdma.rkey);
461 wqe->lpsn = wqe->psn;
463 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
466 wqe->wr.wr.atomic.swap);
468 wqe->wr.wr.atomic.compare_add);
472 wqe->wr.wr.atomic.compare_add);
476 wqe->wr.wr.atomic.remote_addr >> 32);
478 wqe->wr.wr.atomic.remote_addr);
480 wqe->wr.wr.atomic.rkey);
492 qp->s_sge.sge = wqe->sg_list[0];
493 qp->s_sge.sg_list = wqe->sg_list + 1;
494 qp->s_sge.num_sge = wqe->wr.num_sge;
495 qp->s_sge.total_len = wqe->length;
496 qp->s_len = wqe->length;
502 if (wqe->wr.opcode == IB_WR_RDMA_READ)
503 qp->s_psn = wqe->lpsn + 1;
521 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
536 if (wqe->wr.opcode == IB_WR_SEND)
541 ohdr->u.imm_data = wqe->wr.ex.imm_data;
544 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
562 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
577 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
582 ohdr->u.imm_data = wqe->wr.ex.imm_data;
584 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
603 len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
605 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
607 cpu_to_be32(wqe->wr.wr.rdma.rkey);
608 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
612 qp->s_psn = wqe->lpsn + 1;
621 delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
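
Within qib_make_rc_req(), the hits at lines 603-608 show an RDMA write being restarted mid-message: the RETH is rebuilt at a byte offset derived from the PSN distance, so the responder resumes exactly where the acknowledged data ended. A standalone sketch of just that arithmetic (the struct and function names here are stand-ins for illustration, not driver code):

/* Hypothetical host-order stand-in for the wire RETH fields. */
struct reth_fields {
	unsigned long long vaddr;
	unsigned int rkey;
	unsigned int length;
};

static void rebuild_reth(struct reth_fields *reth,
			 unsigned long long remote_addr, unsigned int rkey,
			 unsigned int msg_len, unsigned int first_psn,
			 unsigned int resend_psn, unsigned int pmtu)
{
	/* One pmtu-sized payload per PSN, modulo the 24-bit PSN space. */
	unsigned int len = ((resend_psn - first_psn) & 0xFFFFFF) * pmtu;

	reth->vaddr  = remote_addr + len;	/* skip the acked prefix */
	reth->rkey   = rkey;			/* key is unchanged */
	reth->length = msg_len - len;		/* bytes left to write */
}
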
791 struct qib_swqe *wqe = get_swqe_ptr(qp, n);
800 if (qib_cmp24(psn, wqe->psn) <= 0) {
806 opcode = wqe->wr.opcode;
814 wqe = get_swqe_ptr(qp, n);
815 diff = qib_cmp24(psn, wqe->psn);
827 opcode = wqe->wr.opcode;
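
reset_psn() (lines 791-827) locates the work request containing a given PSN by walking the send queue and comparing 24-bit sequence numbers with qib_cmp24(). The comparison has to be circular, which the driver gets by sign-extending the difference from bit 23; line 621 above uses the same shift pair inline. A standalone user-space demonstration of the idiom, for illustration only:

#include <stdio.h>

/* 24-bit circular PSN difference: shift the 32-bit difference left then
 * right by 8 so bit 23 becomes the sign bit. This mirrors the idiom on
 * line 621; left-shifting a negative int is formally undefined in ISO C
 * but well-defined under gcc, which the kernel relies on.
 */
static int psn_delta(unsigned int a, unsigned int b)
{
	return (((int) a - (int) b) << 8) >> 8;
}

int main(void)
{
	/* Wraparound: 0x000005 is 11 PSNs after 0xFFFFFA, not 16M before. */
	printf("%d\n", psn_delta(0x000005, 0xFFFFFA));	/* prints 11 */
	printf("%d\n", psn_delta(0xFFFFFA, 0x000005));	/* prints -11 */
	return 0;
}
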
875 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
883 qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
892 if (wqe->wr.opcode == IB_WR_RDMA_READ)
951 struct qib_swqe *wqe;
956 wqe = get_swqe_ptr(qp, n);
957 if (qib_cmp24(psn, wqe->lpsn) <= 0) {
958 if (wqe->wr.opcode == IB_WR_RDMA_READ)
959 qp->s_sending_psn = wqe->lpsn + 1;
977 struct qib_swqe *wqe;
1012 wqe = get_swqe_ptr(qp, qp->s_last);
1013 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1016 for (i = 0; i < wqe->wr.num_sge; i++) {
1017 struct qib_sge *sge = &wqe->sg_list[i];
1023 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1025 wc.wr_id = wqe->wr.wr_id;
1027 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1028 wc.byte_len = wqe->length;
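
The hits at lines 1012-1028 are the heart of qib_rc_send_complete(): once a wqe's last PSN has been both acknowledged and fully transmitted, its memory-region references are dropped and a work completion is generated if the sender asked for one. A hedged reconstruction of that block; the memset, wc.status, wc.qp, and the final CQ-insertion call are filled in from the upstream 2.6.36 driver as an assumption, not from the matches above:

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		atomic_dec(&sge->mr->refcount);	/* release the MR reference */
	}
	/* Post a send completion unless the QP only signals on request
	 * and this wqe was not flagged IB_SEND_SIGNALED.
	 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.byte_len = wqe->length;
		wc.qp = &qp->ibqp;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
	}
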
1059 struct qib_swqe *wqe,
1070 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
1072 for (i = 0; i < wqe->wr.num_sge; i++) {
1073 struct qib_sge *sge = &wqe->sg_list[i];
1079 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1081 wc.wr_id = wqe->wr.wr_id;
1083 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1084 wc.byte_len = wqe->length;
1094 update_last_psn(qp, wqe->lpsn);
1105 wqe = get_swqe_ptr(qp, qp->s_cur);
1108 qp->s_psn = wqe->psn;
1115 wqe = get_swqe_ptr(qp, qp->s_acked);
1117 return wqe;
1136 struct qib_swqe *wqe;
1156 wqe = get_swqe_ptr(qp, qp->s_acked);
1163 while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
1170 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1185 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1187 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1188 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1207 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1208 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1209 u64 *vaddr = wqe->sg_list[0].vaddr;
1213 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1214 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1215 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1229 wqe = do_rc_completion(qp, wqe, ibp);
1327 qib_send_complete(qp, wqe, status);
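
In do_rc_ack() (lines 1136-1327), the hit at line 1209 is where a completed atomic operation's result lands: the responder's original value, carried in the ACK's atomic extended header, is stored through the wqe's single SGE. A short sketch of that path, assuming the surrounding code has already extracted the 64-bit response into 'val':

	if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
		u64 *vaddr = wqe->sg_list[0].vaddr;	/* local result buffer */

		*vaddr = val;	/* old remote value returned by the atomic */
	}
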
1357 struct qib_swqe *wqe;
1365 wqe = get_swqe_ptr(qp, qp->s_acked);
1367 while (qib_cmp24(psn, wqe->lpsn) > 0) {
1368 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1369 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1370 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1372 wqe = do_rc_completion(qp, wqe, ibp);
1409 struct qib_swqe *wqe;
1447 wqe = get_swqe_ptr(qp, qp->s_acked);
1466 wqe = get_swqe_ptr(qp, qp->s_acked);
1467 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1475 wqe, psn, pmtu);
1482 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1534 wqe = get_swqe_ptr(qp, qp->s_acked);
1536 wqe, psn, pmtu);
1543 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1578 qib_send_complete(qp, wqe, status);