Lines Matching refs:send

151 struct rds_ib_send_work *send,
157 switch (send->s_wr.opcode) {
159 if (send->s_op) {
160 rm = container_of(send->s_op, struct rds_message, data);
161 rds_ib_send_unmap_data(ic, send->s_op, wc_status);
166 if (send->s_op) {
167 rm = container_of(send->s_op, struct rds_message, rdma);
168 rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
173 if (send->s_op) {
174 rm = container_of(send->s_op, struct rds_message, atomic);
175 rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
181 __func__, send->s_wr.opcode);
185 send->s_wr.opcode = 0xdead;
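
The unmap path above recovers the rds_message that owns a completed operation with container_of on the embedded op pointer. A minimal userspace model of that pattern (the demo_msg/demo_op types are invented for illustration, not the real RDS structures):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical message with an embedded per-operation struct, mirroring
 * how rds_message embeds its data, rdma and atomic ops. */
struct demo_op  { int status; };
struct demo_msg { int id; struct demo_op data_op; };

/* container_of: map a pointer to an embedded member back to the
 * structure that contains it, via the member's compile-time offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct demo_msg m = { .id = 42 };
	struct demo_op *op = &m.data_op;     /* only the op pointer is passed around */
	struct demo_msg *owner = container_of(op, struct demo_msg, data_op);

	printf("owner id = %d\n", owner->id); /* prints 42 */
	return 0;
}
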
192 struct rds_ib_send_work *send;
195 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
198 send->s_op = NULL;
200 send->s_wr.wr_id = i;
201 send->s_wr.sg_list = send->s_sge;
202 send->s_wr.ex.imm_data = 0;
204 sge = &send->s_sge[0];
210 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
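
rds_ib_send_init_ring stamps every slot of the send ring once at setup: the slot index goes into wr_id so a completion can be mapped straight back to its ring entry, and sg_list permanently points at the slot's own two-entry sge array (header plus payload). A small sketch of that initialization with stand-in types (the demo_* names are placeholders, not the kernel's ib_send_wr/ib_sge):

#include <stdint.h>

struct demo_sge  { uint64_t addr; uint32_t length; uint32_t lkey; };
struct demo_wr   { uint64_t wr_id; struct demo_sge *sg_list; int num_sge; };
struct demo_send { struct demo_wr wr; struct demo_sge sge[2]; };

/* One pass over the ring at connection setup; nothing here changes per
 * message, so the hot path only fills in addresses and lengths. */
static void init_send_ring(struct demo_send *ring, uint32_t nr, uint32_t lkey)
{
	for (uint32_t i = 0; i < nr; i++) {
		ring[i].wr.wr_id   = i;           /* completion -> ring slot */
		ring[i].wr.sg_list = ring[i].sge;
		ring[i].sge[0].lkey = lkey;       /* header sge */
		ring[i].sge[1].lkey = lkey;       /* payload sge */
	}
}
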
216 struct rds_ib_send_work *send;
219 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
220 if (send->s_op && send->s_wr.opcode != 0xdead)
221 rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
239 * operations performed in the send path. As the sender allocs and potentially
247 struct rds_ib_send_work *send;
272 send = &ic->i_sends[oldest];
273 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
276 rm = rds_ib_send_unmap_op(ic, send, wc->status);
278 if (time_after(jiffies, send->s_queued + HZ / 2))
281 if (send->s_op) {
282 if (send->s_op == rm->m_final_op) {
289 send->s_op = NULL;
304 rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
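
The completion handler uses the wr_id stamped at init time to find the ring slot, then walks from the oldest outstanding entry up to that slot, unmapping each one and releasing ring space. Below is a small model of the wrap-around "how many entries completed" arithmetic for a power-of-two ring; the mask-based math is an assumption made for the sketch, not the driver's exact helper:

#include <stdint.h>

/* Entries from 'oldest' up to and including 'wr_id' in a power-of-two
 * ring of nr slots; the indices may have wrapped past the end. */
static uint32_t ring_completed(uint32_t nr, uint32_t oldest, uint32_t wr_id)
{
	return ((wr_id - oldest) & (nr - 1)) + 1;
}

/* The handler then does roughly:
 *   for (i = 0; i < completed; i++, oldest = (oldest + 1) & (nr - 1))
 *           unmap_and_complete(&ring[oldest]);
 */
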
316 * - send credits: this tells us how many WRs we're allowed
327 * exhausted their send credits, and are unable to send new credits
328 * to the peer. We achieve this by requiring that we send at least
334 * The RDS send code is essentially single-threaded; rds_send_xmit
335 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
339 * In the send path, we need to update the counters for send credits
345 * Spinlocks shared between the send and the receive path are bad,
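
The design comment above boils down to three constraints: the sender may only post as many WRs as it holds send credits for, it must keep one credit in reserve so a credit update can always be sent, and both the send-credit count and the posted-buffer count have to be updated from the send and receive paths without a shared spinlock. One common way to satisfy the last point is to pack both 16-bit counters into a single atomic word; the sketch below is a userspace C11 model built on that assumption, not the kernel's actual macros:

#include <stdatomic.h>
#include <stdint.h>

/* Low 16 bits: send credits we may consume.
 * High 16 bits: receive buffers posted but not yet advertised to the peer. */
#define SEND_CREDITS(v)	((v) & 0xffffu)
#define POST_CREDITS(v)	((v) >> 16)

static atomic_uint credits;

/* Take up to 'wanted' send credits, always leaving one behind so a
 * credit-update message can still be sent (the rule quoted above). */
static uint32_t grab_send_credits(uint32_t wanted)
{
	unsigned int oldv, newv;
	uint32_t avail, got;

	do {
		oldv = atomic_load(&credits);
		avail = SEND_CREDITS(oldv);
		if (avail <= 1)
			return 0;		/* reserve the last credit */
		got = wanted < avail - 1 ? wanted : avail - 1;
		newv = oldv - got;		/* only the low half changes */
	} while (!atomic_compare_exchange_weak(&credits, &oldv, newv));

	return got;
}
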
374 /* The last credit must be used to send a credit update. */
392 * the posted regardless of whether any send credits are
438 /* Decide whether to send an update to the peer now.
439 * If we would send a credit update for every single buffer we
441 * consumes buffer, we refill the ring, send ACK to remote
444 * Performance pretty much depends on how often we send
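
The fragment above is about when to advertise newly posted receive buffers back to the peer: sending a credit update for every single refilled buffer would amount to an ACK storm, so updates are batched until enough unadvertised credits have accumulated. A hedged sketch of such a threshold check; the quarter-of-the-ring value is an invented example, not the driver's actual policy:

#include <stdbool.h>
#include <stdint.h>

/* Batch credit updates: only tell the peer once the accumulated,
 * not-yet-advertised post credits are worth a message of their own. */
static bool should_advertise_credits(uint32_t unadvertised, uint32_t ring_size)
{
	return unadvertised >= ring_size / 4;	/* example threshold only */
}
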
455 struct rds_ib_send_work *send,
465 send->s_wr.send_flags |= IB_SEND_SIGNALED;
478 * once we send the final fragment.
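
rds_ib_set_wr_signal_state decides which work requests ask for a signaled completion: signaling every WR would flood the completion queue, but the sender still has to signal periodically, and always on the final fragment, so completions keep freeing ring space. A small model of that cadence; the choice of interval is left to the caller here and is not taken from the driver:

#include <stdbool.h>
#include <stdint.h>

struct signal_state { uint32_t unsignaled; };	/* WRs since the last signaled one */

/* True when this WR should request a completion: either the caller
 * forces it (e.g. last fragment of a message) or enough unsignaled WRs
 * have been queued that the send ring needs a checkpoint. */
static bool want_signaled(struct signal_state *s, bool force, uint32_t interval)
{
	if (force || ++s->unsignaled >= interval) {
		s->unsignaled = 0;
		return true;
	}
	return false;
}
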
489 struct rds_ib_send_work *send = NULL;
509 /* Do not send cong updates to IB loopback */
594 * sticking the header into the send ring. Which is why we
619 send = &ic->i_sends[pos];
620 first = send;
628 send->s_wr.send_flags = send_flags;
629 send->s_wr.opcode = IB_WR_SEND;
630 send->s_wr.num_sge = 1;
631 send->s_wr.next = NULL;
632 send->s_queued = jiffies;
633 send->s_op = NULL;
635 send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];
637 send->s_sge[0].length = sizeof(struct rds_header);
638 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
653 send->s_wr.num_sge = 2;
655 send->s_sge[1].addr = sg_dma_address(scat);
656 send->s_sge[1].addr += rm->data.op_dmaoff;
657 send->s_sge[1].length = len;
658 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
669 rds_ib_set_wr_signal_state(ic, send, false);
675 rds_ib_set_wr_signal_state(ic, send, true);
676 send->s_wr.send_flags |= IB_SEND_SOLICITED;
679 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
682 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
683 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
700 prev->s_wr.next = &send->s_wr;
701 prev = send;
704 send = &ic->i_sends[pos];
715 /* if we finished the message then send completion owns it */
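
In rds_ib_xmit each fragment gets one work request with two sges, the RDS header and up to a fragment's worth of payload, and the requests are linked through their next pointers so the whole batch goes to the HCA in a single post. A compile-only sketch of that chaining using the userspace verbs structures (the kernel uses the equivalent struct ib_send_wr; the addresses, lengths and lkey here stand in for registered memory):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Build a chain of n send WRs, each with a header sge and a payload
 * sge, linked via ->next so one ibv_post_send() would submit them all. */
static void build_send_chain(struct ibv_send_wr *wrs, struct ibv_sge (*sges)[2],
			     int n, uint64_t hdr_addr, uint32_t hdr_len,
			     uint64_t payload_addr, uint32_t frag_len, uint32_t lkey)
{
	for (int i = 0; i < n; i++) {
		sges[i][0] = (struct ibv_sge){ .addr = hdr_addr,
					       .length = hdr_len, .lkey = lkey };
		sges[i][1] = (struct ibv_sge){ .addr = payload_addr + (uint64_t)i * frag_len,
					       .length = frag_len, .lkey = lkey };

		memset(&wrs[i], 0, sizeof(wrs[i]));
		wrs[i].wr_id   = (uint64_t)i;
		wrs[i].opcode  = IBV_WR_SEND;
		wrs[i].sg_list = sges[i];
		wrs[i].num_sge = 2;
		wrs[i].next    = (i + 1 < n) ? &wrs[i + 1] : NULL;
	}
	if (n > 0)
		wrs[n - 1].send_flags = IBV_SEND_SIGNALED;	/* signal only the tail */
}
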
769 struct rds_ib_send_work *send = NULL;
783 /* address of send request in ring */
784 send = &ic->i_sends[pos];
785 send->s_queued = jiffies;
788 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
789 send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
790 send->s_atomic_wr.swap = op->op_m_cswp.swap;
791 send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
792 send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
794 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
795 send->s_atomic_wr.compare_add = op->op_m_fadd.add;
796 send->s_atomic_wr.swap = 0;
797 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
798 send->s_atomic_wr.swap_mask = 0;
800 send->s_wr.send_flags = 0;
801 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
802 send->s_atomic_wr.wr.num_sge = 1;
803 send->s_atomic_wr.wr.next = NULL;
804 send->s_atomic_wr.remote_addr = op->op_remote_addr;
805 send->s_atomic_wr.rkey = op->op_rkey;
806 send->s_op = op;
807 rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
820 send->s_sge[0].addr = sg_dma_address(op->op_sg);
821 send->s_sge[0].length = sg_dma_len(op->op_sg);
822 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
825 send->s_sge[0].addr, send->s_sge[0].length);
830 failed_wr = &send->s_atomic_wr.wr;
831 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
832 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
833 send, &send->s_atomic_wr, ret, failed_wr);
834 BUG_ON(failed_wr != &send->s_atomic_wr.wr);
843 if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
845 BUG_ON(failed_wr != &send->s_atomic_wr.wr);
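
The failed_wr checks around ib_post_send above verify that, when a post fails, the stack reports back exactly the WR that was handed in. The userspace equivalent is the bad_wr out-parameter of ibv_post_send; a minimal wrapper showing the same check (QP setup is omitted, so this is a sketch rather than a complete program):

#include <infiniband/verbs.h>
#include <stdio.h>

/* Post a single WR and report which request the provider rejected,
 * mirroring the failed_wr bookkeeping in the excerpt above. */
static int post_one(struct ibv_qp *qp, struct ibv_send_wr *wr)
{
	struct ibv_send_wr *bad_wr = NULL;
	int ret = ibv_post_send(qp, wr, &bad_wr);

	if (ret)
		/* bad_wr points at the first WR that was not posted; for a
		 * single-WR post it should be wr itself. */
		fprintf(stderr, "post failed: %d (bad_wr %p, expected %p)\n",
			ret, (void *)bad_wr, (void *)wr);
	return ret;
}
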
855 struct rds_ib_send_work *send = NULL;
898 * be enough work requests to send the entire message.
910 send = &ic->i_sends[pos];
911 first = send;
918 send->s_wr.send_flags = 0;
919 send->s_queued = jiffies;
920 send->s_op = NULL;
923 nr_sig += rds_ib_set_wr_signal_state(ic, send,
926 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
927 send->s_rdma_wr.remote_addr = remote_addr;
928 send->s_rdma_wr.rkey = op->op_rkey;
931 send->s_rdma_wr.wr.num_sge = max_sge;
934 send->s_rdma_wr.wr.num_sge = num_sge;
937 send->s_rdma_wr.wr.next = NULL;
940 prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;
942 for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
946 send->s_sge[j].addr = sg_dma_address(scat);
947 send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
949 send->s_sge[j].addr = odp_addr;
950 send->s_sge[j].lkey = odp_lkey;
952 send->s_sge[j].length = len;
962 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
963 &send->s_rdma_wr.wr,
964 send->s_rdma_wr.wr.num_sge,
965 send->s_rdma_wr.wr.next);
967 prev = send;
968 if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
969 send = ic->i_sends;
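
The RDMA path splits a scatter list across several work requests because one WR carries at most max_sge entries, and it wraps back to slot 0 when it walks off the end of the send ring. A small arithmetic sketch of just that counting (no verbs calls):

#include <stdint.h>

/* WRs needed for num_sge scatter entries when each WR carries at most
 * max_sge of them (ceiling division). */
static uint32_t wrs_needed(uint32_t num_sge, uint32_t max_sge)
{
	return (num_sge + max_sge - 1) / max_sge;
}

/* Advance a ring position with wrap-around, matching the
 * "if (++send == &ic->i_sends[w_nr]) send = ic->i_sends;" idiom above. */
static uint32_t ring_next(uint32_t pos, uint32_t w_nr)
{
	return (pos + 1 == w_nr) ? 0 : pos + 1;
}
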
1015 * to send previously (due to flow control). Try again. */