Lines matching defs:rm in net/rds/send.c (struct rds_message *rm)

67 struct rds_message *rm, *tmp;
71 rm = cp->cp_xmit_rm;
77 rds_message_unmapped(rm);
78 rds_message_put(rm);
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
96 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
97 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
139 struct rds_message *rm;
201 rm = cp->cp_xmit_rm;
203 if (!rm) {
218 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
219 rm = rds_cong_update_alloc(conn);
220 if (IS_ERR(rm)) {
221 ret = PTR_ERR(rm);
224 rm->data.op_active = 1;
225 rm->m_inc.i_conn_path = cp;
226 rm->m_inc.i_conn = cp->cp_conn;
228 cp->cp_xmit_rm = rm;
238 if (!rm) {
254 rm = list_entry(cp->cp_send_queue.next,
257 rds_message_addref(rm);
263 list_move_tail(&rm->m_conn_item,
269 if (!rm)
279 if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
280 (rm->rdma.op_active &&
281 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
283 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
284 list_move(&rm->m_conn_item, &to_be_dropped);
290 len = ntohl(rm->m_inc.i_hdr.h_len);
293 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
305 cp->cp_xmit_rm = rm;
309 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
310 rm->m_final_op = &rm->rdma;
314 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
315 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
317 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
318 wake_up_interruptible(&rm->m_flush_wait);
325 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
326 rm->m_final_op = &rm->atomic;
330 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
331 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
333 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
334 wake_up_interruptible(&rm->m_flush_wait);
348 if (rm->data.op_nents == 0) {
352 ops_present = (rm->atomic.op_active || rm->rdma.op_active);
353 if (rm->atomic.op_active && !rm->atomic.op_silent)
355 if (rm->rdma.op_active && !rm->rdma.op_silent)
359 && !rm->m_rdma_cookie)
360 rm->data.op_active = 0;
363 if (rm->data.op_active && !cp->cp_xmit_data_sent) {
364 rm->m_final_op = &rm->data;
366 ret = conn->c_trans->xmit(conn, rm,
381 sg = &rm->data.op_sg[cp->cp_xmit_sg];
392 rm->data.op_nents);
397 (cp->cp_xmit_sg == rm->data.op_nents))
402 * A rm will only take multiple times through this loop
404 * none), then we're done with the rm.
406 if (!rm->data.op_active || cp->cp_xmit_data_sent) {
415 rds_message_put(rm);
427 list_for_each_entry(rm, &to_be_dropped, m_conn_item)
428 rds_message_put(rm);
472 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
474 u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
485 static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
489 return is_acked(rm, ack);
490 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
499 void rds_rdma_send_complete(struct rds_message *rm, int status)
506 spin_lock_irqsave(&rm->m_rs_lock, flags);
508 ro = &rm->rdma;
509 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
512 rs = rm->m_rs;
523 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
535 void rds_atomic_send_complete(struct rds_message *rm, int status)
542 spin_lock_irqsave(&rm->m_rs_lock, flags);
544 ao = &rm->atomic;
545 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
548 rs = rm->m_rs;
559 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
574 __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
579 ro = &rm->rdma;
586 ao = &rm->atomic;
608 struct rds_message *rm;
613 rm = list_entry(messages->next, struct rds_message,
615 list_del_init(&rm->m_conn_item);
623 * The message spinlock makes sure nobody clears rm->m_rs
627 spin_lock_irqsave(&rm->m_rs_lock, flags);
628 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
631 if (rs != rm->m_rs) {
636 rs = rm->m_rs;
644 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
645 struct rm_rdma_op *ro = &rm->rdma;
648 list_del_init(&rm->m_sock_item);
649 rds_send_sndbuf_remove(rs, rm);
658 rm->rdma.op_notifier = NULL;
665 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
666 rds_message_put(rm);
668 rds_message_put(rm);
688 struct rds_message *rm, *tmp;
694 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
695 if (!rds_send_is_acked(rm, ack, is_acked))
698 list_move(&rm->m_conn_item, &list);
699 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
723 struct rds_message *rm, *tmp;
732 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
734 (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
735 dest->sin6_port != rm->m_inc.i_hdr.h_dport))
738 list_move(&rm->m_sock_item, &list);
739 rds_send_sndbuf_remove(rs, rm);
740 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
752 list_for_each_entry(rm, &list, m_sock_item) {
754 conn = rm->m_inc.i_conn;
756 cp = rm->m_inc.i_conn_path;
762 * Maybe someone else beat us to removing rm from the conn.
766 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
770 list_del_init(&rm->m_conn_item);
777 spin_lock_irqsave(&rm->m_rs_lock, flags);
780 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
783 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
785 rds_message_put(rm);
791 rm = list_entry(list.next, struct rds_message, m_sock_item);
792 list_del_init(&rm->m_sock_item);
793 rds_message_wait(rm);
800 spin_lock_irqsave(&rm->m_rs_lock, flags);
803 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
806 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
808 rds_message_put(rm);
819 struct rds_message *rm, __be16 sport,
828 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
851 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
853 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
854 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
855 rds_message_addref(rm);
857 rm->m_rs = rs;
861 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
862 rm->m_inc.i_conn = conn;
863 rm->m_inc.i_conn_path = cp;
864 rds_message_addref(rm);
867 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
868 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
869 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
873 rm, len, rs, rs->rs_snd_bytes,
874 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
970 static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
976 !rm->data.op_mmp_znotifier)
979 rm->data.op_mmp_znotifier->z_cookie = *cookie;
983 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
998 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
1004 ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
1009 ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
1013 ret = rds_cmsg_rdma_map(rs, rm, cmsg);
1026 ret = rds_cmsg_atomic(rs, rm, cmsg);
1030 ret = rds_cmsg_zcopy(rs, rm, cmsg);
1107 struct rds_message *rm = NULL;
1262 /* size of rm including all sgs */
1267 rm = rds_message_alloc(ret, GFP_KERNEL);
1268 if (!rm) {
1273 /* Attach data to the rm */
1275 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
1276 if (IS_ERR(rm->data.op_sg)) {
1277 ret = PTR_ERR(rm->data.op_sg);
1280 ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
1284 rm->data.op_active = 1;
1286 rm->m_daddr = daddr;
1311 rm->m_conn_path = cpath;
1314 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
1318 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1320 &rm->rdma, conn->c_trans->xmit_rdma);
1325 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1327 &rm->atomic, conn->c_trans->xmit_atomic);
1345 while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
1355 rds_send_queue_rm(rs, conn, cpath, rm,
1388 rds_message_put(rm);
1405 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1407 if (rm)
1408 rds_message_put(rm);
1424 struct rds_message *rm;
1428 rm = rds_message_alloc(0, GFP_ATOMIC);
1429 if (!rm) {
1434 rm->m_daddr = cp->cp_conn->c_faddr;
1435 rm->data.op_active = 1;
1444 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
1445 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1446 rds_message_addref(rm);
1447 rm->m_inc.i_conn = cp->cp_conn;
1448 rm->m_inc.i_conn_path = cp;
1450 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
1452 rm->m_inc.i_hdr.h_flags |= h_flags;
1460 rds_message_add_extension(&rm->m_inc.i_hdr,
1463 rds_message_add_extension(&rm->m_inc.i_hdr,
1479 rds_message_put(rm);
1483 if (rm)
1484 rds_message_put(rm);
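
A pattern repeats through the listing above (e.g. lines 853-855, 864-869, 644-649 and 766-770): each list a message joins costs one reference (rds_message_addref) plus a membership flag (RDS_MSG_ON_SOCK / RDS_MSG_ON_CONN), and only the path that wins test_and_clear_bit() on that flag drops the corresponding reference, so rds_message_put() frees the message only after every list has let go of it. The following is a minimal userspace sketch of that idea; the struct, flag names, and helpers are simplified stand-ins for illustration, not the real RDS types.

/* Sketch of the ref-per-list / flag-per-list lifecycle, with
 * simplified stand-ins for the RDS structures. */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

#define MSG_ON_SOCK (1u << 0)   /* stand-in for RDS_MSG_ON_SOCK */
#define MSG_ON_CONN (1u << 1)   /* stand-in for RDS_MSG_ON_CONN */

struct msg {
	atomic_int  refcount;
	atomic_uint flags;
};

static struct msg *msg_alloc(void)
{
	struct msg *m = calloc(1, sizeof(*m));
	if (m) {
		atomic_init(&m->refcount, 1);   /* caller's reference */
		atomic_init(&m->flags, 0);
	}
	return m;
}

static void msg_get(struct msg *m)
{
	atomic_fetch_add(&m->refcount, 1);
}

static void msg_put(struct msg *m)
{
	if (atomic_fetch_sub(&m->refcount, 1) == 1)
		free(m);                        /* last reference dropped */
}

/* Queueing: one reference and one membership flag per list joined. */
static void msg_queue(struct msg *m)
{
	atomic_fetch_or(&m->flags, MSG_ON_SOCK);
	msg_get(m);                             /* ref held by the socket list */
	atomic_fetch_or(&m->flags, MSG_ON_CONN);
	msg_get(m);                             /* ref held by the conn list */
}

/* Removal: only the caller that actually clears the flag drops the ref,
 * mirroring the test_and_clear_bit(RDS_MSG_ON_*) checks in the listing. */
static void msg_unlink(struct msg *m, unsigned int flag)
{
	if (atomic_fetch_and(&m->flags, ~flag) & flag)
		msg_put(m);
}

int main(void)
{
	struct msg *m = msg_alloc();

	msg_queue(m);                   /* refcount is now 3 */
	msg_unlink(m, MSG_ON_CONN);     /* e.g. transmit/ack path */
	msg_unlink(m, MSG_ON_CONN);     /* loses the race: flag already clear, no put */
	msg_unlink(m, MSG_ON_SOCK);     /* e.g. socket teardown path */
	msg_put(m);                     /* drop the caller's reference; frees m */
	printf("message lifecycle complete\n");
	return 0;
}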