Lines matching refs:rs

472 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
476 assert_spin_locked(&rs->rs_lock);
478 BUG_ON(rs->rs_snd_bytes < len);
479 rs->rs_snd_bytes -= len;
481 if (rs->rs_snd_bytes == 0)
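The matches at 472-481 all fall inside rds_send_sndbuf_remove(), which gives a message's bytes back to the socket's send-buffer accounting once the message leaves the send queue (the listing is from the RDS send path, net/rds/send.c in upstream kernels, though line numbers vary by version). A minimal sketch of how the matched fragments fit together; the length taken from the RDS header and the empty-queue statistic are assumptions, not shown above:

	static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
	{
		/* assumed: the accounted length is the one carried in the RDS header */
		u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

		assert_spin_locked(&rs->rs_lock);	/* caller already holds rs_lock */

		BUG_ON(rs->rs_snd_bytes < len);		/* accounting must never underflow */
		rs->rs_snd_bytes -= len;

		if (rs->rs_snd_bytes == 0)
			rds_stats_inc(s_send_queue_empty);	/* assumed statistic name */
	}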
501 struct rds_sock *rs = NULL;
512 rs = rm->m_rs;
513 sock_hold(rds_rs_to_sk(rs));
516 spin_lock(&rs->rs_lock);
517 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
518 spin_unlock(&rs->rs_lock);
525 if (rs) {
526 rds_wake_sk_sleep(rs);
527 sock_put(rds_rs_to_sk(rs));
537 struct rds_sock *rs = NULL;
548 rs = rm->m_rs;
549 sock_hold(rds_rs_to_sk(rs));
552 spin_lock(&rs->rs_lock);
553 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
554 spin_unlock(&rs->rs_lock);
561 if (rs) {
562 rds_wake_sk_sleep(rs);
563 sock_put(rds_rs_to_sk(rs));
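The two blocks above (501-527 and 537-563) show the same completion pattern, used when an RDMA or an atomic operation finishes: pin the owning socket, queue the operation's notifier on the socket's notify queue under rs_lock, then wake any sleeper and drop the reference. A condensed, hypothetical helper that captures that shared shape; the n_status assignment follows the upstream notifier structure, and the locking around reading rm->m_rs is simplified:

	/* hypothetical condensation of the two completion handlers matched above */
	static void rds_send_queue_notifier(struct rds_message *rm,
					    struct rds_notifier *notifier, int status)
	{
		struct rds_sock *rs = rm->m_rs;		/* owning socket, may be NULL */

		if (rs) {
			sock_hold(rds_rs_to_sk(rs));	/* keep the socket alive while we queue */

			notifier->n_status = status;	/* assumed field name */
			spin_lock(&rs->rs_lock);
			list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
			spin_unlock(&rs->rs_lock);

			rds_wake_sk_sleep(rs);		/* let recvmsg() pick the notifier up */
			sock_put(rds_rs_to_sk(rs));
		}
	}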
574 __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
582 list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
589 list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
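Lines 574-589 come from __rds_send_complete(), which hands any pending RDMA and atomic notifiers for a message over to the socket's notify queue with the given status; the caller holds rs->rs_lock. A sketch built around the two list_add_tail() calls matched above; the op member and field names follow the upstream structures and should be read as assumptions:

	/* sketch: called with rs->rs_lock held */
	static void __rds_send_complete(struct rds_sock *rs, struct rds_message *rm,
					int status)
	{
		struct rm_rdma_op *ro = &rm->rdma;		/* assumed member names */
		struct rm_atomic_op *ao = &rm->atomic;

		if (ro->op_active && ro->op_notify && ro->op_notifier) {
			ro->op_notifier->n_status = status;
			list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
			ro->op_notifier = NULL;		/* ownership moves to the queue */
		}

		if (ao->op_active && ao->op_notify && ao->op_notifier) {
			ao->op_notifier->n_status = status;
			list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
			ao->op_notifier = NULL;
		}

		/* the caller is responsible for waking the application */
	}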
607 struct rds_sock *rs = NULL;
631 if (rs != rm->m_rs) {
632 if (rs) {
633 rds_wake_sk_sleep(rs);
634 sock_put(rds_rs_to_sk(rs));
636 rs = rm->m_rs;
637 if (rs)
638 sock_hold(rds_rs_to_sk(rs));
640 if (!rs)
642 spin_lock(&rs->rs_lock);
649 rds_send_sndbuf_remove(rs, rm);
655 &rs->rs_notify_queue);
662 spin_unlock(&rs->rs_lock);
671 if (rs) {
672 rds_wake_sk_sleep(rs);
673 sock_put(rds_rs_to_sk(rs));
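The longer run at 607-673 is the body of rds_send_remove_from_sock(), which walks a list of acked messages and strips each one out of its owner's send buffer. The matched lines trace how the loop keeps at most one socket pinned at a time. A skeleton of that loop; the m_conn_item list walk and the notifier hand-off are assumptions, and per-message reference handling is left out:

	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		rm = list_first_entry(messages, struct rds_message, m_conn_item);
		list_del_init(&rm->m_conn_item);

		/* pin at most one socket; switch only when the owner changes */
		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			continue;	/* message no longer belongs to a socket */

		spin_lock(&rs->rs_lock);
		rds_send_sndbuf_remove(rs, rm);		/* give the bytes back */
		/* ... move a pending RDMA notifier onto &rs->rs_notify_queue ... */
		spin_unlock(&rs->rs_lock);
	}

	/* wake and release the last socket we were holding */
	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}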
721 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
729 /* get all the messages we're dropping under the rs lock */
730 spin_lock_irqsave(&rs->rs_lock, flags);
732 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
739 rds_send_sndbuf_remove(rs, rm);
743 /* order flag updates with the rs lock */
746 spin_unlock_irqrestore(&rs->rs_lock, flags);
779 spin_lock(&rs->rs_lock);
780 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
781 spin_unlock(&rs->rs_lock);
788 rds_wake_sk_sleep(rs);
802 spin_lock(&rs->rs_lock);
803 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
804 spin_unlock(&rs->rs_lock);
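Lines 721-804 belong to rds_send_drop_to(), which pulls every pending message for a destination off the socket's send queue and then completes each one with RDS_RDMA_CANCELED. A sketch of the first phase, collecting the doomed messages under rs_lock; the destination-match helper, the local list, and the flag name are assumptions:

	LIST_HEAD(list);			/* messages we are about to drop */
	struct rds_message *rm, *tmp;
	unsigned long flags;

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && !rds_msg_matches_dest(rm, dest))	/* hypothetical helper */
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);	/* assumed flag name */
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	/*
	 * Each message on `list' is then completed with RDS_RDMA_CANCELED
	 * under rs->rs_lock (the __rds_send_complete() calls matched at 780
	 * and 803) before rds_wake_sk_sleep(rs) wakes any blocked sender.
	 */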
817 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
832 spin_lock_irqsave(&rs->rs_lock, flags);
842 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
843 rs->rs_snd_bytes += len;
850 if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
853 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
856 sock_hold(rds_rs_to_sk(rs));
857 rm->m_rs = rs;
872 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
873 rm, len, rs, rs->rs_snd_bytes,
879 spin_unlock_irqrestore(&rs->rs_lock, flags);
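Lines 817-879 are from rds_send_queue_rm(), which admits a message against the socket's send-buffer budget before putting it on rs_send_queue. A sketch of the accounting the matches trace; the ack-request flag, the queued out-parameter, and the sequence number in the debug print are assumptions:

	spin_lock_irqsave(&rs->rs_lock, flags);

	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* assumed: request an ack once past half of the send buffer */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);

		/* the send queue holds a socket reference until the message leaves */
		sock_hold(rds_rs_to_sk(rs));
		rm->m_rs = rs;

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
		*queued = 1;	/* assumed out-parameter */
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);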
970 static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
983 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
1004 ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
1009 ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
1013 ret = rds_cmsg_rdma_map(rs, rm, cmsg);
1026 ret = rds_cmsg_atomic(rs, rm, cmsg);
1030 ret = rds_cmsg_zcopy(rs, rm, cmsg);
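Lines 970-1030 cover rds_cmsg_zcopy() and the control-message dispatcher rds_cmsg_send(); each match above is one arm of a switch over the RDS cmsg types. A sketch of that dispatch loop; the exact case list, the allocated_mr out-parameter, and the vector indexing are assumptions, while the cmsg type constants come from the uapi RDS header:

	for_each_cmsghdr(cmsg, msg) {
		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
			ind++;
			break;
		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;
		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;	/* assumed out-parameter */
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;
		case RDS_CMSG_ZCOPY_COOKIE:
			ret = rds_cmsg_zcopy(rs, rm, cmsg);
			break;
		default:
			return -EINVAL;
		}
		if (ret)
			break;
	}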
1044 static int rds_send_mprds_hash(struct rds_sock *rs,
1050 hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
1052 hash = RDS_MPATH_HASH(rs, conn->c_npaths);
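Lines 1044-1052 are rds_send_mprds_hash(), which picks one of the connection's multipath lanes for this socket; until the peer has negotiated its path count (c_npaths), the hash spreads over the maximum worker count. A sketch, with the wait-for-negotiation step reduced to a comment:

	static int rds_send_mprds_hash(struct rds_sock *rs,
				       struct rds_connection *conn, int nonblock)
	{
		int hash;

		if (conn->c_npaths == 0)
			hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
		else
			hash = RDS_MPATH_HASH(rs, conn->c_npaths);

		/*
		 * Assumed: upstream also waits here (or fails a non-blocking
		 * send with -EAGAIN) until path negotiation finishes, then
		 * re-hashes over the real path count before returning.
		 */
		return hash;
	}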
1103 struct rds_sock *rs = rds_sk_to_rs(sk);
1118 sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
1203 daddr = rs->rs_conn_addr;
1204 dport = rs->rs_conn_port;
1205 scope_id = rs->rs_bound_scope_id;
1210 if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
1220 ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
1229 if (scope_id != rs->rs_bound_scope_id) {
1231 scope_id = rs->rs_bound_scope_id;
1232 } else if (rs->rs_bound_scope_id) {
1250 if (payload_len > rds_sk_sndbuf(rs)) {
1256 if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
1290 if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
1291 rs->rs_tos == rs->rs_conn->c_tos) {
1292 conn = rs->rs_conn;
1295 &rs->rs_bound_addr, &daddr,
1296 rs->rs_transport, rs->rs_tos,
1303 rs->rs_conn = conn;
1307 cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
1314 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
1340 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1342 rs->rs_seen_congestion = 1;
1345 while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
1355 rds_send_queue_rm(rs, conn, cpath, rm,
1356 rs->rs_bound_port,
1405 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
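The remaining matches (1103-1405) all sit inside rds_sendmsg(): resolving the destination from rs_conn_addr/rs_conn_port for connected sockets, checking scope IDs and the payload against rds_sk_sndbuf(rs), parsing cmsgs, waiting on congestion via rds_cong_wait() (and marking rs_seen_congestion), and finally queueing the message; rds_rdma_unuse() at 1405 is error-path cleanup for an RDMA cookie. A condensed sketch of two recurring fragments, the cached-connection lookup and the queueing loop; the gfp/net arguments, the error handling, and the sleep in the loop are assumptions:

	/* reuse the socket's cached connection when it still matches */
	if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
	    rs->rs_tos == rs->rs_conn->c_tos) {
		conn = rs->rs_conn;
	} else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						&rs->rs_bound_addr, &daddr,
						rs->rs_transport, rs->rs_tos,
						sock->sk->sk_allocation, scope_id);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* pick a multipath lane, then queue until sndbuf space is available */
	cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];

	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
				  dport, &queued)) {
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}
		/* assumed: sleep on sk_sleep(sk) until space frees up, then retry */
	}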