/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/rds/

Lines Matching defs:conn

59 void rds_send_reset(struct rds_connection *conn)
64 if (conn->c_xmit_rm) {
69 rds_message_unmapped(conn->c_xmit_rm);
70 rds_message_put(conn->c_xmit_rm);
71 conn->c_xmit_rm = NULL;
73 conn->c_xmit_sg = 0;
74 conn->c_xmit_hdr_off = 0;
75 conn->c_xmit_data_off = 0;
76 conn->c_xmit_rdma_sent = 0;
78 conn->c_map_queued = 0;
80 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
81 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
84 spin_lock_irqsave(&conn->c_lock, flags);
85 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
89 list_splice_init(&conn->c_retrans, &conn->c_send_queue);
90 spin_unlock_irqrestore(&conn->c_lock, flags);
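
The matched lines above cover rds_send_reset(): any message caught mid-transmit is released, the per-connection transmit cursors (c_xmit_sg, c_xmit_hdr_off, c_xmit_data_off, c_xmit_rdma_sent) are zeroed, the unacked quotas are refilled from the sysctls, and the retransmit queue is spliced back onto the send queue under c_lock. A minimal userspace model of that requeue step, with illustrative types standing in for the kernel's list_head/spinlock machinery:

    #include <stddef.h>

    #define MSG_RETRANSMITTED 0x1u

    struct msg {
            unsigned int flags;
            struct msg *next;
    };

    struct queue {
            struct msg *head, *tail;
    };

    /* Flag every pending-retransmit message, then splice the whole
     * retransmit queue onto the FRONT of the send queue (the analogue
     * of list_splice_init at line 89 above). */
    static void requeue_retrans(struct queue *retrans, struct queue *sendq)
    {
            struct msg *m;

            for (m = retrans->head; m != NULL; m = m->next)
                    m->flags |= MSG_RETRANSMITTED;

            if (retrans->head != NULL) {
                    retrans->tail->next = sendq->head;
                    if (sendq->head == NULL)
                            sendq->tail = retrans->tail;
                    sendq->head = retrans->head;
                    retrans->head = retrans->tail = NULL;
            }
    }

Splicing at the head rather than the tail means retransmissions go out before any fresh sends, so the peer still sees messages in sequence order after a reconnect.
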
98 * - reassembly is optional and easily done by transports per conn
107 int rds_send_xmit(struct rds_connection *conn)
129 if (!mutex_trylock(&conn->c_send_lock)) {
135 if (conn->c_trans->xmit_prepare)
136 conn->c_trans->xmit_prepare(conn);
151 if (conn->c_map_bytes) {
152 ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
153 conn->c_map_offset);
157 conn->c_map_offset += ret;
158 conn->c_map_bytes -= ret;
159 if (conn->c_map_bytes)
166 rm = conn->c_xmit_rm;
168 conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
169 conn->c_xmit_sg == rm->m_nents) {
170 conn->c_xmit_rm = NULL;
171 conn->c_xmit_sg = 0;
172 conn->c_xmit_hdr_off = 0;
173 conn->c_xmit_data_off = 0;
174 conn->c_xmit_rdma_sent = 0;
183 if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
184 if (conn->c_trans->xmit_cong_map != NULL) {
185 conn->c_map_offset = 0;
186 conn->c_map_bytes = sizeof(struct rds_header) +
191 rm = rds_cong_update_alloc(conn);
197 conn->c_xmit_rm = rm;
210 spin_lock_irqsave(&conn->c_lock, flags);
212 if (!list_empty(&conn->c_send_queue)) {
213 rm = list_entry(conn->c_send_queue.next,
222 list_move_tail(&rm->m_conn_item, &conn->c_retrans);
225 spin_unlock_irqrestore(&conn->c_lock, flags);
241 spin_lock_irqsave(&conn->c_lock, flags);
244 spin_unlock_irqrestore(&conn->c_lock, flags);
251 if (conn->c_unacked_packets == 0 ||
252 conn->c_unacked_bytes < len) {
255 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
256 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
259 conn->c_unacked_bytes -= len;
260 conn->c_unacked_packets--;
263 conn->c_xmit_rm = rm;
271 if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
272 ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
275 conn->c_xmit_rdma_sent = 1;
281 if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
282 conn->c_xmit_sg < rm->m_nents) {
283 ret = conn->c_trans->xmit(conn, rm,
284 conn->c_xmit_hdr_off,
285 conn->c_xmit_sg,
286 conn->c_xmit_data_off);
290 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
293 conn->c_xmit_hdr_off);
294 conn->c_xmit_hdr_off += tmp;
298 sg = &rm->m_sg[conn->c_xmit_sg];
301 conn->c_xmit_data_off);
302 conn->c_xmit_data_off += tmp;
304 if (conn->c_xmit_data_off == sg->length) {
305 conn->c_xmit_data_off = 0;
307 conn->c_xmit_sg++;
309 conn->c_xmit_sg == rm->m_nents);
319 if (conn->c_trans->xmit_complete)
320 conn->c_trans->xmit_complete(conn);
334 mutex_unlock(&conn->c_send_lock);
336 if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
346 spin_lock_irqsave(&conn->c_lock, flags);
347 if (!list_empty(&conn->c_send_queue)) {
351 spin_unlock_irqrestore(&conn->c_lock, flags);
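
rds_send_xmit() is the per-connection drain loop: it trylocks c_send_lock so only one caller transmits on a connection at a time, flushes any pending congestion-map update, then alternates between finishing the message cached in c_xmit_rm and dequeuing the next one from c_send_queue (moving it to c_retrans), stopping when the send quota runs out. Lines 251-260 show the ack-budget bookkeeping; a standalone model of just that step, with stand-in names for the struct and the two sysctls:

    #include <stdbool.h>
    #include <stdint.h>

    static unsigned long sysctl_max_unacked_packets = 8;
    static unsigned long sysctl_max_unacked_bytes   = 16 << 10;

    struct conn_quota {
            unsigned long unacked_packets;
            unsigned long unacked_bytes;
    };

    /* Returns true when this message must request an explicit ack:
     * either the packet budget or the byte budget has run out, and
     * both are then refilled from the sysctl maxima (cf. 251-260). */
    static bool ack_required(struct conn_quota *q, uint32_t len)
    {
            if (q->unacked_packets == 0 || q->unacked_bytes < len) {
                    q->unacked_packets = sysctl_max_unacked_packets;
                    q->unacked_bytes   = sysctl_max_unacked_bytes;
                    return true;
            }
            q->unacked_bytes -= len;
            q->unacked_packets--;
            return false;
    }

Requesting an ack only when a budget is exhausted batches acknowledgements while still bounding how much unacked data a connection can accumulate.
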
383 int rds_send_acked_before(struct rds_connection *conn, u64 seq)
388 spin_lock(&conn->c_lock);
390 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
396 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
402 spin_unlock(&conn->c_lock);
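
rds_send_acked_before() reports whether everything older than a given sequence number has been acked. Because both c_retrans and c_send_queue are kept in sequence order, only the oldest entry of each queue needs inspecting (the loops above effectively break after the first entry). A simplified model under that ordering assumption, with illustrative types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    struct qmsg {
            uint64_t seq;
            struct qmsg *next;
    };

    /* True iff no message older than seq is still queued anywhere;
     * both lists are sequence-ordered, so the head is the oldest. */
    static bool acked_before(struct qmsg *retrans, struct qmsg *sendq,
                             uint64_t seq)
    {
            if (retrans != NULL && retrans->seq < seq)
                    return false;
            if (sendq != NULL && sendq->seq < seq)
                    return false;
            return true;
    }
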
470 struct rds_message *rds_send_get_message(struct rds_connection *conn,
476 spin_lock_irqsave(&conn->c_lock, flags);
478 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
486 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
495 spin_unlock_irqrestore(&conn->c_lock, flags);
579 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
586 spin_lock_irqsave(&conn->c_lock, flags);
588 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
600 spin_unlock_irqrestore(&conn->c_lock, flags);
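
rds_send_drop_acked() walks c_retrans under c_lock and unlinks every message covered by the cumulative ack, completing each back to its owning socket; the sequence ordering lets it stop at the first unacked message. A sketch assuming the default "seq <= ack" acked test (the kernel also accepts a transport-supplied predicate), with illustrative types and the completion step reduced to a callback:

    #include <stdint.h>
    #include <stddef.h>

    struct amsg {
            uint64_t seq;
            struct amsg *next;
    };

    /* Drop everything up to and including ack; returns the new head.
     * The queue is sequence-ordered, so the first survivor ends the
     * walk (the analogue of breaking out of the loop at line 588). */
    static struct amsg *drop_acked(struct amsg *retrans, uint64_t ack,
                                   void (*complete)(struct amsg *))
    {
            while (retrans != NULL && retrans->seq <= ack) {
                    struct amsg *done = retrans;
                    retrans = retrans->next;  /* unlink from the queue */
                    complete(done);           /* completion to socket  */
            }
            return retrans;
    }
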
610 struct rds_connection *conn;
635 conn = NULL;
637 /* now remove the messages from the conn list as needed */
651 * else beat us to removing it from the conn. If we race
658 if (conn != rm->m_inc.i_conn) {
659 if (conn)
660 spin_unlock_irqrestore(&conn->c_lock, flags);
661 conn = rm->m_inc.i_conn;
662 spin_lock_irqsave(&conn->c_lock, flags);
671 if (conn)
672 spin_unlock_irqrestore(&conn->c_lock, flags);
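
Lines 658-672 show the lock discipline in the remove-from-sock path: when a batch of messages spans several connections, the current connection's c_lock stays held across consecutive messages and is swapped only when the owning connection changes, with a final unlock after the loop. A standalone model of that pattern, with pthread mutexes standing in for c_lock and a callback for the per-message work:

    #include <pthread.h>
    #include <stddef.h>

    struct lconn { pthread_mutex_t lock; };
    struct lmsg  { struct lconn *conn; struct lmsg *next; };

    static void walk_messages(struct lmsg *head, void (*drop)(struct lmsg *))
    {
            struct lconn *conn = NULL;
            struct lmsg *m;

            for (m = head; m != NULL; m = m->next) {
                    if (conn != m->conn) {    /* swap locks on conn change */
                            if (conn)
                                    pthread_mutex_unlock(&conn->lock);
                            conn = m->conn;
                            pthread_mutex_lock(&conn->lock);
                    }
                    drop(m);                  /* work done under the lock */
            }
            if (conn)
                    pthread_mutex_unlock(&conn->lock);
    }
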
691 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
734 rm->m_inc.i_conn = conn;
737 spin_lock(&conn->c_lock);
738 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
739 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
741 spin_unlock(&conn->c_lock);
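
rds_send_queue_rm() stamps the message with the connection's next transmit sequence (stored big-endian, line 738) and appends it to c_send_queue inside a single c_lock hold, which is what guarantees that queue order always equals sequence order. A minimal model with a pthread mutex in place of the spinlock and stand-in types:

    #include <pthread.h>
    #include <stdint.h>
    #include <stddef.h>

    struct smsg {
            uint64_t seq;      /* the kernel stores this big-endian */
            struct smsg *next;
    };

    struct sconn {
            pthread_mutex_t lock;
            uint64_t next_tx_seq;
            struct smsg *head, **tailp;   /* tailp starts as &head */
    };

    /* Assign the sequence number and append to the tail under one
     * lock hold, so no other sender can interleave between the two. */
    static void queue_rm(struct sconn *c, struct smsg *m)
    {
            pthread_mutex_lock(&c->lock);
            m->seq  = c->next_tx_seq++;   /* cf. line 738 */
            m->next = NULL;
            *c->tailp = m;                /* list_add_tail equivalent */
            c->tailp  = &m->next;
            pthread_mutex_unlock(&c->lock);
    }
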
806 struct rds_connection *conn;
850 * Caching the conn in the socket helps a lot. */
852 conn = rs->rs_conn;
854 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
857 if (IS_ERR(conn)) {
858 ret = PTR_ERR(conn);
861 rs->rs_conn = conn;
870 conn->c_trans->xmit_rdma == NULL) {
872 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
873 rm->m_rdma_op, conn->c_trans->xmit_rdma);
882 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
883 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
884 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
886 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
892 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
905 rds_send_queue_rm(rs, conn, rm,
926 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
927 rds_send_worker(&conn->c_send_w.work);
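
Within rds_sendmsg(), lines 882-884 show the lazy-connect kick: if the connection is still DOWN, the first sender to win the test-and-set on RDS_RECONNECT_PENDING queues the connect worker, while every other sender simply queues its message and waits. A model of that gate using C11 atomics in place of test_and_set_bit/queue_delayed_work; everything beyond the bit's role is a stand-in:

    #include <stdatomic.h>

    enum conn_state { CONN_DOWN, CONN_CONNECTING, CONN_UP };

    struct kconn {
            _Atomic int state;
            atomic_flag reconnect_pending;  /* init with ATOMIC_FLAG_INIT */
    };

    /* Only the caller that flips reconnect_pending from clear to set
     * schedules the connect work; concurrent callers see it already
     * set and fall through (cf. lines 882-884). */
    static void kick_connect(struct kconn *c,
                             void (*queue_connect_work)(struct kconn *))
    {
            if (atomic_load(&c->state) == CONN_DOWN &&
                !atomic_flag_test_and_set(&c->reconnect_pending))
                    queue_connect_work(c);
    }
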
948 rds_send_pong(struct rds_connection *conn, __be16 dport)
960 rm->m_daddr = conn->c_faddr;
966 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
967 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
968 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
970 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
974 spin_lock_irqsave(&conn->c_lock, flags);
975 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
978 rm->m_inc.i_conn = conn;
981 conn->c_next_tx_seq);
982 conn->c_next_tx_seq++;
983 spin_unlock_irqrestore(&conn->c_lock, flags);
988 queue_delayed_work(rds_wq, &conn->c_send_w, 0);