Lines matching refs:cp — uses of struct rds_conn_path *cp in net/rds/send.c (Linux RDS send path)

65 void rds_send_path_reset(struct rds_conn_path *cp)
70 if (cp->cp_xmit_rm) {
71 rm = cp->cp_xmit_rm;
72 cp->cp_xmit_rm = NULL;
81 cp->cp_xmit_sg = 0;
82 cp->cp_xmit_hdr_off = 0;
83 cp->cp_xmit_data_off = 0;
84 cp->cp_xmit_atomic_sent = 0;
85 cp->cp_xmit_rdma_sent = 0;
86 cp->cp_xmit_data_sent = 0;
88 cp->cp_conn->c_map_queued = 0;
90 cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
91 cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;
94 spin_lock_irqsave(&cp->cp_lock, flags);
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
99 list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
100 spin_unlock_irqrestore(&cp->cp_lock, flags);
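
The block above is rds_send_path_reset(): it forgets any partially transmitted message, refills the unacked-packet/byte budget, and moves everything still on the retransmit list back to the front of the send queue under cp_lock. A minimal user-space sketch of that requeue pattern, with hypothetical names (path_t, msg_t, path_reset) and a pthread mutex standing in for the spinlock:

#include <pthread.h>

typedef struct msg { struct msg *next; } msg_t;

typedef struct path {
    pthread_mutex_t lock;                 /* stands in for cp->cp_lock     */
    msg_t *send_queue;                    /* cp->cp_send_queue (list head) */
    msg_t *retrans;                       /* cp->cp_retrans (list head)    */
    unsigned xmit_sg, xmit_hdr_off, xmit_data_off;  /* partial-send cursors */
} path_t;

static void path_reset(path_t *p)
{
    /* drop the cursors for any half-sent message */
    p->xmit_sg = p->xmit_hdr_off = p->xmit_data_off = 0;

    pthread_mutex_lock(&p->lock);
    /* splice the retransmit list onto the head of the send queue,
     * mirroring list_splice_init(&cp->cp_retrans, &cp->cp_send_queue) */
    if (p->retrans) {
        msg_t *tail = p->retrans;
        while (tail->next)
            tail = tail->next;
        tail->next = p->send_queue;
        p->send_queue = p->retrans;
        p->retrans = NULL;
    }
    pthread_mutex_unlock(&p->lock);
}
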
104 static int acquire_in_xmit(struct rds_conn_path *cp)
106 return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
109 static void release_in_xmit(struct rds_conn_path *cp)
111 clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
118 if (waitqueue_active(&cp->cp_waitq))
119 wake_up_all(&cp->cp_waitq);
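
acquire_in_xmit()/release_in_xmit() above form a lock-free single-sender gate: only one thread may run the transmit loop per path at a time, and the release then wakes anyone sleeping on cp_waitq. A sketch of the same gate using C11 atomics in place of the kernel's test_and_set_bit_lock()/clear_bit_unlock(); IN_XMIT and path_flags are hypothetical stand-ins:

#include <stdatomic.h>
#include <stdbool.h>

#define IN_XMIT (1u << 0)       /* stand-in for RDS_IN_XMIT in cp->cp_flags */

static atomic_uint path_flags;

static bool acquire_in_xmit(void)
{
    /* acquire ordering: the transmit loop's accesses cannot float above this */
    return (atomic_fetch_or_explicit(&path_flags, IN_XMIT,
                                     memory_order_acquire) & IN_XMIT) == 0;
}

static void release_in_xmit(void)
{
    /* release ordering: everything the sender did is visible before the bit
     * clears; the real helper then wake_up_all()s cp->cp_waitq */
    atomic_fetch_and_explicit(&path_flags, ~IN_XMIT, memory_order_release);
}
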
136 int rds_send_xmit(struct rds_conn_path *cp)
138 struct rds_connection *conn = cp->cp_conn;
159 if (!acquire_in_xmit(cp)) {
165 if (rds_destroy_pending(cp->cp_conn)) {
166 release_in_xmit(cp);
179 send_gen = READ_ONCE(cp->cp_send_gen) + 1;
180 WRITE_ONCE(cp->cp_send_gen, send_gen);
186 if (!rds_conn_path_up(cp)) {
187 release_in_xmit(cp);
193 conn->c_trans->xmit_path_prepare(cp);
201 rm = cp->cp_xmit_rm;
225 rm->m_inc.i_conn_path = cp;
226 rm->m_inc.i_conn = cp->cp_conn;
228 cp->cp_xmit_rm = rm;
251 spin_lock_irqsave(&cp->cp_lock, flags);
253 if (!list_empty(&cp->cp_send_queue)) {
254 rm = list_entry(cp->cp_send_queue.next,
264 &cp->cp_retrans);
267 spin_unlock_irqrestore(&cp->cp_lock, flags);
282 spin_lock_irqsave(&cp->cp_lock, flags);
285 spin_unlock_irqrestore(&cp->cp_lock, flags);
291 if (cp->cp_unacked_packets == 0 ||
292 cp->cp_unacked_bytes < len) {
295 cp->cp_unacked_packets =
297 cp->cp_unacked_bytes =
301 cp->cp_unacked_bytes -= len;
302 cp->cp_unacked_packets--;
305 cp->cp_xmit_rm = rm;
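
Lines 291-302 above throttle acknowledgements: an ack is only requested once a budget of unacked packets or bytes has been used up, at which point both budgets are refilled from the sysctl limits. A hedged sketch of that decision (the limit values are placeholders, not the kernel defaults):

#include <stdbool.h>
#include <stddef.h>

static unsigned long max_unacked_packets = 8;         /* placeholder limits,  */
static unsigned long max_unacked_bytes   = 16 * 1024; /* not kernel defaults  */

struct budget {
    unsigned long packets;    /* cp->cp_unacked_packets */
    unsigned long bytes;      /* cp->cp_unacked_bytes   */
};

/* returns true when this message should carry an ack-required flag */
static bool ack_required(struct budget *b, size_t len)
{
    if (b->packets == 0 || b->bytes < len) {
        b->packets = max_unacked_packets;
        b->bytes   = max_unacked_bytes;
        return true;
    }
    b->bytes -= len;
    b->packets--;
    return false;
}
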
309 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
321 cp->cp_xmit_rdma_sent = 1;
325 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
337 cp->cp_xmit_atomic_sent = 1;
363 if (rm->data.op_active && !cp->cp_xmit_data_sent) {
367 cp->cp_xmit_hdr_off,
368 cp->cp_xmit_sg,
369 cp->cp_xmit_data_off);
373 if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
376 cp->cp_xmit_hdr_off);
377 cp->cp_xmit_hdr_off += tmp;
381 sg = &rm->data.op_sg[cp->cp_xmit_sg];
384 cp->cp_xmit_data_off);
385 cp->cp_xmit_data_off += tmp;
387 if (cp->cp_xmit_data_off == sg->length) {
388 cp->cp_xmit_data_off = 0;
390 cp->cp_xmit_sg++;
391 BUG_ON(ret != 0 && cp->cp_xmit_sg ==
396 if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
397 (cp->cp_xmit_sg == rm->data.op_nents))
398 cp->cp_xmit_data_sent = 1;
406 if (!rm->data.op_active || cp->cp_xmit_data_sent) {
407 cp->cp_xmit_rm = NULL;
408 cp->cp_xmit_sg = 0;
409 cp->cp_xmit_hdr_off = 0;
410 cp->cp_xmit_data_off = 0;
411 cp->cp_xmit_rdma_sent = 0;
412 cp->cp_xmit_atomic_sent = 0;
413 cp->cp_xmit_data_sent = 0;
421 conn->c_trans->xmit_path_complete(cp);
422 release_in_xmit(cp);
451 raced = send_gen != READ_ONCE(cp->cp_send_gen);
454 !list_empty(&cp->cp_send_queue)) && !raced) {
458 if (rds_destroy_pending(cp->cp_conn))
461 queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
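
Most of rds_send_xmit() above revolves around the cp_xmit_* cursors: the header and each scatter-gather entry are pushed as far as the transport will accept, and cp_xmit_hdr_off / cp_xmit_sg / cp_xmit_data_off remember where to resume on the next pass (the final lines then use cp_send_gen to requeue the worker only if no other sender raced in after release_in_xmit()). A user-space sketch of the cursor walk; transport_push() and the struct names are hypothetical:

#include <stdbool.h>
#include <stddef.h>

#define HDR_LEN 48u       /* placeholder; stands in for sizeof(struct rds_header) */

struct seg { const char *buf; size_t len; };

struct msg {
    const char *hdr;                /* HDR_LEN bytes of header */
    struct seg *sg;                 /* data segments           */
    unsigned    nsegs;
};

struct cursor {                     /* mirrors the cp_xmit_* fields */
    size_t   hdr_off;
    unsigned sg;
    size_t   data_off;
};

/* hypothetical transport hook: accepts up to len bytes, may take fewer */
extern size_t transport_push(const char *buf, size_t len);

/* returns true once the header and every segment have gone out */
static bool xmit_data(const struct msg *m, struct cursor *c)
{
    if (c->hdr_off < HDR_LEN)
        c->hdr_off += transport_push(m->hdr + c->hdr_off,
                                     HDR_LEN - c->hdr_off);

    while (c->hdr_off == HDR_LEN && c->sg < m->nsegs) {
        const struct seg *s = &m->sg[c->sg];
        size_t n = transport_push(s->buf + c->data_off,
                                  s->len - c->data_off);

        c->data_off += n;
        if (c->data_off == s->len) {    /* finished this segment */
            c->data_off = 0;
            c->sg++;
        }
        if (n == 0)                     /* transport is full: resume later */
            break;
    }
    return c->hdr_off == HDR_LEN && c->sg == m->nsegs;
}
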
685 void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
692 spin_lock_irqsave(&cp->cp_lock, flags);
694 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
706 spin_unlock_irqrestore(&cp->cp_lock, flags);
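
rds_send_path_drop_acked() above walks cp_retrans under cp_lock and drops every message the peer has acknowledged. A sketch of that ack processing on a plain singly linked list, with the check reduced to the default seq <= ack comparison and a hypothetical free_msg():

#include <stdint.h>
#include <pthread.h>

struct msg { struct msg *next; uint64_t seq; };

extern void free_msg(struct msg *m);    /* hypothetical teardown */

static void drop_acked(struct msg **retrans, uint64_t ack,
                       pthread_mutex_t *lock)
{
    pthread_mutex_lock(lock);
    for (struct msg **pp = retrans; *pp; ) {
        struct msg *m = *pp;

        if (m->seq <= ack) {            /* everything up to 'ack' is done */
            *pp = m->next;
            free_msg(m);
        } else {
            pp = &m->next;
        }
    }
    pthread_mutex_unlock(lock);
}

The real code moves the acked messages onto a private list and completes them after dropping cp_lock; the sketch frees them in place for brevity.
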
725 struct rds_conn_path *cp;
756 cp = rm->m_inc.i_conn_path;
758 cp = &conn->c_path[0];
760 spin_lock_irqsave(&cp->cp_lock, flags);
767 spin_unlock_irqrestore(&cp->cp_lock, flags);
771 spin_unlock_irqrestore(&cp->cp_lock, flags);
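
The fragment around lines 725-771 picks which path a queued message belongs to: on a multipath-capable transport the message remembers the path it was queued on (m_inc.i_conn_path), otherwise everything goes through c_path[0]. A hypothetical reduction of that selection:

#include <stdbool.h>

struct conn_path { int id; };

struct conn {
    bool             mp_capable;      /* c_trans->t_mp_capable */
    struct conn_path paths[8];        /* c_path[]              */
};

struct message {
    struct conn_path *queued_path;    /* m_inc.i_conn_path     */
};

static struct conn_path *pick_path(struct conn *conn, struct message *rm)
{
    return conn->mp_capable ? rm->queued_path : &conn->paths[0];
}
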
818 struct rds_conn_path *cp,
863 rm->m_inc.i_conn_path = cp;
866 spin_lock(&cp->cp_lock);
867 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
868 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
870 spin_unlock(&cp->cp_lock);
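
The rds_send_queue_rm() fragment above shows why cp_lock covers both steps: assigning cp_next_tx_seq and appending to cp_send_queue happen atomically, so sequence numbers always match queue order. A sketch of that enqueue, with a pthread mutex and hypothetical names standing in:

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

struct msg { struct msg *next; uint64_t seq; };

struct path {
    pthread_mutex_t lock;             /* cp->cp_lock        */
    uint64_t        next_tx_seq;      /* cp->cp_next_tx_seq */
    struct msg     *q_head, *q_tail;  /* cp->cp_send_queue  */
};

static void queue_msg(struct path *p, struct msg *m)
{
    pthread_mutex_lock(&p->lock);
    m->seq  = p->next_tx_seq++;       /* cp->cp_next_tx_seq++ */
    m->next = NULL;
    if (p->q_tail)
        p->q_tail->next = m;
    else
        p->q_head = m;
    p->q_tail = m;
    pthread_mutex_unlock(&p->lock);
}
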
1421 rds_send_probe(struct rds_conn_path *cp, __be16 sport,
1434 rm->m_daddr = cp->cp_conn->c_faddr;
1437 rds_conn_path_connect_if_down(cp);
1439 ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
1443 spin_lock_irqsave(&cp->cp_lock, flags);
1444 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
1447 rm->m_inc.i_conn = cp->cp_conn;
1448 rm->m_inc.i_conn_path = cp;
1451 cp->cp_next_tx_seq);
1453 cp->cp_next_tx_seq++;
1456 cp->cp_conn->c_trans->t_mp_capable) {
1458 u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
1468 spin_unlock_irqrestore(&cp->cp_lock, flags);
1475 if (!rds_destroy_pending(cp->cp_conn))
1476 queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
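
rds_send_probe() above builds a zero-payload control message, stamps it with cp_next_tx_seq under cp_lock exactly like user data (plus a generation number on multipath-capable transports), and then pokes the send worker unless the connection is being torn down. A hedged outline of that flow; every helper name here is hypothetical:

#include <stdint.h>
#include <stdbool.h>

struct path;                                       /* as sketched earlier            */
struct msg;

extern struct msg *make_probe_msg(uint16_t sport, uint16_t dport);
extern void queue_msg(struct path *p, struct msg *m);  /* stamps the seq under lock   */
extern bool shutting_down(struct path *p);         /* ~ rds_destroy_pending()         */
extern void schedule_send(struct path *p);         /* ~ queue_delayed_work(cp_send_w) */

static int send_probe(struct path *p, uint16_t sport, uint16_t dport)
{
    struct msg *m = make_probe_msg(sport, dport);

    if (!m)
        return -1;                   /* the real code returns -ENOMEM */

    queue_msg(p, m);
    if (!shutting_down(p))
        schedule_send(p);            /* don't kick a dying connection */
    return 0;
}
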
1489 rds_send_pong(struct rds_conn_path *cp, __be16 dport)
1491 return rds_send_probe(cp, 0, dport, 0);
1498 struct rds_conn_path *cp = &conn->c_path[cp_index];
1500 spin_lock_irqsave(&cp->cp_lock, flags);
1502 spin_unlock_irqrestore(&cp->cp_lock, flags);
1506 spin_unlock_irqrestore(&cp->cp_lock, flags);
1507 rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
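
The last two fragments are the thin wrappers on top of rds_send_probe(): a pong answers on the path the ping arrived on, while a ping picks a path by index, appears to check under cp_lock that a probe is not already pending, and targets the reserved probe port. A hypothetical sketch of the wrappers (PROBE_PORT is a placeholder, not the value of RDS_FLAG_PROBE_PORT, and the duplicate-ping check is omitted):

#include <stdint.h>

#define PROBE_PORT 0x1u              /* placeholder for RDS_FLAG_PROBE_PORT */

struct path;
extern int send_probe(struct path *p, uint16_t sport, uint16_t dport);

static int send_pong(struct path *p, uint16_t dport)
{
    /* mirrors rds_send_pong(): rds_send_probe(cp, 0, dport, 0) */
    return send_probe(p, 0, dport);
}

static int send_ping(struct path *p)
{
    /* mirrors the probe-port ping at line 1507 */
    return send_probe(p, PROBE_PORT, 0);
}
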