Lines Matching refs:ssk
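(These are the symbol-reference hits for ssk, the struct sdp_sock pointer used throughout the SDP header; the number opening each entry is that line's position in the source file.)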

147 #define rx_ring_posted(ssk) ring_posted(ssk->rx_ring)
149 #define tx_ring_posted(ssk) (ring_posted(ssk->tx_ring) + \
150 (ssk->tx_ring.rdma_inflight ? ssk->tx_ring.rdma_inflight->busy : 0))
152 #define tx_ring_posted(ssk) ring_posted(ssk->tx_ring)
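The two tx_ring_posted definitions are alternate branches of a compile-time option: the first additionally counts an in-flight zero-copy RDMA transfer via rdma_inflight->busy, the second is the plain variant. Both build on ring_posted, which by the usual free-running-counter convention is the head/tail distance; a minimal sketch, assuming atomic head and tail counters that wrap as unsigned values:

    /* Sketch only, not the verbatim header: entries posted to the QP
     * but not yet reaped = head - tail of free-running counters. */
    #define ring_head(ring)   (atomic_read(&(ring).head))
    #define ring_tail(ring)   (atomic_read(&(ring).tail))
    #define ring_posted(ring) (ring_head(ring) - ring_tail(ring))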
262 struct sdp_sock *ssk;
322 #define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))
365 #define SDP_TIMEWAIT 0x0001 /* In ssk timewait state. */
409 #define rcv_nxt(ssk) atomic_read(&(ssk->rcv_nxt))
414 #define mseq_ack(ssk) (atomic_read(&ssk->mseq_ack))
421 #define remote_credits(ssk) (atomic_read(&ssk->remote_credits))
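rcv_nxt, mseq_ack, and remote_credits (like tx_credits at 322) are thin atomic_read wrappers, so hot paths can sample protocol state without holding the socket lock. A hypothetical consumer, using the debug helper that appears later in this listing:

    /* Hypothetical debug line; shows the accessors are lock-free reads. */
    sdp_dbg_data(ssk->socket, "rcv_nxt %d mseq_ack %d rcredits %d\n",
        rcv_nxt(ssk), mseq_ack(ssk), remote_credits(ssk));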
448 #define SDP_RLOCK(ssk) rw_rlock(&(ssk)->lock)
449 #define SDP_WLOCK(ssk) rw_wlock(&(ssk)->lock)
450 #define SDP_RUNLOCK(ssk) rw_runlock(&(ssk)->lock)
451 #define SDP_WUNLOCK(ssk) rw_wunlock(&(ssk)->lock)
452 #define SDP_WLOCK_ASSERT(ssk) rw_assert(&(ssk)->lock, RA_WLOCKED)
453 #define SDP_RLOCK_ASSERT(ssk) rw_assert(&(ssk)->lock, RA_RLOCKED)
454 #define SDP_LOCK_ASSERT(ssk) rw_assert(&(ssk)->lock, RA_LOCKED)
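The SDP_*LOCK family wraps an rwlock embedded in struct sdp_sock, so callers lock by socket rather than by member, and the assert variants document the expected lock state at function entry. An illustrative writer path (example_update is not part of the header):

    static void
    example_update(struct sdp_sock *ssk)
    {
        SDP_WLOCK(ssk);
        SDP_WLOCK_ASSERT(ssk);
        /* ... modify connection state ... */
        SDP_WUNLOCK(ssk);
    }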
486 static inline void sdp_arm_rx_cq(struct sdp_sock *ssk)
488 sdp_prf(ssk->socket, NULL, "Arming RX cq");
489 sdp_dbg_data(ssk->socket, "Arming RX cq\n");
491 ib_req_notify_cq(ssk->rx_ring.cq, IB_CQ_NEXT_COMP);
494 static inline void sdp_arm_tx_cq(struct sdp_sock *ssk)
496 sdp_prf(ssk->socket, NULL, "Arming TX cq");
497 sdp_dbg_data(ssk->socket, "Arming TX cq. credits: %d, posted: %d\n",
498 tx_credits(ssk), tx_ring_posted(ssk));
500 ib_req_notify_cq(ssk->tx_ring.cq, IB_CQ_NEXT_COMP);
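Both arm helpers end in ib_req_notify_cq(..., IB_CQ_NEXT_COMP), which requests a single, one-shot completion event. Because a completion can land between the last poll and the arming, the usual idiom is arm first, then poll once more; a sketch with a placeholder poll routine:

    sdp_arm_rx_cq(ssk);     /* request the next CQ event */
    sdp_poll_rx_cq(ssk);    /* placeholder name: drain completions that
                             * arrived before the arming took effect */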
507 static inline int tx_slots_free(struct sdp_sock *ssk)
511 min_free = MIN(tx_credits(ssk),
512 SDP_TX_SIZE - tx_ring_posted(ssk));
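Only the min_free computation of tx_slots_free is matched, but the shape is clear: usable slots are bounded both by peer-granted credits and by physical ring space. A hedged reconstruction, assuming the driver reserves SDP_MIN_TX_CREDITS (a constant not shown in the matches) for control messages:

    static inline int
    tx_slots_free(struct sdp_sock *ssk)
    {
        int min_free;

        min_free = MIN(tx_credits(ssk),
            SDP_TX_SIZE - tx_ring_posted(ssk));
        /* Reserve for control traffic: assumed, not in the matches. */
        if (min_free < SDP_MIN_TX_CREDITS)
            return (0);
        return (min_free - SDP_MIN_TX_CREDITS);
    }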
640 static inline int sdp_tx_ring_slots_left(struct sdp_sock *ssk)
642 return SDP_TX_SIZE - tx_ring_posted(ssk);
645 static inline int credit_update_needed(struct sdp_sock *ssk)
649 c = remote_credits(ssk);
652 return unlikely(c < rx_ring_posted(ssk)) &&
653 likely(tx_credits(ssk) > 0) &&
654 likely(sdp_tx_ring_slots_left(ssk));
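credit_update_needed combines three matched tests: the peer looks short on credits relative to our posted receive buffers, we still hold a TX credit, and the TX ring has a free slot so the update can actually be sent. Lines 646-651 are elided by the search; a sketch filling the gap the obvious way:

    static inline int
    credit_update_needed(struct sdp_sock *ssk)
    {
        int c;

        c = remote_credits(ssk);
        /* Headroom heuristic below is an assumption about the elided
         * lines, not taken from the listing. */
        if (likely(c > SDP_MIN_TX_CREDITS))
            c += c / 2;
        return (unlikely(c < rx_ring_posted(ssk)) &&
            likely(tx_credits(ssk) > 0) &&
            likely(sdp_tx_ring_slots_left(ssk)));
    }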
665 sdp_cleanup_sdp_buf(struct sdp_sock *ssk, struct sdp_buf *sbuf,
672 dev = ssk->ib_device;
678 void sdp_set_default_moderation(struct sdp_sock *ssk);
680 void sdp_urg(struct sdp_sock *ssk, struct mbuf *mb);
681 void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk);
683 struct sdp_sock *sdp_notify(struct sdp_sock *ssk, int error);
690 int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
691 void sdp_tx_ring_destroy(struct sdp_sock *ssk);
692 int sdp_xmit_poll(struct sdp_sock *ssk, int force);
693 void sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb);
694 void sdp_post_sends(struct sdp_sock *ssk, int wait);
695 void sdp_post_keepalive(struct sdp_sock *ssk);
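The TX prototypes pair naturally: sdp_xmit_poll reaps send completions (force drains regardless of thresholds), sdp_post_send queues one mbuf, and sdp_post_sends flushes whatever credits and ring space allow. A hypothetical send path, with ordering illustrative rather than prescribed by the header (the wait argument is presumed to be an mbuf M_* allocation flag):

    SDP_WLOCK(ssk);
    if (tx_slots_free(ssk) == 0)
        sdp_xmit_poll(ssk, 1);      /* force-reap to free ring slots */
    sdp_post_sends(ssk, M_NOWAIT);  /* flag semantics assumed */
    SDP_WUNLOCK(ssk);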
698 void sdp_rx_ring_init(struct sdp_sock *ssk);
699 int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
700 void sdp_rx_ring_destroy(struct sdp_sock *ssk);
701 int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size);
702 int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size);
703 void sdp_do_posts(struct sdp_sock *ssk);
704 void sdp_rx_comp_full(struct sdp_sock *ssk);
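On the RX side the split is init/create/destroy for the ring plus byte-size buffer management: sdp_init_buffers and sdp_resize_buffers both take a u32 size, suggesting receive buffers are (re)sized to the negotiated advertisement. A hypothetical connection-setup fragment (error handling and the actual size source elided):

    if (sdp_rx_ring_create(ssk, device) != 0)
        goto failed;                 /* label is illustrative */
    sdp_init_buffers(ssk, rcvbuf);   /* rcvbuf: assumed variable */
    sdp_arm_rx_cq(ssk);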
709 int sdp_handle_srcavail(struct sdp_sock *ssk, struct sdp_srcah *srcah);
710 void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack);
711 void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
713 int sdp_handle_rdma_read_cqe(struct sdp_sock *ssk);
716 int sdp_post_rdma_rd_compl(struct sdp_sock *ssk,
722 int sdp_process_rx(struct sdp_sock *ssk);
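The final group is the zero-copy SrcAvail/RDMA-read path, which is what the rdma_inflight term in the first tx_ring_posted variant accounts for; sdp_process_rx (722) is the generic CQ drain. A hypothetical completion-handler shape tying the RX pieces together (names beyond the listed prototypes are placeholders):

    static void
    sdp_rx_comp_handler(struct ib_cq *cq, void *arg)  /* placeholder name */
    {
        struct sdp_sock *ssk = arg;

        sdp_process_rx(ssk);   /* reap CQEs into the socket */
        sdp_do_posts(ssk);     /* repost buffers, push data up */
        sdp_arm_rx_cq(ssk);    /* re-arm... */
        sdp_process_rx(ssk);   /* ...and drain the arming race (see the
                                * note at the arm helpers above) */
    }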