#ifndef _SDP_H_
#define _SDP_H_

#define	LINUXKPI_PARAM_PREFIX ib_sdp_

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ofed.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/domain.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>

#include <linux/device.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_sdp.h>

#ifdef SDP_DEBUG
#define	CONFIG_INFINIBAND_SDP_DEBUG
#endif

#include "sdp_dbg.h"

#undef LIST_HEAD
/* Restore the sys/queue.h definition clobbered by the Linux compat headers. */
#define LIST_HEAD(name, type)                                           \
struct name {                                                           \
        struct type *lh_first;  /* first element */                     \
}

/*
 * Interval between successive polls in the Tx routine when polling is used
 * instead of interrupts (in per-core Tx rings); should be a power of 2.
 */
#define SDP_TX_POLL_MODER	16
#define SDP_TX_POLL_TIMEOUT	(HZ / 20)
#define SDP_NAGLE_TIMEOUT (HZ / 10)

#define SDP_SRCAVAIL_CANCEL_TIMEOUT (HZ * 5)
#define SDP_SRCAVAIL_ADV_TIMEOUT (1 * HZ)
#define SDP_SRCAVAIL_PAYLOAD_LEN 1

#define SDP_RESOLVE_TIMEOUT 1000
#define SDP_ROUTE_TIMEOUT 1000
#define SDP_RETRY_COUNT 5
#define SDP_KEEPALIVE_TIME (120 * 60 * HZ)
#define SDP_FIN_WAIT_TIMEOUT (60 * HZ) /* like TCP_FIN_TIMEOUT */

#define SDP_TX_SIZE 0x40
#define SDP_RX_SIZE 0x40

#define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))
#define SDP_FMR_POOL_SIZE	1024
#define SDP_FMR_DIRTY_SIZE	(SDP_FMR_POOL_SIZE / 4)

#define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2))

/* Length of data inlined in the mbuf; the rest will be rx'ed into frags. */
#define SDP_HEAD_SIZE (sizeof(struct sdp_bsdh))

/*
 * Limit on the tx payload length, in case the sink supports bigger buffers
 * than the source can handle, and on the rx fragment size (bounded by the
 * sge->length field).
 */
#define	SDP_MAX_PACKET	(1 << 16)
#define SDP_MAX_PAYLOAD (SDP_MAX_PACKET - SDP_HEAD_SIZE)

#define SDP_MAX_RECV_SGES (SDP_MAX_PACKET / MCLBYTES)
#define SDP_MAX_SEND_SGES ((SDP_MAX_PACKET / MCLBYTES) + 2)

#define SDP_NUM_WC 4

#define SDP_DEF_ZCOPY_THRESH (64 * 1024)
#define SDP_MIN_ZCOPY_THRESH PAGE_SIZE
#define SDP_MAX_ZCOPY_THRESH 1048576

#define SDP_OP_RECV 0x800000000LL
#define SDP_OP_SEND 0x400000000LL
#define SDP_OP_RDMA 0x200000000LL
#define SDP_OP_NOP  0x100000000LL
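
/*
 * The SDP_OP_* values are tags OR'ed into the high bits of the 64-bit wr_id
 * of a posted work request so the completion handler can tell what kind of
 * work request finished.  A minimal sketch (hypothetical handler name; the
 * low bits are assumed to carry the ring index):
 *
 *	tx_req.wr_id = ring_head(ssk->tx_ring) | SDP_OP_SEND;
 *	...
 *	if (wc->wr_id & SDP_OP_SEND)
 *		handle_send_completion(ssk, wc);	// hypothetical
 */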

/* How long (in jiffies) to block the sender until tx completion. */
#define SDP_BZCOPY_POLL_TIMEOUT (HZ / 10)

#define SDP_AUTO_CONF	0xffff
#define AUTO_MOD_DELAY (HZ / 4)

struct sdp_mb_cb {
	__u32		seq;		/* Starting sequence number	*/
	struct bzcopy_state      *bz;
	struct rx_srcavail_state *rx_sa;
	struct tx_srcavail_state *tx_sa;
};

#define	M_PUSH	M_PROTO1	/* Do a 'push'. */
#define	M_URG	M_PROTO2	/* Mark as urgent (oob). */

#define SDP_SKB_CB(__mb)      ((struct sdp_mb_cb *)&((__mb)->cb[0]))
#define BZCOPY_STATE(mb)      (SDP_SKB_CB(mb)->bz)
#define RX_SRCAVAIL_STATE(mb) (SDP_SKB_CB(mb)->rx_sa)
#define TX_SRCAVAIL_STATE(mb) (SDP_SKB_CB(mb)->tx_sa)

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#define ring_head(ring)   (atomic_read(&(ring).head))
#define ring_tail(ring)   (atomic_read(&(ring).tail))
#define ring_posted(ring) (ring_head(ring) - ring_tail(ring))

#define rx_ring_posted(ssk) ring_posted(ssk->rx_ring)
#ifdef SDP_ZCOPY
#define tx_ring_posted(ssk) (ring_posted(ssk->tx_ring) + \
	(ssk->tx_ring.rdma_inflight ? ssk->tx_ring.rdma_inflight->busy : 0))
#else
#define tx_ring_posted(ssk) ring_posted(ssk->tx_ring)
#endif
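
/*
 * Ring accounting sketch: the posting path advances 'head' once per work
 * request and the completion path advances 'tail' once per completion, so
 * ring_posted() is the number of work requests still outstanding, e.g.:
 *
 *	atomic_inc(&ssk->tx_ring.head);		// after ib_post_send()
 *	...
 *	atomic_inc(&ssk->tx_ring.tail);		// in the CQ handler
 */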

extern int sdp_zcopy_thresh;
extern int rcvbuf_initial_size;
extern struct workqueue_struct *rx_comp_wq;
extern struct ib_client sdp_client;

enum sdp_mid {
	SDP_MID_HELLO = 0x0,
	SDP_MID_HELLO_ACK = 0x1,
	SDP_MID_DISCONN = 0x2,
	SDP_MID_ABORT = 0x3,
	SDP_MID_SENDSM = 0x4,
	SDP_MID_RDMARDCOMPL = 0x6,
	SDP_MID_SRCAVAIL_CANCEL = 0x8,
	SDP_MID_CHRCVBUF = 0xB,
	SDP_MID_CHRCVBUF_ACK = 0xC,
	SDP_MID_SINKAVAIL = 0xFD,
	SDP_MID_SRCAVAIL = 0xFE,
	SDP_MID_DATA = 0xFF,
};

enum sdp_flags {
	SDP_OOB_PRES = 1 << 0,
	SDP_OOB_PEND = 1 << 1,
};

enum {
	SDP_MIN_TX_CREDITS = 2
};

enum {
	SDP_ERR_ERROR   = -4,
	SDP_ERR_FAULT   = -3,
	SDP_NEW_SEG     = -2,
	SDP_DO_WAIT_MEM = -1
};

struct sdp_rrch {
	__u32 len;
} __attribute__((__packed__));

struct sdp_srcah {
	__u32 len;
	__u32 rkey;
	__u64 vaddr;
} __attribute__((__packed__));

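/*
 * One posted send or receive: the mbuf chain plus the DMA mapping of each
 * fragment, undone again in sdp_cleanup_sdp_buf().
 */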
struct sdp_buf {
	struct mbuf	*mb;
	u64		mapping[SDP_MAX_SEND_SGES];
} __attribute__((__packed__));

struct sdp_chrecvbuf {
	u32 size;
} __attribute__((__packed__));

/* Context used for synchronous zero copy bcopy (BZCOPY). */
struct bzcopy_state {
	unsigned char __user  *u_base;
	int                    u_len;
	int                    left;
	int                    page_cnt;
	int                    cur_page;
	int                    cur_offset;
	int                    busy;
	struct sdp_sock       *ssk;
	struct page          **pages;
};

enum rx_sa_flag {
	RX_SA_ABORTED    = 2,
};

enum tx_sa_flag {
	TX_SA_SENDSM     = 0x01,
	TX_SA_CROSS_SEND = 0x02,
	TX_SA_INTRRUPTED = 0x04,
	TX_SA_TIMEDOUT   = 0x08,
	TX_SA_ERROR      = 0x10,
};

struct rx_srcavail_state {
	/* Advertised buffer parameters */
	u32 mseq;
	u32 used;
	u32 reported;
	u32 len;
	u32 rkey;
	u64 vaddr;

	/* Destination buffer info */
	struct ib_umem *umem;
	struct ib_pool_fmr *fmr;

	/* Utility */
	u8  busy;
	enum rx_sa_flag  flags;
};

struct tx_srcavail_state {
	/* Fields from 'busy' onward are reset by tx_sa_reset(). */
	u8		busy;

	struct ib_umem *umem;
	struct ib_pool_fmr *fmr;

	u32		bytes_sent;
	u32		bytes_acked;

	enum tx_sa_flag	abort_flags;
	u8		posted;

	u32		mseq;
};

struct sdp_tx_ring {
#ifdef SDP_ZCOPY
	struct rx_srcavail_state *rdma_inflight;
#endif
	struct sdp_buf		*buffer;
	atomic_t		head;
	atomic_t		tail;
	struct ib_cq		*cq;

	atomic_t		credits;
#define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))

	struct callout		timer;
	u16			poll_cnt;
};

struct sdp_rx_ring {
	struct sdp_buf   *buffer;
	atomic_t          head;
	atomic_t          tail;
	struct ib_cq	 *cq;

	int		 destroyed;
	struct rwlock	 destroyed_lock;
};

struct sdp_device {
	struct ib_pd		*pd;
	struct ib_fmr_pool	*fmr_pool;
};

struct sdp_moderation {
	unsigned long last_moder_packets;
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes;
	unsigned long last_moder_jiffies;
	int last_moder_time;
	u16 rx_usecs;
	u16 rx_frames;
	u16 tx_usecs;
	u32 pkt_rate_low;
	u16 rx_usecs_low;
	u32 pkt_rate_high;
	u16 rx_usecs_high;
	u16 sample_interval;
	u16 adaptive_rx_coal;
	u32 msg_enable;

	int moder_cnt;
	int moder_time;
};
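
/*
 * The state above feeds adaptive interrupt coalescing: the rx completion
 * path samples packet and byte rates every sample_interval and picks a
 * moderation time between rx_usecs_low and rx_usecs_high.  A hedged sketch
 * of how the result might be applied, assuming the device supports CQ
 * moderation via ib_modify_cq():
 *
 *	if (mod->adaptive_rx_coal && moder_time != mod->last_moder_time) {
 *		mod->last_moder_time = moder_time;
 *		ib_modify_cq(ssk->rx_ring.cq, mod->moder_cnt, moder_time);
 *	}
 */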

/* Bits for the 'flags' field below. */
#define	SDP_TIMEWAIT	0x0001		/* In ssk timewait state. */
#define	SDP_DROPPED	0x0002		/* Socket has been dropped. */
#define	SDP_SOCKREF	0x0004		/* Holding a sockref for close. */
#define	SDP_NODELAY	0x0008		/* Disable Nagle. */
#define	SDP_NEEDFIN	0x0010		/* Send a fin on the next tx. */
#define	SDP_DREQWAIT	0x0020		/* Waiting on DREQ. */
#define	SDP_DESTROY	0x0040		/* Being destroyed. */
#define	SDP_DISCON	0x0080		/* rdma_disconnect is owed. */

/* Bits for the 'oobflags' field. */
#define	SDP_HADOOB	0x0001		/* Had OOB data. */
#define	SDP_HAVEOOB	0x0002		/* Have OOB data. */

struct sdp_sock {
	LIST_ENTRY(sdp_sock) list;
	struct socket *socket;
	struct rdma_cm_id *id;
	struct ib_device *ib_device;
	struct sdp_device *sdp_dev;
	struct ib_qp *qp;
	struct ucred *cred;
	struct callout keep2msl;	/* 2msl and keepalive timer. */
	struct callout nagle_timer;	/* Timeout waiting for ack. */
	struct ib_ucontext context;
	in_port_t lport;
	in_addr_t laddr;
	in_port_t fport;
	in_addr_t faddr;
	int flags;
	int oobflags;		/* Protected by rx lock. */
	int state;
	int softerror;
	int recv_bytes;		/* Bytes per recv. buf including header */
	int xmit_size_goal;
	char iobc;

	struct sdp_rx_ring rx_ring;
	struct sdp_tx_ring tx_ring;
	struct rwlock	lock;
	struct mbufq	rxctlq;		/* received control packets */

	int qp_active;	/* XXX Flag. */
	int max_sge;
	struct work_struct rx_comp_work;
#define rcv_nxt(ssk) atomic_read(&(ssk->rcv_nxt))
	atomic_t rcv_nxt;

	/* SDP specific */
	atomic_t mseq_ack;
#define mseq_ack(ssk) (atomic_read(&ssk->mseq_ack))
	unsigned max_bufs;	/* Initial buffers offered by other side */
	unsigned min_bufs;	/* Low water mark to wake senders */

	unsigned long nagle_last_unacked; /* mseq of latest unacked packet */

	atomic_t               remote_credits;
#define remote_credits(ssk) (atomic_read(&ssk->remote_credits))
	int		poll_cq;

	/* SDP slow start */
	int recv_request_head;	/* mark the rx_head when the resize request
				   was received */
	int recv_request;	/* XXX flag if request to resize was received */

	unsigned long tx_packets;
	unsigned long rx_packets;
	unsigned long tx_bytes;
	unsigned long rx_bytes;
	struct sdp_moderation auto_mod;
	struct task shutdown_task;
#ifdef SDP_ZCOPY
	struct tx_srcavail_state *tx_sa;
	struct rx_srcavail_state *rx_sa;
	spinlock_t tx_sa_lock;
	struct delayed_work srcavail_cancel_work;
	int srcavail_cancel_mseq;
	/* ZCOPY data: -1: use global; 0: disable zcopy; >0: zcopy threshold */
	int zcopy_thresh;
#endif
};

#define	sdp_sk(so)	((struct sdp_sock *)((so)->so_pcb))

#define	SDP_RLOCK(ssk)		rw_rlock(&(ssk)->lock)
#define	SDP_WLOCK(ssk)		rw_wlock(&(ssk)->lock)
#define	SDP_RUNLOCK(ssk)	rw_runlock(&(ssk)->lock)
#define	SDP_WUNLOCK(ssk)	rw_wunlock(&(ssk)->lock)
#define	SDP_WLOCK_ASSERT(ssk)	rw_assert(&(ssk)->lock, RA_WLOCKED)
#define	SDP_RLOCK_ASSERT(ssk)	rw_assert(&(ssk)->lock, RA_RLOCKED)
#define	SDP_LOCK_ASSERT(ssk)	rw_assert(&(ssk)->lock, RA_LOCKED)
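
/*
 * Lock usage sketch (hypothetical caller): state transitions and other
 * socket mutations take the write lock, read-only paths the read lock.
 *
 *	SDP_WLOCK(ssk);
 *	ssk->state = TCPS_ESTABLISHED;
 *	SDP_WUNLOCK(ssk);
 */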

MALLOC_DECLARE(M_SDP);
SYSCTL_DECL(_net_inet_sdp);

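/*
 * Zero every tx_srcavail_state field from 'busy' onward; fields declared
 * before 'busy' survive the reset.
 */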
static inline void tx_sa_reset(struct tx_srcavail_state *tx_sa)
{
	memset((void *)&tx_sa->busy, 0,
			sizeof(*tx_sa) - offsetof(typeof(*tx_sa), busy));
}

static inline void rx_ring_unlock(struct sdp_rx_ring *rx_ring)
{
	rw_runlock(&rx_ring->destroyed_lock);
}

static inline int rx_ring_trylock(struct sdp_rx_ring *rx_ring)
{
	rw_rlock(&rx_ring->destroyed_lock);
	if (rx_ring->destroyed) {
		rx_ring_unlock(rx_ring);
		return 0;
	}
	return 1;
}

static inline void rx_ring_destroy_lock(struct sdp_rx_ring *rx_ring)
{
	rw_wlock(&rx_ring->destroyed_lock);
	rx_ring->destroyed = 1;
	rw_wunlock(&rx_ring->destroyed_lock);
}
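
/*
 * Teardown pattern sketch: completion paths enter through rx_ring_trylock()
 * and back off once the ring is marked destroyed; rx_ring_destroy_lock()
 * flips the flag under the write lock so no reader is still inside.
 *
 *	if (!rx_ring_trylock(&ssk->rx_ring))
 *		return;		// ring is being torn down
 *	... process rx completions ...
 *	rx_ring_unlock(&ssk->rx_ring);
 */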

static inline void sdp_arm_rx_cq(struct sdp_sock *ssk)
{
	sdp_prf(ssk->socket, NULL, "Arming RX cq");
	sdp_dbg_data(ssk->socket, "Arming RX cq\n");

	ib_req_notify_cq(ssk->rx_ring.cq, IB_CQ_NEXT_COMP);
}

static inline void sdp_arm_tx_cq(struct sdp_sock *ssk)
{
	sdp_prf(ssk->socket, NULL, "Arming TX cq");
	sdp_dbg_data(ssk->socket, "Arming TX cq. credits: %d, posted: %d\n",
		tx_credits(ssk), tx_ring_posted(ssk));

	ib_req_notify_cq(ssk->tx_ring.cq, IB_CQ_NEXT_COMP);
}

/*
 * Return the minimum of:
 * - tx credits
 * - free slots in the tx ring (not counting the SDP_MIN_TX_CREDITS reserve)
 */
static inline int tx_slots_free(struct sdp_sock *ssk)
{
	int min_free;

	min_free = MIN(tx_credits(ssk),
			SDP_TX_SIZE - tx_ring_posted(ssk));
	if (min_free < SDP_MIN_TX_CREDITS)
		return 0;

	return min_free - SDP_MIN_TX_CREDITS;
}

/* utilities */
static inline char *mid2str(int mid)
{
#define ENUM2STR(e) [e] = #e
	static char *mid2str[] = {
		ENUM2STR(SDP_MID_HELLO),
		ENUM2STR(SDP_MID_HELLO_ACK),
		ENUM2STR(SDP_MID_ABORT),
		ENUM2STR(SDP_MID_DISCONN),
		ENUM2STR(SDP_MID_SENDSM),
		ENUM2STR(SDP_MID_RDMARDCOMPL),
		ENUM2STR(SDP_MID_SRCAVAIL_CANCEL),
		ENUM2STR(SDP_MID_CHRCVBUF),
		ENUM2STR(SDP_MID_CHRCVBUF_ACK),
		ENUM2STR(SDP_MID_DATA),
		ENUM2STR(SDP_MID_SRCAVAIL),
		ENUM2STR(SDP_MID_SINKAVAIL),
	};

	if (mid >= ARRAY_SIZE(mid2str))
		return NULL;

	return mid2str[mid];
}

static inline struct mbuf *
sdp_alloc_mb(struct socket *sk, u8 mid, int size, int wait)
{
	struct sdp_bsdh *h;
	struct mbuf *mb;

	MGETHDR(mb, wait, MT_DATA);
	if (mb == NULL)
		return (NULL);
	mb->m_pkthdr.len = mb->m_len = sizeof(struct sdp_bsdh);
	h = mtod(mb, struct sdp_bsdh *);
	h->mid = mid;

	return (mb);
}

static inline struct mbuf *
sdp_alloc_mb_data(struct socket *sk, int wait)
{
	return (sdp_alloc_mb(sk, SDP_MID_DATA, 0, wait));
}

static inline struct mbuf *
sdp_alloc_mb_disconnect(struct socket *sk, int wait)
{
	return (sdp_alloc_mb(sk, SDP_MID_DISCONN, 0, wait));
}

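/*
 * Append len bytes to the mbuf and return a pointer to the start of the new
 * region; the caller must ensure the mbuf has len bytes of trailing space.
 * (The FreeBSD analogue of the Linux skb_put() helper.)
 */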
static inline void *
mb_put(struct mbuf *mb, int len)
{
	uint8_t *data;

	data = mb->m_data;
	data += mb->m_len;
	mb->m_len += len;
	return ((void *)data);
}

static inline struct mbuf *
sdp_alloc_mb_chrcvbuf_ack(struct socket *sk, int size, int wait)
{
	struct mbuf *mb;
	struct sdp_chrecvbuf *resp_size;

	mb = sdp_alloc_mb(sk, SDP_MID_CHRCVBUF_ACK, sizeof(*resp_size), wait);
	if (mb == NULL)
		return (NULL);
	resp_size = (struct sdp_chrecvbuf *)mb_put(mb, sizeof(*resp_size));
	resp_size->size = htonl(size);

	return (mb);
}

static inline struct mbuf *
sdp_alloc_mb_srcavail(struct socket *sk, u32 len, u32 rkey, u64 vaddr, int wait)
{
	struct mbuf *mb;
	struct sdp_srcah *srcah;

	mb = sdp_alloc_mb(sk, SDP_MID_SRCAVAIL, sizeof(*srcah), wait);
	if (mb == NULL)
		return (NULL);
	srcah = (struct sdp_srcah *)mb_put(mb, sizeof(*srcah));
	srcah->len = htonl(len);
	srcah->rkey = htonl(rkey);
	srcah->vaddr = cpu_to_be64(vaddr);

	return (mb);
}

static inline struct mbuf *
sdp_alloc_mb_srcavail_cancel(struct socket *sk, int wait)
{
	return (sdp_alloc_mb(sk, SDP_MID_SRCAVAIL_CANCEL, 0, wait));
}

static inline struct mbuf *
sdp_alloc_mb_rdmardcompl(struct socket *sk, u32 len, int wait)
{
	struct mbuf *mb;
	struct sdp_rrch *rrch;

	mb = sdp_alloc_mb(sk, SDP_MID_RDMARDCOMPL, sizeof(*rrch), wait);
	if (mb == NULL)
		return (NULL);
	rrch = (struct sdp_rrch *)mb_put(mb, sizeof(*rrch));
	rrch->len = htonl(len);

	return (mb);
}

static inline struct mbuf *
sdp_alloc_mb_sendsm(struct socket *sk, int wait)
{
	return (sdp_alloc_mb(sk, SDP_MID_SENDSM, 0, wait));
}

static inline int sdp_tx_ring_slots_left(struct sdp_sock *ssk)
{
	return (SDP_TX_SIZE - tx_ring_posted(ssk));
}

static inline int credit_update_needed(struct sdp_sock *ssk)
{
	int c;

	c = remote_credits(ssk);
	if (likely(c > SDP_MIN_TX_CREDITS))
		c += c / 2;
	return (unlikely(c < rx_ring_posted(ssk)) &&
	    likely(tx_credits(ssk) > 0) &&
	    likely(sdp_tx_ring_slots_left(ssk)));
}
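
/*
 * Worked example (illustrative numbers): with remote_credits == 8 the peer
 * is credited for 8 + 8/2 = 12 further sends, so a credit update is owed
 * once more than 12 rx buffers sit posted, provided we hold a tx credit and
 * a free tx ring slot to carry the update.
 */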

/* No-op statistics stubs. */
#define SDPSTATS_COUNTER_INC(stat)
#define SDPSTATS_COUNTER_ADD(stat, val)
#define SDPSTATS_COUNTER_MID_INC(stat, mid)
#define SDPSTATS_HIST_LINEAR(stat, size)
#define SDPSTATS_HIST(stat, size)

static inline void
sdp_cleanup_sdp_buf(struct sdp_sock *ssk, struct sdp_buf *sbuf,
    enum dma_data_direction dir)
{
	struct ib_device *dev;
	struct mbuf *mb;
	int i;

	dev = ssk->ib_device;
	for (i = 0, mb = sbuf->mb; mb != NULL; mb = mb->m_next, i++)
		ib_dma_unmap_single(dev, sbuf->mapping[i], mb->m_len, dir);
}

/* sdp_main.c */
void sdp_set_default_moderation(struct sdp_sock *ssk);
void sdp_start_keepalive_timer(struct socket *sk);
void sdp_urg(struct sdp_sock *ssk, struct mbuf *mb);
void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk);
void sdp_abort(struct socket *sk);
struct sdp_sock *sdp_notify(struct sdp_sock *ssk, int error);
/* sdp_cma.c */
int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);

/* sdp_tx.c */
int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
void sdp_tx_ring_destroy(struct sdp_sock *ssk);
int sdp_xmit_poll(struct sdp_sock *ssk, int force);
void sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb);
void sdp_post_sends(struct sdp_sock *ssk, int wait);
void sdp_post_keepalive(struct sdp_sock *ssk);

/* sdp_rx.c */
void sdp_rx_ring_init(struct sdp_sock *ssk);
int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
void sdp_rx_ring_destroy(struct sdp_sock *ssk);
int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size);
int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size);
void sdp_do_posts(struct sdp_sock *ssk);
void sdp_rx_comp_full(struct sdp_sock *ssk);

/* sdp_zcopy.c */
struct kiocb;
int sdp_sendmsg_zcopy(struct kiocb *iocb, struct socket *sk, struct iovec *iov);
int sdp_handle_srcavail(struct sdp_sock *ssk, struct sdp_srcah *srcah);
void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack);
void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
		u32 bytes_completed);
int sdp_handle_rdma_read_cqe(struct sdp_sock *ssk);
int sdp_rdma_to_iovec(struct socket *sk, struct iovec *iov, struct mbuf *mb,
		unsigned long *used);
int sdp_post_rdma_rd_compl(struct sdp_sock *ssk,
		struct rx_srcavail_state *rx_sa);
int sdp_post_sendsm(struct socket *sk);
void srcavail_cancel_timeout(struct work_struct *work);
void sdp_abort_srcavail(struct socket *sk);
void sdp_abort_rdma_read(struct socket *sk);
int sdp_process_rx(struct sdp_sock *ssk);

#endif /* _SDP_H_ */