sdp.h revision 331769
#ifndef _SDP_H_
#define _SDP_H_

#define	LINUXKPI_PARAM_PREFIX ib_sdp_

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ofed.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/domain.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>

#include <linux/device.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>

#ifdef SDP_DEBUG
#define	CONFIG_INFINIBAND_SDP_DEBUG
#endif

#include "sdp_dbg.h"

#undef LIST_HEAD
/* From sys/queue.h */
#define LIST_HEAD(name, type)                                           \
struct name {                                                           \
        struct type *lh_first;  /* first element */                     \
}

/* Interval between successive polls in the Tx routine when polling is used
 * instead of interrupts (in per-core Tx rings); should be a power of 2. */
#define SDP_TX_POLL_MODER	16
#define SDP_TX_POLL_TIMEOUT	(HZ / 20)
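/*
 * Illustration (editorial, not part of the original source): because
 * SDP_TX_POLL_MODER is a power of 2, a tx path can decide cheaply when to
 * poll the CQ by masking a send counter, e.g.:
 *
 *	if ((ssk->tx_ring.poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0)
 *		sdp_xmit_poll(ssk, 0);
 */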
#define SDP_NAGLE_TIMEOUT (HZ / 10)

#define SDP_SRCAVAIL_CANCEL_TIMEOUT (HZ * 5)
#define SDP_SRCAVAIL_ADV_TIMEOUT (1 * HZ)
#define SDP_SRCAVAIL_PAYLOAD_LEN 1

#define SDP_RESOLVE_TIMEOUT 1000
#define SDP_ROUTE_TIMEOUT 1000
#define SDP_RETRY_COUNT 5
#define SDP_KEEPALIVE_TIME (120 * 60 * HZ)
#define SDP_FIN_WAIT_TIMEOUT (60 * HZ) /* like TCP_FIN_TIMEOUT */

#define SDP_TX_SIZE 0x40
#define SDP_RX_SIZE 0x40

#define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))
#define SDP_FMR_POOL_SIZE	1024
#define SDP_FMR_DIRTY_SIZE	(SDP_FMR_POOL_SIZE / 4)

#define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2))

/* mbuf inlined data length; the rest will be received into fragments. */
#define SDP_HEAD_SIZE (sizeof(struct sdp_bsdh))

/* Limit the tx payload length when the sink advertises bigger buffers than
 * the source can handle, and bound the rx fragment size (which is limited
 * by the sge->length field). */
#define	SDP_MAX_PACKET	(1 << 16)
#define SDP_MAX_PAYLOAD (SDP_MAX_PACKET - SDP_HEAD_SIZE)
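/*
 * Worked example (editorial note): struct sdp_bsdh below is 16 bytes when
 * packed (1 + 1 + 2 + 4 + 4 + 4), so with SDP_MAX_PACKET of 64KB the
 * maximum payload comes out to 65536 - 16 = 65520 bytes.
 */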

#define SDP_MAX_RECV_SGES (SDP_MAX_PACKET / MCLBYTES)
#define SDP_MAX_SEND_SGES ((SDP_MAX_PACKET / MCLBYTES) + 2)
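/*
 * For example (editorial note): with the common MCLBYTES value of 2048,
 * SDP_MAX_RECV_SGES is 65536 / 2048 = 32 and SDP_MAX_SEND_SGES is 34.
 */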

#define SDP_NUM_WC 4

#define SDP_DEF_ZCOPY_THRESH (64 * 1024)
#define SDP_MIN_ZCOPY_THRESH PAGE_SIZE
#define SDP_MAX_ZCOPY_THRESH 1048576

#define SDP_OP_RECV 0x800000000LL
#define SDP_OP_SEND 0x400000000LL
#define SDP_OP_RDMA 0x200000000LL
#define SDP_OP_NOP  0x100000000LL

/* How long (in jiffies) to block the sender until tx completion. */
#define SDP_BZCOPY_POLL_TIMEOUT (HZ / 10)

#define SDP_AUTO_CONF	0xffff
#define AUTO_MOD_DELAY (HZ / 4)

struct sdp_mb_cb {
	__u32		seq;		/* Starting sequence number	*/
	struct bzcopy_state      *bz;
	struct rx_srcavail_state *rx_sa;
	struct tx_srcavail_state *tx_sa;
};

#define	M_PUSH	M_PROTO1	/* Do a 'push'. */
#define	M_URG	M_PROTO2	/* Mark as urgent (oob). */

#define SDP_SKB_CB(__mb)      ((struct sdp_mb_cb *)&((__mb)->cb[0]))
#define BZCOPY_STATE(mb)      (SDP_SKB_CB(mb)->bz)
#define RX_SRCAVAIL_STATE(mb) (SDP_SKB_CB(mb)->rx_sa)
#define TX_SRCAVAIL_STATE(mb) (SDP_SKB_CB(mb)->tx_sa)
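/*
 * Usage sketch (editorial, not from the original source): per-mbuf SDP
 * state is recovered from the control block, e.g.:
 *
 *	struct rx_srcavail_state *rx_sa = RX_SRCAVAIL_STATE(mb);
 *	if (rx_sa != NULL && (rx_sa->flags & RX_SA_ABORTED))
 *		... handle the aborted advertisement ...
 */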

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#define ring_head(ring)   (atomic_read(&(ring).head))
#define ring_tail(ring)   (atomic_read(&(ring).tail))
#define ring_posted(ring) (ring_head(ring) - ring_tail(ring))

#define rx_ring_posted(ssk) ring_posted(ssk->rx_ring)
#ifdef SDP_ZCOPY
#define tx_ring_posted(ssk) (ring_posted(ssk->tx_ring) + \
	(ssk->tx_ring.rdma_inflight ? ssk->tx_ring.rdma_inflight->busy : 0))
#else
#define tx_ring_posted(ssk) ring_posted(ssk->tx_ring)
#endif
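/*
 * Editorial note: head and tail only ever increase; their difference is
 * the number of work requests currently posted, and it stays correct
 * across counter wraparound as long as the ring size is small relative
 * to the 32-bit counter range (here, 0x40 entries).
 */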

extern int sdp_zcopy_thresh;
extern int rcvbuf_initial_size;
extern struct workqueue_struct *rx_comp_wq;
extern struct ib_client sdp_client;

enum sdp_mid {
	SDP_MID_HELLO = 0x0,
	SDP_MID_HELLO_ACK = 0x1,
	SDP_MID_DISCONN = 0x2,
	SDP_MID_ABORT = 0x3,
	SDP_MID_SENDSM = 0x4,
	SDP_MID_RDMARDCOMPL = 0x6,
	SDP_MID_SRCAVAIL_CANCEL = 0x8,
	SDP_MID_CHRCVBUF = 0xB,
	SDP_MID_CHRCVBUF_ACK = 0xC,
	SDP_MID_SINKAVAIL = 0xFD,
	SDP_MID_SRCAVAIL = 0xFE,
	SDP_MID_DATA = 0xFF,
};

enum sdp_flags {
        SDP_OOB_PRES = 1 << 0,
        SDP_OOB_PEND = 1 << 1,
};

enum {
	SDP_MIN_TX_CREDITS = 2
};

enum {
	SDP_ERR_ERROR   = -4,
	SDP_ERR_FAULT   = -3,
	SDP_NEW_SEG     = -2,
	SDP_DO_WAIT_MEM = -1
};

struct sdp_bsdh {
	u8 mid;
	u8 flags;
	__u16 bufs;
	__u32 len;
	__u32 mseq;
	__u32 mseq_ack;
} __attribute__((__packed__));
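/*
 * Editorial note: this is the Base Sockets Direct Header (BSDH) that
 * prefixes every SDP message; 'bufs' carries the receive-buffer (credit)
 * advertisement, while 'mseq' and 'mseq_ack' carry the message sequence
 * number and its acknowledgment.
 */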

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
} __attribute__((__packed__));

/* TODO: too much? Can I avoid having the src/dst and port here? */
struct sdp_hh {
	struct sdp_bsdh bsdh;
	u8 majv_minv;
	u8 ipv_cap;
	u8 rsvd1;
	u8 max_adverts;
	__u32 desremrcvsz;
	__u32 localrcvsz;
	__u16 port;
	__u16 rsvd2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
	u8 rsvd3[IB_CM_REQ_PRIVATE_DATA_SIZE - sizeof(struct sdp_bsdh) - 48];
} __attribute__((__packed__));

struct sdp_hah {
	struct sdp_bsdh bsdh;
	u8 majv_minv;
	u8 ipv_cap;
	u8 rsvd1;
	u8 ext_max_adverts;
	__u32 actrcvsz;
	u8 rsvd2[IB_CM_REP_PRIVATE_DATA_SIZE - sizeof(struct sdp_bsdh) - 8];
} __attribute__((__packed__));

struct sdp_rrch {
	__u32 len;
} __attribute__((__packed__));

struct sdp_srcah {
	__u32 len;
	__u32 rkey;
	__u64 vaddr;
} __attribute__((__packed__));

struct sdp_buf {
        struct mbuf *mb;
        u64             mapping[SDP_MAX_SEND_SGES];
} __attribute__((__packed__));

struct sdp_chrecvbuf {
	u32 size;
} __attribute__((__packed__));

/* Context used for synchronous zero copy bcopy (BZCOPY) */
struct bzcopy_state {
	unsigned char __user  *u_base;
	int                    u_len;
	int                    left;
	int                    page_cnt;
	int                    cur_page;
	int                    cur_offset;
	int                    busy;
	struct sdp_sock      *ssk;
	struct page         **pages;
};

enum rx_sa_flag {
	RX_SA_ABORTED    = 2,
};

enum tx_sa_flag {
	TX_SA_SENDSM     = 0x01,
	TX_SA_CROSS_SEND = 0x02,
	TX_SA_INTRRUPTED = 0x04,
	TX_SA_TIMEDOUT   = 0x08,
	TX_SA_ERROR      = 0x10,
};

struct rx_srcavail_state {
	/* Advertised buffer stuff */
	u32 mseq;
	u32 used;
	u32 reported;
	u32 len;
	u32 rkey;
	u64 vaddr;

	/* Dest buff info */
	struct ib_umem *umem;
	struct ib_pool_fmr *fmr;

	/* Utility */
	u8  busy;
	enum rx_sa_flag  flags;
};

struct tx_srcavail_state {
	/* Data below 'busy' will be reset */
	u8		busy;

	struct ib_umem *umem;
	struct ib_pool_fmr *fmr;

	u32		bytes_sent;
	u32		bytes_acked;

	enum tx_sa_flag	abort_flags;
	u8		posted;

	u32		mseq;
};

struct sdp_tx_ring {
#ifdef SDP_ZCOPY
	struct rx_srcavail_state *rdma_inflight;
#endif
	struct sdp_buf   	*buffer;
	atomic_t          	head;
	atomic_t          	tail;
	struct ib_cq 	 	*cq;

	atomic_t 	  	credits;
#define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))

	struct callout		timer;
	u16 		  	poll_cnt;
};

struct sdp_rx_ring {
	struct sdp_buf   *buffer;
	atomic_t          head;
	atomic_t          tail;
	struct ib_cq 	 *cq;

	int		 destroyed;
	struct rwlock	 destroyed_lock;
};

struct sdp_device {
	struct ib_pd 		*pd;
	struct ib_fmr_pool 	*fmr_pool;
};

struct sdp_moderation {
	unsigned long last_moder_packets;
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes;
	unsigned long last_moder_jiffies;
	int last_moder_time;
	u16 rx_usecs;
	u16 rx_frames;
	u16 tx_usecs;
	u32 pkt_rate_low;
	u16 rx_usecs_low;
	u32 pkt_rate_high;
	u16 rx_usecs_high;
	u16 sample_interval;
	u16 adaptive_rx_coal;
	u32 msg_enable;

	int moder_cnt;
	int moder_time;
};

/* These are flags fields. */
#define	SDP_TIMEWAIT	0x0001		/* In ssk timewait state. */
#define	SDP_DROPPED	0x0002		/* Socket has been dropped. */
#define	SDP_SOCKREF	0x0004		/* Holding a sockref for close. */
#define	SDP_NODELAY	0x0008		/* Disable Nagle. */
#define	SDP_NEEDFIN	0x0010		/* Send a fin on the next tx. */
#define	SDP_DREQWAIT	0x0020		/* Waiting on DREQ. */
#define	SDP_DESTROY	0x0040		/* Being destroyed. */
#define	SDP_DISCON	0x0080		/* rdma_disconnect is owed. */

/* These are oobflags */
#define	SDP_HADOOB	0x0001		/* Had OOB data. */
#define	SDP_HAVEOOB	0x0002		/* Have OOB data. */

struct sdp_sock {
	LIST_ENTRY(sdp_sock) list;
	struct socket *socket;
	struct rdma_cm_id *id;
	struct ib_device *ib_device;
	struct sdp_device *sdp_dev;
	struct ib_qp *qp;
	struct ucred *cred;
	struct callout keep2msl;	/* 2msl and keepalive timer. */
	struct callout nagle_timer;	/* timeout waiting for ack */
	struct ib_ucontext context;
	in_port_t lport;
	in_addr_t laddr;
	in_port_t fport;
	in_addr_t faddr;
	int flags;
	int oobflags;		/* protected by rx lock. */
	int state;
	int softerror;
	int recv_bytes;		/* Bytes per recv. buf including header */
	int xmit_size_goal;
	char iobc;

	struct sdp_rx_ring rx_ring;
	struct sdp_tx_ring tx_ring;
	struct rwlock	lock;
	struct mbufq	rxctlq;		/* received control packets */

	int qp_active;	/* XXX Flag. */
	int max_sge;
	struct work_struct rx_comp_work;
#define rcv_nxt(ssk) atomic_read(&(ssk->rcv_nxt))
	atomic_t rcv_nxt;

	/* SDP specific */
	atomic_t mseq_ack;
#define mseq_ack(ssk) (atomic_read(&ssk->mseq_ack))
	unsigned max_bufs;	/* Initial buffers offered by other side */
	unsigned min_bufs;	/* Low water mark to wake senders */

	unsigned long nagle_last_unacked; /* mseq of latest unacked packet */

	atomic_t               remote_credits;
#define remote_credits(ssk) (atomic_read(&ssk->remote_credits))
	int 		  poll_cq;

	/* SDP slow start */
	int recv_request_head; 	/* mark the rx_head when the resize request
				   was received */
	int recv_request; 	/* XXX flag if request to resize was received */

	unsigned long tx_packets;
	unsigned long rx_packets;
	unsigned long tx_bytes;
	unsigned long rx_bytes;
	struct sdp_moderation auto_mod;
	struct task shutdown_task;
#ifdef SDP_ZCOPY
	struct tx_srcavail_state *tx_sa;
	struct rx_srcavail_state *rx_sa;
	spinlock_t tx_sa_lock;
	struct delayed_work srcavail_cancel_work;
	int srcavail_cancel_mseq;
	/* ZCOPY data: -1:use global; 0:disable zcopy; >0: zcopy threshold */
	int zcopy_thresh;
#endif
};

#define	sdp_sk(so)	((struct sdp_sock *)(so->so_pcb))

#define	SDP_RLOCK(ssk)		rw_rlock(&(ssk)->lock)
#define	SDP_WLOCK(ssk)		rw_wlock(&(ssk)->lock)
#define	SDP_RUNLOCK(ssk)	rw_runlock(&(ssk)->lock)
#define	SDP_WUNLOCK(ssk)	rw_wunlock(&(ssk)->lock)
#define	SDP_WLOCK_ASSERT(ssk)	rw_assert(&(ssk)->lock, RA_WLOCKED)
#define	SDP_RLOCK_ASSERT(ssk)	rw_assert(&(ssk)->lock, RA_RLOCKED)
#define	SDP_LOCK_ASSERT(ssk)	rw_assert(&(ssk)->lock, RA_LOCKED)
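/*
 * Usage sketch (editorial): the per-socket rwlock follows the usual
 * FreeBSD rw(9) pattern, e.g.:
 *
 *	SDP_WLOCK(ssk);
 *	ssk->flags |= SDP_NEEDFIN;
 *	SDP_WUNLOCK(ssk);
 */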

MALLOC_DECLARE(M_SDP);

static inline void tx_sa_reset(struct tx_srcavail_state *tx_sa)
{
	memset((void *)&tx_sa->busy, 0,
			sizeof(*tx_sa) - offsetof(typeof(*tx_sa), busy));
}

static inline void rx_ring_unlock(struct sdp_rx_ring *rx_ring)
{
	rw_runlock(&rx_ring->destroyed_lock);
}

static inline int rx_ring_trylock(struct sdp_rx_ring *rx_ring)
{
	rw_rlock(&rx_ring->destroyed_lock);
	if (rx_ring->destroyed) {
		rx_ring_unlock(rx_ring);
		return 0;
	}
	return 1;
}
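/*
 * Typical caller pattern (editorial sketch): take the read lock only if
 * the ring is still alive, and always pair it with rx_ring_unlock():
 *
 *	if (!rx_ring_trylock(&ssk->rx_ring))
 *		return;		(the ring was already destroyed)
 *	... use the ring ...
 *	rx_ring_unlock(&ssk->rx_ring);
 */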

static inline void rx_ring_destroy_lock(struct sdp_rx_ring *rx_ring)
{
	rw_wlock(&rx_ring->destroyed_lock);
	rx_ring->destroyed = 1;
	rw_wunlock(&rx_ring->destroyed_lock);
}

static inline void sdp_arm_rx_cq(struct sdp_sock *ssk)
{
	sdp_prf(ssk->socket, NULL, "Arming RX cq");
	sdp_dbg_data(ssk->socket, "Arming RX cq\n");

	ib_req_notify_cq(ssk->rx_ring.cq, IB_CQ_NEXT_COMP);
}

static inline void sdp_arm_tx_cq(struct sdp_sock *ssk)
{
	sdp_prf(ssk->socket, NULL, "Arming TX cq");
	sdp_dbg_data(ssk->socket, "Arming TX cq. credits: %d, posted: %d\n",
		tx_credits(ssk), tx_ring_posted(ssk));

	ib_req_notify_cq(ssk->tx_ring.cq, IB_CQ_NEXT_COMP);
}

/* Return the minimum of:
 * - tx credits
 * - free slots in the tx ring (not including SDP_MIN_TX_CREDITS)
 */
static inline int tx_slots_free(struct sdp_sock *ssk)
{
	int min_free;

	min_free = MIN(tx_credits(ssk),
			SDP_TX_SIZE - tx_ring_posted(ssk));
	if (min_free < SDP_MIN_TX_CREDITS)
		return 0;

	return min_free - SDP_MIN_TX_CREDITS;
}
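/*
 * Worked example (editorial): with tx_credits(ssk) == 10 and 4 sends
 * posted, min_free = MIN(10, 0x40 - 4) = 10, so the function reports
 * 10 - SDP_MIN_TX_CREDITS = 8 free slots.
 */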

/* utilities */
static inline char *mid2str(int mid)
{
#define ENUM2STR(e) [e] = #e
	static char *mid2str[] = {
		ENUM2STR(SDP_MID_HELLO),
		ENUM2STR(SDP_MID_HELLO_ACK),
		ENUM2STR(SDP_MID_ABORT),
		ENUM2STR(SDP_MID_DISCONN),
		ENUM2STR(SDP_MID_SENDSM),
		ENUM2STR(SDP_MID_RDMARDCOMPL),
		ENUM2STR(SDP_MID_SRCAVAIL_CANCEL),
		ENUM2STR(SDP_MID_CHRCVBUF),
		ENUM2STR(SDP_MID_CHRCVBUF_ACK),
		ENUM2STR(SDP_MID_DATA),
		ENUM2STR(SDP_MID_SRCAVAIL),
		ENUM2STR(SDP_MID_SINKAVAIL),
	};

	if (mid >= ARRAY_SIZE(mid2str))
		return NULL;

	return mid2str[mid];
}
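/*
 * Usage sketch (editorial): mid2str() maps a message id to its enum name
 * for debug output, e.g. mid2str(SDP_MID_DATA) returns "SDP_MID_DATA";
 * ids without an entry in the designated-initializer table yield NULL.
 */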

static inline struct mbuf *
sdp_alloc_mb(struct socket *sk, u8 mid, int size, int wait)
{
	struct sdp_bsdh *h;
	struct mbuf *mb;

	MGETHDR(mb, wait, MT_DATA);
	if (mb == NULL)
		return (NULL);
	mb->m_pkthdr.len = mb->m_len = sizeof(struct sdp_bsdh);
	h = mtod(mb, struct sdp_bsdh *);
	h->mid = mid;

	return mb;
}

static inline struct mbuf *
sdp_alloc_mb_data(struct socket *sk, int wait)
{
	return sdp_alloc_mb(sk, SDP_MID_DATA, 0, wait);
}

static inline struct mbuf *
sdp_alloc_mb_disconnect(struct socket *sk, int wait)
{
	return sdp_alloc_mb(sk, SDP_MID_DISCONN, 0, wait);
}

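/*
 * Editorial note: mb_put() is the mbuf analogue of Linux's skb_put(); it
 * extends m_len by 'len' bytes and returns a pointer to the start of the
 * newly claimed region.  The caller must ensure the mbuf has room.
 */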
static inline void *
mb_put(struct mbuf *mb, int len)
{
	uint8_t *data;

	data = mb->m_data;
	data += mb->m_len;
	mb->m_len += len;
	return (void *)data;
}

static inline struct mbuf *
sdp_alloc_mb_chrcvbuf_ack(struct socket *sk, int size, int wait)
{
	struct mbuf *mb;
	struct sdp_chrecvbuf *resp_size;

	mb = sdp_alloc_mb(sk, SDP_MID_CHRCVBUF_ACK, sizeof(*resp_size), wait);
	if (mb == NULL)
		return (NULL);
	resp_size = (struct sdp_chrecvbuf *)mb_put(mb, sizeof(*resp_size));
	resp_size->size = htonl(size);

	return mb;
}

static inline struct mbuf *
sdp_alloc_mb_srcavail(struct socket *sk, u32 len, u32 rkey, u64 vaddr, int wait)
{
	struct mbuf *mb;
	struct sdp_srcah *srcah;

	mb = sdp_alloc_mb(sk, SDP_MID_SRCAVAIL, sizeof(*srcah), wait);
	if (mb == NULL)
		return (NULL);
	srcah = (struct sdp_srcah *)mb_put(mb, sizeof(*srcah));
	srcah->len = htonl(len);
	srcah->rkey = htonl(rkey);
	srcah->vaddr = cpu_to_be64(vaddr);

	return mb;
}

static inline struct mbuf *
sdp_alloc_mb_srcavail_cancel(struct socket *sk, int wait)
{
	return sdp_alloc_mb(sk, SDP_MID_SRCAVAIL_CANCEL, 0, wait);
}

static inline struct mbuf *
sdp_alloc_mb_rdmardcompl(struct socket *sk, u32 len, int wait)
{
	struct mbuf *mb;
	struct sdp_rrch *rrch;

	mb = sdp_alloc_mb(sk, SDP_MID_RDMARDCOMPL, sizeof(*rrch), wait);
	if (mb == NULL)
		return (NULL);
	rrch = (struct sdp_rrch *)mb_put(mb, sizeof(*rrch));
	rrch->len = htonl(len);

	return mb;
}

static inline struct mbuf *
sdp_alloc_mb_sendsm(struct socket *sk, int wait)
{
	return sdp_alloc_mb(sk, SDP_MID_SENDSM, 0, wait);
}

static inline int sdp_tx_ring_slots_left(struct sdp_sock *ssk)
{
	return SDP_TX_SIZE - tx_ring_posted(ssk);
}

static inline int credit_update_needed(struct sdp_sock *ssk)
{
	int c;

	c = remote_credits(ssk);
	if (likely(c > SDP_MIN_TX_CREDITS))
		c += c/2;
	return unlikely(c < rx_ring_posted(ssk)) &&
	    likely(tx_credits(ssk) > 0) &&
	    likely(sdp_tx_ring_slots_left(ssk));
}
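/*
 * Worked example (editorial): if the peer last saw 6 credits, then
 * c = 6 + 6/2 = 9, so an explicit credit update is needed once more than
 * 9 receive buffers are posted, provided we hold a tx credit and a free
 * tx ring slot to send it with.
 */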
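/*
 * Editorial note: statistics collection is compiled out in this port; the
 * SDPSTATS_* macros below intentionally expand to nothing and discard
 * their arguments.
 */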
#define SDPSTATS_COUNTER_INC(stat)
#define SDPSTATS_COUNTER_ADD(stat, val)
#define SDPSTATS_COUNTER_MID_INC(stat, mid)
#define SDPSTATS_HIST_LINEAR(stat, size)
#define SDPSTATS_HIST(stat, size)

static inline void
sdp_cleanup_sdp_buf(struct sdp_sock *ssk, struct sdp_buf *sbuf,
    enum dma_data_direction dir)
{
	struct ib_device *dev;
	struct mbuf *mb;
	int i;

	dev = ssk->ib_device;
	for (i = 0, mb = sbuf->mb; mb != NULL; mb = mb->m_next, i++)
		ib_dma_unmap_single(dev, sbuf->mapping[i], mb->m_len, dir);
}

/* sdp_main.c */
void sdp_set_default_moderation(struct sdp_sock *ssk);
void sdp_start_keepalive_timer(struct socket *sk);
void sdp_urg(struct sdp_sock *ssk, struct mbuf *mb);
void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk);
void sdp_abort(struct socket *sk);
struct sdp_sock *sdp_notify(struct sdp_sock *ssk, int error);
/* sdp_cma.c */
int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);

/* sdp_tx.c */
int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
void sdp_tx_ring_destroy(struct sdp_sock *ssk);
int sdp_xmit_poll(struct sdp_sock *ssk, int force);
void sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb);
void sdp_post_sends(struct sdp_sock *ssk, int wait);
void sdp_post_keepalive(struct sdp_sock *ssk);

/* sdp_rx.c */
void sdp_rx_ring_init(struct sdp_sock *ssk);
int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device);
void sdp_rx_ring_destroy(struct sdp_sock *ssk);
int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size);
int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size);
void sdp_do_posts(struct sdp_sock *ssk);
void sdp_rx_comp_full(struct sdp_sock *ssk);

/* sdp_zcopy.c */
struct kiocb;
int sdp_sendmsg_zcopy(struct kiocb *iocb, struct socket *sk, struct iovec *iov);
int sdp_handle_srcavail(struct sdp_sock *ssk, struct sdp_srcah *srcah);
void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack);
void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
		u32 bytes_completed);
int sdp_handle_rdma_read_cqe(struct sdp_sock *ssk);
int sdp_rdma_to_iovec(struct socket *sk, struct iovec *iov, struct mbuf *mb,
		unsigned long *used);
int sdp_post_rdma_rd_compl(struct sdp_sock *ssk,
		struct rx_srcavail_state *rx_sa);
int sdp_post_sendsm(struct socket *sk);
void srcavail_cancel_timeout(struct work_struct *work);
void sdp_abort_srcavail(struct socket *sk);
void sdp_abort_rdma_read(struct socket *sk);
int sdp_process_rx(struct sdp_sock *ssk);

#endif /* _SDP_H_ */