/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_H
#define _SIW_H

#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include <rdma/siw-abi.h>
#include "iwarp.h"

#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
#define SIW_VENDOR_PART_ID 0
#define SIW_MAX_QP (1024 * 100)
#define SIW_MAX_QP_WR (1024 * 32)
#define SIW_MAX_ORD_QP 128
#define SIW_MAX_IRD_QP 128
#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
#define SIW_MAX_SGE_RD 1 /* iwarp limitation. we could relax */
#define SIW_MAX_CQ (1024 * 100)
#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD

/* Min number of bytes for using zero copy transmit */
#define SENDPAGE_THRESH PAGE_SIZE

/* Maximum number of frames which can be sent in one SQ processing cycle */
#define SQ_USER_MAXBURST 100

/* Maximum number of consecutive IRQ elements which get served
 * while the SQ has pending work. This prevents the servicing of
 * peer Read Requests from starving local SQ processing (see the
 * illustrative sketch near irq_alloc_free() below).
 */
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4

struct siw_dev_cap {
	int max_qp;
	int max_qp_wr;
	int max_ord; /* max. outbound read queue depth */
	int max_ird; /* max. inbound read queue depth */
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_mw;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
};

struct siw_pd {
	struct ib_pd base_pd;
};

struct siw_device {
	struct ib_device base_dev;
	struct net_device *netdev;
	struct siw_dev_cap attrs;

	u32 vendor_part_id;
	int numa_node;
	char raw_gid[ETH_ALEN];

	/* physical port state (only one port per device) */
	enum ib_port_state state;

	spinlock_t lock;

	struct xarray qp_xa;
	struct xarray mem_xa;

	struct list_head cep_list;
	struct list_head qp_list;

	/* active objects statistics to enforce limits */
	atomic_t num_qp;
	atomic_t num_cq;
	atomic_t num_pd;
	atomic_t num_mr;
	atomic_t num_srq;
	atomic_t num_ctx;

	struct work_struct netdev_down;
};

struct siw_ucontext {
	struct ib_ucontext base_ucontext;
	struct siw_device *sdev;
};

/*
 * The RDMA core does not define LOCAL_READ access; it is always
 * enabled implicitly.
 */
#define IWARP_ACCESS_MASK					\
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE	|	\
	 IB_ACCESS_REMOTE_READ)
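
/*
 * Sketch (editor's illustration; helper name is hypothetical): a
 * registration request asking for rights outside the iWARP set, e.g.
 * IB_ACCESS_REMOTE_ATOMIC, would be rejected like this. LOCAL_READ
 * needs no flag since it is implicitly granted.
 */
static inline int siw_example_check_access(int rights)
{
	if (rights & ~IWARP_ACCESS_MASK)
		return -EINVAL;
	return 0;
}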

/*
 * siw presentation of user memory registered as source
 * or target of RDMA operations.
 */

struct siw_page_chunk {
	struct page **plist;
};

struct siw_umem {
	struct ib_umem *base_mem;
	struct siw_page_chunk *page_chunk;
	int num_pages;
	u64 fp_addr; /* First page base address */
};

struct siw_pble {
	dma_addr_t addr; /* Address of assigned buffer */
	unsigned int size; /* Size of this entry */
	unsigned long pbl_off; /* Total offset from start of PBL */
};

struct siw_pbl {
	unsigned int num_buf;
	unsigned int max_buf;
	struct siw_pble pbe[] __counted_by(max_buf);
};

/*
 * Generic memory representation for registered siw memory.
 * Memory lookup is always via the upper 24 bits of the STag
 * (the STag index).
 */
struct siw_mem {
	struct siw_device *sdev;
	struct kref ref;
	u64 va; /* VA of memory */
	u64 len; /* length of the memory buffer in bytes */
	u32 stag; /* iWarp memory access steering tag */
	u8 stag_valid; /* VALID or INVALID */
	u8 is_pbl; /* PBL or user space mem */
	u8 is_mw; /* Memory Region or Memory Window */
	enum ib_access_flags perms; /* local/remote READ & WRITE */
	union {
		struct siw_umem *umem;
		struct siw_pbl *pbl;
		void *mem_obj;
	};
	struct ib_pd *pd;
};
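
/*
 * Sketch (editor's illustration; helper name is hypothetical): the
 * STag-indexed lookup described above, modeled on siw_qp_id2obj()
 * further below. The upper 24 bits of the STag index mem_xa; the
 * reference taken here must be dropped with kref_put() by the caller.
 */
static inline struct siw_mem *siw_example_mem_lookup(struct siw_device *sdev,
						     u32 stag)
{
	struct siw_mem *mem;

	rcu_read_lock();
	mem = xa_load(&sdev->mem_xa, stag >> 8);
	if (likely(mem && kref_get_unless_zero(&mem->ref))) {
		rcu_read_unlock();
		return mem;
	}
	rcu_read_unlock();
	return NULL;
}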

struct siw_mr {
	struct ib_mr base_mr;
	struct siw_mem *mem;
	struct rcu_head rcu;
};

/*
 * Error codes for local or remote
 * access to registered memory
 */
enum siw_access_state {
	E_ACCESS_OK,
	E_STAG_INVALID,
	E_BASE_BOUNDS,
	E_ACCESS_PERM,
	E_PD_MISMATCH
};

enum siw_wr_state {
	SIW_WR_IDLE,
	SIW_WR_QUEUED, /* processing has not started yet */
	SIW_WR_INPROGRESS /* initiated processing of the WR */
};

/* The WQE currently being processed (RX or TX) */
struct siw_wqe {
	/* Copy of the application's SQE or RQE */
	union {
		struct siw_sqe sqe;
		struct siw_rqe rqe;
	};
	struct siw_mem *mem[SIW_MAX_SGE]; /* per sge's resolved mem */
	enum siw_wr_state wr_status;
	enum siw_wc_status wc_status;
	u32 bytes; /* total bytes to process */
	u32 processed; /* bytes processed */
};

struct siw_cq {
	struct ib_cq base_cq;
	spinlock_t lock;
	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	u32 cq_put;
	u32 cq_get;
	u32 num_cqe;
	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
	u32 id; /* For debugging only */
};

enum siw_qp_state {
	SIW_QP_STATE_IDLE,
	SIW_QP_STATE_RTR,
	SIW_QP_STATE_RTS,
	SIW_QP_STATE_CLOSING,
	SIW_QP_STATE_TERMINATE,
	SIW_QP_STATE_ERROR,
	SIW_QP_STATE_COUNT
};

enum siw_qp_flags {
	SIW_RDMA_BIND_ENABLED = (1 << 0),
	SIW_RDMA_WRITE_ENABLED = (1 << 1),
	SIW_RDMA_READ_ENABLED = (1 << 2),
	SIW_SIGNAL_ALL_WR = (1 << 3),
	SIW_MPA_CRC = (1 << 4),
	SIW_QP_IN_DESTROY = (1 << 5)
};

enum siw_qp_attr_mask {
	SIW_QP_ATTR_STATE = (1 << 0),
	SIW_QP_ATTR_ACCESS_FLAGS = (1 << 1),
	SIW_QP_ATTR_LLP_HANDLE = (1 << 2),
	SIW_QP_ATTR_ORD = (1 << 3),
	SIW_QP_ATTR_IRD = (1 << 4),
	SIW_QP_ATTR_SQ_SIZE = (1 << 5),
	SIW_QP_ATTR_RQ_SIZE = (1 << 6),
	SIW_QP_ATTR_MPA = (1 << 7)
};

struct siw_srq {
	struct ib_srq base_srq;
	spinlock_t lock;
	u32 max_sge;
	u32 limit; /* low watermark for async event */
	struct siw_rqe *recvq;
	u32 rq_put;
	u32 rq_get;
	u32 num_rqe; /* max # of wqe's allowed */
	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
	bool armed:1; /* inform user if limit hit */
	bool is_kernel_res:1; /* true if kernel client */
};

struct siw_qp_attrs {
	enum siw_qp_state state;
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 sq_max_sges;
	u32 rq_max_sges;
	enum siw_qp_flags flags;

	struct socket *sk;
};

enum siw_tx_ctx {
	SIW_SEND_HDR, /* start or continue sending HDR */
	SIW_SEND_DATA, /* start or continue sending DDP payload */
	SIW_SEND_TRAILER, /* start or continue sending TRAILER */
	SIW_SEND_SHORT_FPDU /* send whole FPDU hdr|data|trailer at once */
};

enum siw_rx_state {
	SIW_GET_HDR, /* await new hdr or within hdr */
	SIW_GET_DATA_START, /* start of inbound DDP payload */
	SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
	SIW_GET_TRAILER /* await new trailer or within trailer */
};

struct siw_rx_stream {
	struct sk_buff *skb;
	int skb_new; /* pending unread bytes in skb */
	int skb_offset; /* offset in skb */
	int skb_copied; /* processed bytes in skb */

	enum siw_rx_state state;

	union iwarp_hdr hdr;
	struct mpa_trailer trailer;
	struct shash_desc *mpa_crc_hd;

	/*
	 * For each FPDU, the main RX loop runs through three stages:
	 * receiving protocol headers, placing DDP payload, and receiving
	 * trailer information (CRC + possibly padding). The next two
	 * variables track the receive status of the current FPDU part
	 * (hdr, data, trailer).
	 */
	int fpdu_part_rcvd; /* bytes of current pkt part copied */
	int fpdu_part_rem; /* bytes of current pkt part not yet seen */

	/*
	 * Next expected DDP MSN for each QN,
	 * expected steering tag, and
	 * expected DDP tagged offset (all in host byte order)
	 */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
	u32 ddp_stag;
	u64 ddp_to;
	u32 inval_stag; /* Stag to be invalidated */

	u8 rx_suspend : 1;
	u8 pad : 2; /* # of pad bytes expected */
	u8 rdmap_op : 4; /* opcode of current frame */
};
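
/*
 * Sketch (editor's illustration; helper name is hypothetical): how the
 * two fpdu_part_* counters advance as payload bytes arrive. 'avail'
 * stands for the bytes currently readable from the socket.
 */
static inline int siw_example_fpdu_progress(struct siw_rx_stream *srx,
					    int avail)
{
	int consumed = min(avail, srx->fpdu_part_rem);

	srx->fpdu_part_rcvd += consumed;
	srx->fpdu_part_rem -= consumed;

	return srx->fpdu_part_rem == 0; /* current FPDU part complete? */
}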

struct siw_rx_fpdu {
	/*
	 * Local destination memory of inbound RDMA operation.
	 * Valid, according to wqe->wr_status
	 */
	struct siw_wqe wqe_active;

	unsigned int pbl_idx; /* Index into current PBL */
	unsigned int sge_idx; /* current sge in rx */
	unsigned int sge_off; /* already rcvd in curr. sge */

	char first_ddp_seg; /* this is the first DDP seg */
	char more_ddp_segs; /* more DDP segs expected */
	u8 prev_rdmap_op : 4; /* opcode of prev frame */
};

/*
 * Shorthands for short packets w/o payload,
 * to be transmitted more efficiently.
 */
struct siw_send_pkt {
	struct iwarp_send send;
	__be32 crc;
};

struct siw_write_pkt {
	struct iwarp_rdma_write write;
	__be32 crc;
};

struct siw_rreq_pkt {
	struct iwarp_rdma_rreq rreq;
	__be32 crc;
};

struct siw_rresp_pkt {
	struct iwarp_rdma_rresp rresp;
	__be32 crc;
};

struct siw_iwarp_tx {
	union {
		union iwarp_hdr hdr;

		/* Generic part of FPDU header */
		struct iwarp_ctrl ctrl;
		struct iwarp_ctrl_untagged c_untagged;
		struct iwarp_ctrl_tagged c_tagged;

		/* FPDU headers */
		struct iwarp_rdma_write rwrite;
		struct iwarp_rdma_rreq rreq;
		struct iwarp_rdma_rresp rresp;
		struct iwarp_terminate terminate;
		struct iwarp_send send;
		struct iwarp_send_inv send_inv;

		/* complete short FPDUs */
		struct siw_send_pkt send_pkt;
		struct siw_write_pkt write_pkt;
		struct siw_rreq_pkt rreq_pkt;
		struct siw_rresp_pkt rresp_pkt;
	} pkt;

	struct mpa_trailer trailer;
	/* DDP MSN for untagged messages */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];

	enum siw_tx_ctx state;
	u16 ctrl_len; /* ddp+rdmap hdr */
	u16 ctrl_sent;
	int burst;
	int bytes_unsent; /* ddp payload bytes */

	struct shash_desc *mpa_crc_hd;

	u8 do_crc : 1; /* do crc for segment */
	u8 use_sendpage : 1; /* send w/o copy */
	u8 tx_suspend : 1; /* stop sending DDP segs. */
	u8 pad : 2; /* # pad in current fpdu */
	u8 orq_fence : 1; /* ORQ full or Send fenced */
	u8 in_syscall : 1; /* TX out of user context */
	u8 zcopy_tx : 1; /* Use TCP_SENDPAGE if possible */
	u8 gso_seg_limit; /* Maximum segments for GSO, 0 = no limit */

	u16 fpdu_len; /* len of FPDU to tx */
	unsigned int tcp_seglen; /* remaining tcp seg space */

	struct siw_wqe wqe_active;

	int pbl_idx; /* Index into current PBL */
	int sge_idx; /* current sge in tx */
	u32 sge_off; /* already sent in curr. sge */
};

struct siw_qp {
	struct ib_qp base_qp;
	struct siw_device *sdev;
	int tx_cpu;
	struct kref ref;
	struct completion qp_free;
	struct list_head devq;
	struct siw_qp_attrs attrs;

	struct siw_cep *cep;
	struct rw_semaphore state_lock;

	struct ib_pd *pd;
	struct siw_cq *scq;
	struct siw_cq *rcq;
	struct siw_srq *srq;

	struct siw_iwarp_tx tx_ctx; /* Transmit context */
	spinlock_t sq_lock;
	struct siw_sqe *sendq; /* send queue element array */
	uint32_t sq_get; /* consumer index into sq array */
	uint32_t sq_put; /* kernel prod. index into sq array */
	struct llist_node tx_list;

	struct siw_sqe *orq; /* outbound read queue element array */
	spinlock_t orq_lock;
	uint32_t orq_get; /* consumer index into orq array */
	uint32_t orq_put; /* shared producer index for ORQ */

	struct siw_rx_stream rx_stream;
	struct siw_rx_fpdu *rx_fpdu;
	struct siw_rx_fpdu rx_tagged;
	struct siw_rx_fpdu rx_untagged;
	spinlock_t rq_lock;
	struct siw_rqe *recvq; /* recv queue element array */
	uint32_t rq_get; /* consumer index into rq array */
	uint32_t rq_put; /* kernel prod. index into rq array */

	struct siw_sqe *irq; /* inbound read queue element array */
	uint32_t irq_get; /* consumer index into irq array */
	uint32_t irq_put; /* producer index into irq array */
	int irq_burst;

	struct { /* information to be carried in TERMINATE pkt, if valid */
		u8 valid;
		u8 in_tx;
		u8 layer : 4, etype : 4;
		u8 ecode;
	} term_info;
	struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
	struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
};

/* helper macros */
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
#define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
#define rx_wqe(rctx) (&(rctx)->wqe_active)
#define rx_mem(rctx) ((rctx)->wqe_active.mem[0])
#define tx_type(wqe) ((wqe)->sqe.opcode)
#define rx_type(wqe) ((wqe)->rqe.opcode)
#define tx_flags(wqe) ((wqe)->sqe.flags)

struct iwarp_msg_info {
	int hdr_len;
	struct iwarp_ctrl ctrl;
	int (*rx_data)(struct siw_qp *qp);
};

struct siw_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *address;
};

/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
extern const bool loopback_enabled;
extern const bool mpa_crc_required;
extern const bool mpa_crc_strict;
extern const bool siw_tcp_nagle;
extern u_char mpa_version;
extern const bool peer_to_peer;
extern struct task_struct *siw_tx_thread[];

extern struct crypto_shash *siw_crypto_shash;
extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];

/* QP general functions */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
		  enum siw_qp_attr_mask mask);
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
void siw_qp_llp_close(struct siw_qp *qp);
void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
void siw_send_terminate(struct siw_qp *qp);

void siw_qp_get_ref(struct ib_qp *qp);
void siw_qp_put_ref(struct ib_qp *qp);
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp);
void siw_free_qp(struct kref *ref);

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer,
			u8 etype, u8 ecode, int in_tx);
enum ddp_ecode siw_tagged_error(enum siw_access_state state);
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state);

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status);
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status);
void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);

/* QP TX path functions */
int siw_create_tx_threads(void);
void siw_stop_tx_threads(void);
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);

/* QP RX path functions */
int siw_proc_send(struct siw_qp *qp);
int siw_proc_rreq(struct siw_qp *qp);
int siw_proc_rresp(struct siw_qp *qp);
int siw_proc_write(struct siw_qp *qp);
int siw_proc_terminate(struct siw_qp *qp);

int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int off, size_t len);

static inline void set_rx_fpdu_context(struct siw_qp *qp, u8 opcode)
{
	if (opcode == RDMAP_RDMA_WRITE || opcode == RDMAP_RDMA_READ_RESP)
		qp->rx_fpdu = &qp->rx_tagged;
	else
		qp->rx_fpdu = &qp->rx_untagged;

	qp->rx_stream.rdmap_op = opcode;
}

static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
{
	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}

static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
	return container_of(base_qp, struct siw_qp, base_qp);
}

static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
{
	return container_of(base_cq, struct siw_cq, base_cq);
}

static inline struct siw_srq *to_siw_srq(struct ib_srq *base_srq)
{
	return container_of(base_srq, struct siw_srq, base_srq);
}

static inline struct siw_device *to_siw_dev(struct ib_device *base_dev)
{
	return container_of(base_dev, struct siw_device, base_dev);
}

static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
{
	return container_of(base_mr, struct siw_mr, base_mr);
}

static inline struct siw_user_mmap_entry *
to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
{
	return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
}

static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
	struct siw_qp *qp;

	rcu_read_lock();
	qp = xa_load(&sdev->qp_xa, id);
	if (likely(qp && kref_get_unless_zero(&qp->ref))) {
		rcu_read_unlock();
		return qp;
	}
	rcu_read_unlock();
	return NULL;
}

static inline u32 qp_id(struct siw_qp *qp)
{
	return qp->base_qp.qp_num;
}

static inline void siw_qp_get(struct siw_qp *qp)
{
	kref_get(&qp->ref);
}

static inline void siw_qp_put(struct siw_qp *qp)
{
	kref_put(&qp->ref, siw_free_qp);
}
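
/*
 * Sketch (editor's illustration; helper name is hypothetical): a QP
 * reference obtained via siw_qp_id2obj() stays valid only because of
 * the kref taken under the RCU read lock, and must be released with
 * siw_qp_put() when done.
 */
static inline void siw_example_with_qp(struct siw_device *sdev, int id)
{
	struct siw_qp *qp = siw_qp_id2obj(sdev, id);

	if (!qp)
		return;
	/* ... operate on the QP while holding the reference ... */
	siw_qp_put(qp);
}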

static inline int siw_sq_empty(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	return READ_ONCE(sqe->flags) == 0;
}

static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
		return sqe;

	return NULL;
}
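
/*
 * Sketch (editor's illustration; helper name is hypothetical): draining
 * the SQ ring with sq_get_next(). The consumer owns sq_get; a slot is
 * handed back to the producer by clearing its flags word (real code
 * may additionally need a store barrier here).
 */
static inline void siw_example_drain_sq(struct siw_qp *qp)
{
	struct siw_sqe *sqe;

	while ((sqe = sq_get_next(qp)) != NULL) {
		/* ... process the SQE ... */
		WRITE_ONCE(sqe->flags, 0); /* release slot to producer */
		qp->sq_get++;
	}
}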

static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
	struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];

	if (READ_ONCE(orq_e->flags) == 0)
		return orq_e;

	return NULL;
}

static inline int siw_orq_empty(struct siw_qp *qp)
{
	return orq_get_current(qp)->flags == 0 ? 1 : 0;
}

static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
{
	struct siw_sqe *irq_e = &qp->irq[qp->irq_put % qp->attrs.irq_size];

	if (READ_ONCE(irq_e->flags) == 0) {
		qp->irq_put++;
		return irq_e;
	}
	return NULL;
}
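
/*
 * Sketch (editor's illustration; helper name is hypothetical): how a
 * transmit loop could apply SIW_IRQ_MAXBURST_SQ_ACTIVE from the top of
 * this file, using the irq_burst counter of struct siw_qp. The real
 * scheduling logic lives in the TX path, not in this header.
 */
static inline bool siw_example_may_serve_irq(struct siw_qp *qp,
					     bool sq_has_work)
{
	if (!sq_has_work)
		return true; /* no local SQ work: keep serving peer reads */
	if (qp->irq_burst++ < SIW_IRQ_MAXBURST_SQ_ACTIVE)
		return true; /* still within the burst budget */
	qp->irq_burst = 0;
	return false; /* yield to local SQ processing */
}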

static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
{
	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}

static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
				      int len)
{
	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
						   (__force __u32)csum2, len);
}

static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
	const struct skb_checksum_ops siw_cs_ops = {
		.update = siw_csum_update,
		.combine = siw_csum_combine,
	};
	__wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);

	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
			     &siw_cs_ops);
	*(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
}
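
/*
 * Note (editor's illustration; helper name is hypothetical):
 * siw_crc_skb() exists so the RX side can checksum fragmented skb data
 * without linearizing it. For linear buffers, e.g. on the TX side, the
 * plain shash interface suffices:
 */
static inline int siw_example_crc_buf(struct shash_desc *desc,
				      const u8 *buf, unsigned int len)
{
	return crypto_shash_update(desc, buf, len);
}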

#define siw_dbg(ibdev, fmt, ...)                                               \
	ibdev_dbg(ibdev, "%s: " fmt, __func__, ##__VA_ARGS__)

#define siw_dbg_qp(qp, fmt, ...)                                               \
	ibdev_dbg(&qp->sdev->base_dev, "QP[%u] %s: " fmt, qp_id(qp), __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_cq(cq, fmt, ...)                                               \
	ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__,     \
		  ##__VA_ARGS__)

#define siw_dbg_pd(pd, fmt, ...)                                               \
	ibdev_dbg(pd->device, "PD[%u] %s: " fmt, pd->res.id, __func__,         \
		  ##__VA_ARGS__)

#define siw_dbg_mem(mem, fmt, ...)                                             \
	ibdev_dbg(&mem->sdev->base_dev,                                        \
		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)

#define siw_dbg_cep(cep, fmt, ...)                                             \
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,                 \
		  cep, __func__, ##__VA_ARGS__)

void siw_cq_flush(struct siw_cq *cq);
void siw_sq_flush(struct siw_qp *qp);
void siw_rq_flush(struct siw_qp *qp);
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);

#endif