1/**************************************************************************
2
3Copyright (c) 2007, 2008 Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13    contributors may be used to endorse or promote products derived from
14    this software without specific prior written permission.
15
16THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28$FreeBSD$
29
30***************************************************************************/
31#ifndef __CXIO_WR_H__
32#define __CXIO_WR_H__
33#define T3_MAX_SGE      4
34#define T3_MAX_INLINE	64
35#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
36#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
37#define T3_STAG0_PAGE_SHIFT 15
38
39#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
40#define Q_FULL(rptr,wptr,size_log2)  ( (((wptr)-(rptr))>>(size_log2)) && \
41				       ((rptr)!=(wptr)) )
42#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
43#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
44#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
45#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
46
47static __inline void
48ring_doorbell(void /* __iomem  */ *doorbell, u32 qpid)
49{
50	writel(doorbell, ((1<<31) | qpid));
51}
52
53#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
54
/* Per-WR flag bits carried in the FW header flags field (see V_FW_RIWR_FLAGS). */
enum t3_wr_flags {
	T3_COMPLETION_FLAG = 0x01,
	T3_NOTIFY_FLAG = 0x02,
	T3_SOLICITED_EVENT_FLAG = 0x04,
	T3_READ_FENCE_FLAG = 0x08,
	T3_LOCAL_FENCE_FLAG = 0x10
} __attribute__ ((packed));
62
/* Work-request opcodes: aliases of the firmware RI WR opcode values. */
enum t3_wr_opcode {
	T3_WR_BP = FW_WROPCODE_RI_BYPASS,
	T3_WR_SEND = FW_WROPCODE_RI_SEND,
	T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
	T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
	T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
	T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
	T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
	T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
	T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
} __attribute__ ((packed));
74
/*
 * RDMA protocol opcodes.  The first group follows IETF RDMAP v1.0
 * numbering; entries from T3_RDMA_INIT onward are Chelsio RI specific.
 */
enum t3_rdma_opcode {
	T3_RDMA_WRITE,		/* IETF RDMAP v1.0 ... */
	T3_READ_REQ,
	T3_READ_RESP,
	T3_SEND,
	T3_SEND_WITH_INV,
	T3_SEND_WITH_SE,
	T3_SEND_WITH_SE_INV,
	T3_TERMINATE,
	T3_RDMA_INIT,		/* CHELSIO RI specific ... */
	T3_BIND_MW,
	T3_FAST_REGISTER,
	T3_LOCAL_INV,
	T3_QP_MOD,
	T3_BYPASS
} __attribute__ ((packed));
91
92static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
93{
94	switch (wrop) {
95		case T3_WR_BP: return T3_BYPASS;
96		case T3_WR_SEND: return T3_SEND;
97		case T3_WR_WRITE: return T3_RDMA_WRITE;
98		case T3_WR_READ: return T3_READ_REQ;
99		case T3_WR_INV_STAG: return T3_LOCAL_INV;
100		case T3_WR_BIND: return T3_BIND_MW;
101		case T3_WR_INIT: return T3_RDMA_INIT;
102		case T3_WR_QP_MOD: return T3_QP_MOD;
103		default: break;
104	}
105	return -1;
106}
107
108
109/* Work request id */
union t3_wrid {
	struct {
		u32 hi;		/* high 32 bits of the 64-bit id */
		u32 low;	/* low 32 bits */
	} id0;
	u64 id1;		/* full 64-bit view */
};
117
118#define WRID(wrid)		(wrid.id1)
119#define WRID_GEN(wrid)		(wrid.id0.wr_gen)
120#define WRID_IDX(wrid)		(wrid.id0.wr_idx)
121#define WRID_LO(wrid)		(wrid.id0.wr_lo)
122
/* Generic firmware RI work-request header: the first flit of every WR. */
struct fw_riwrh {
	__be32 op_seop_flags;	/* opcode | SOP/EOP | flags (S_FW_RIWR_* below) */
	__be32 gen_tid_len;	/* gen bit | TID | length */
};
127
128#define S_FW_RIWR_OP		24
129#define M_FW_RIWR_OP		0xff
130#define V_FW_RIWR_OP(x)		((x) << S_FW_RIWR_OP)
131#define G_FW_RIWR_OP(x)	((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
132
133#define S_FW_RIWR_SOPEOP	22
134#define M_FW_RIWR_SOPEOP	0x3
135#define V_FW_RIWR_SOPEOP(x)	((x) << S_FW_RIWR_SOPEOP)
136
137#define S_FW_RIWR_FLAGS		8
138#define M_FW_RIWR_FLAGS		0x3fffff
139#define V_FW_RIWR_FLAGS(x)	((x) << S_FW_RIWR_FLAGS)
140#define G_FW_RIWR_FLAGS(x)	((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
141
142#define S_FW_RIWR_TID		8
143#define V_FW_RIWR_TID(x)	((x) << S_FW_RIWR_TID)
144
145#define S_FW_RIWR_LEN		0
146#define V_FW_RIWR_LEN(x)	((x) << S_FW_RIWR_LEN)
147
148#define S_FW_RIWR_GEN           31
149#define V_FW_RIWR_GEN(x)        ((x)  << S_FW_RIWR_GEN)
150
/* Scatter/gather element: STag, length, 64-bit offset — all big-endian. */
struct t3_sge {
	__be32 stag;
	__be32 len;
	__be64 to;
};
156
157/* If num_sgle is zero, flit 5+ contains immediate data.*/
/* SEND work request.  Trailing numbers are flit indices within the WQE. */
struct t3_send_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */

	u8 rdmaop;		/* 2 */
	u8 reserved[3];
	__be32 rem_stag;
	__be32 plen;		/* 3 */
	__be32 num_sgle;
	struct t3_sge sgl[T3_MAX_SGE];	/* 4+ */
};
169
/* LOCAL_INVALIDATE work request: invalidates the given STag. */
struct t3_local_inv_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 stag;		/* 2 */
	__be32 reserved3;
};
176
/* RDMA WRITE work request.  Trailing numbers are flit indices. */
struct t3_rdma_write_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 rdmaop;		/* 2 */
	u8 reserved[3];
	__be32 stag_sink;
	__be64 to_sink;		/* 3 */
	__be32 plen;		/* 4 */
	__be32 num_sgle;
	struct t3_sge sgl[T3_MAX_SGE];	/* 5+ */
};
188
/* RDMA READ work request: one remote source, one local sink SGE. */
struct t3_rdma_read_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 rdmaop;		/* 2 */
	u8 reserved[3];
	__be32 rem_stag;
	__be64 rem_to;		/* 3 */
	__be32 local_stag;	/* 4 */
	__be32 local_len;
	__be64 local_to;	/* 5 */
};
200
/* Addressing mode for memory windows: VA-based vs zero-based offsets. */
enum t3_addr_type {
	T3_VA_BASED_TO = 0x0,
	T3_ZERO_BASED_TO = 0x1
} __attribute__ ((packed));
205
/* Memory access permission bits (used e.g. by t3_bind_mw_wr.perms). */
enum t3_mem_perms {
	T3_MEM_ACCESS_LOCAL_READ = 0x1,
	T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
	T3_MEM_ACCESS_REM_READ = 0x4,
	T3_MEM_ACCESS_REM_WRITE = 0x8
} __attribute__ ((packed));
212
/* BIND_MW work request: binds a memory window to a memory region. */
struct t3_bind_mw_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u16 reserved;		/* 2 */
	u8 type;		/* enum t3_addr_type */
	u8 perms;		/* enum t3_mem_perms bits */
	__be32 mr_stag;
	__be32 mw_stag;		/* 3 */
	__be32 mw_len;
	__be64 mw_va;		/* 4 */
	__be32 mr_pbl_addr;	/* 5 */
	u8 reserved2[3];
	u8 mr_pagesz;
};
227
/* RECEIVE work request: per-SGE page sizes and PBL addresses. */
struct t3_receive_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	u8 pagesz[T3_MAX_SGE];
	__be32 num_sgle;		/* 2 */
	struct t3_sge sgl[T3_MAX_SGE];	/* 3+ */
	__be32 pbl_addr[T3_MAX_SGE];
};
236
/* BYPASS work request: header and id only, no payload fields. */
struct t3_bypass_wr {
	struct fw_riwrh wrh;
	union t3_wrid wrid;	/* 1 */
};
241
/*
 * MODIFY_QP work request.  Two 32-bit fields share a 64-bit flit,
 * hence the repeated flit numbers.  `flags` selects which attributes
 * apply (enum t3_modify_qp_flags).
 */
struct t3_modify_qp_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 flags;		/* 2 */
	__be32 quiesce;		/* 2 */
	__be32 max_ird;		/* 3 */
	__be32 max_ord;		/* 3 */
	__be64 sge_cmd;		/* 4 */
	__be64 ctx1;		/* 5 */
	__be64 ctx0;		/* 6 */
};
253
/* Selects which t3_modify_qp_wr attribute fields the FW should apply. */
enum t3_modify_qp_flags {
	MODQP_QUIESCE  = 0x01,
	MODQP_MAX_IRD  = 0x02,
	MODQP_MAX_ORD  = 0x04,
	MODQP_WRITE_EC = 0x08,
	MODQP_READ_EC  = 0x10,
};
261
262
/* MPA negotiation attribute bits passed to RDMA_INIT. */
enum t3_mpa_attrs {
	uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
	uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
	uP_RI_MPA_CRC_ENABLE = 0x4,
	uP_RI_MPA_IETF_ENABLE = 0x8
} __attribute__ ((packed));
269
/* Per-QP capability bits passed to RDMA_INIT. */
enum t3_qp_caps {
	uP_RI_QP_RDMA_READ_ENABLE = 0x01,
	uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
	uP_RI_QP_BIND_ENABLE = 0x04,
	uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
	uP_RI_QP_STAG0_ENABLE = 0x10
} __attribute__ ((packed));
277
/* Ready-to-receive message type used during connection setup. */
enum rdma_init_rtr_types {
        RTR_READ = 1,
        RTR_WRITE = 2,
        RTR_SEND = 3,
};
283
284#define S_RTR_TYPE      2
285#define M_RTR_TYPE      0x3
286#define V_RTR_TYPE(x)   ((x) << S_RTR_TYPE)
287#define G_RTR_TYPE(x)   ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
288
289#define S_CHAN          4
290#define M_CHAN          0x3
291#define V_CHAN(x)       ((x) << S_CHAN)
292#define G_CHAN(x)       ((((x) >> S_CHAN)) & M_CHAN)
293
/* Host-side attribute bundle used to build a t3_rdma_init_wr. */
struct t3_rdma_init_attr {
	u32 tid;			/* TCP tid of the connection */
	u32 qpid;
	u32 pdid;			/* protection domain id */
	u32 scqid;			/* send CQ id */
	u32 rcqid;			/* receive CQ id */
	u32 rq_addr;			/* RQ adapter address */
	u32 rq_size;
	enum t3_mpa_attrs mpaattrs;
	enum t3_qp_caps qpcaps;
	u16 tcp_emss;
	u32 ord;			/* outbound RDMA read depth */
	u32 ird;			/* inbound RDMA read depth */
	u64 qp_dma_addr;
	u32 qp_dma_size;
	enum rdma_init_rtr_types rtr_type;
	u16 flags;			/* enum rdma_init_wr_flags */
	u16 rqe_count;
	u32 irs;			/* initial receive sequence number */
	u32 chan;
};
315
/* RDMA_INIT work request (wire form of t3_rdma_init_attr). */
struct t3_rdma_init_wr {
	struct fw_riwrh wrh;	/* 0 */
	union t3_wrid wrid;	/* 1 */
	__be32 qpid;		/* 2 */
	__be32 pdid;
	__be32 scqid;		/* 3 */
	__be32 rcqid;
	__be32 rq_addr;		/* 4 */
	__be32 rq_size;
	u8 mpaattrs;		/* 5 */
	u8 qpcaps;
	__be16 ulpdu_size;
	__be16 flags_rtr_type;	/* flags | RTR type (V_RTR_TYPE) */
        __be16 rqe_count;
	__be32 ord;		/* 6 */
	__be32 ird;
	__be64 qp_dma_addr;	/* 7 */
	__be32 qp_dma_size;	/* 8 */
	__be32 irs;
};
336
/*
 * Overlay for a 16-flit WQE giving direct access to the last flit,
 * which carries the second generation bit (see build_fw_riwrh).
 */
struct t3_genbit {
	u64 flit[15];
	__be64 genbit;
};
341
/* Flag bits for t3_rdma_init_attr.flags / t3_rdma_init_wr.flags_rtr_type. */
enum rdma_init_wr_flags {
        MPA_INITIATOR = (1<<0),
        PRIV_QP = (1<<1),
};
346
/* One 16-flit (128-byte) WQE slot, viewable as any of the WR types. */
union t3_wr {
	struct t3_send_wr send;
	struct t3_rdma_write_wr write;
	struct t3_rdma_read_wr read;
	struct t3_receive_wr recv;
	struct t3_local_inv_wr local_inv;
	struct t3_bind_mw_wr bind;
	struct t3_bypass_wr bypass;
	struct t3_rdma_init_wr init;
	struct t3_modify_qp_wr qp_mod;
	struct t3_genbit genbit;
	u64 flit[16];
};
360
361#define T3_SQ_CQE_FLIT	  13
362#define T3_SQ_COOKIE_FLIT 14
363
364#define T3_RQ_COOKIE_FLIT 13
365#define T3_RQ_CQE_FLIT	  14
366
367static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
368{
369	return G_FW_RIWR_OP(be32toh(wqe->op_seop_flags));
370}
371
/*
 * Fill in a firmware RI WR header.
 *
 * The first flit (opcode/SOP-EOP/flags) is written before the barrier;
 * the wmb() ensures it is visible before gen_tid_len — which carries
 * the generation bit HW polls for — is stored.  The 64-bit store into
 * the WQE's final flit then sets the second gen bit so HW can detect a
 * fully-written 16-flit WQE.
 * NOTE(review): assumes `wqe` points at a full union t3_wr slot, not a
 * bare header — confirm at call sites.
 */
static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
				  enum t3_wr_flags flags, u8 genbit, u32 tid,
				  u8 len)
{
	wqe->op_seop_flags = htobe32(V_FW_RIWR_OP(op) |
					 V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
					 V_FW_RIWR_FLAGS(flags));
	wmb();
	wqe->gen_tid_len = htobe32(V_FW_RIWR_GEN(genbit) |
				       V_FW_RIWR_TID(tid) |
				       V_FW_RIWR_LEN(len));
	/* 2nd gen bit... */
	((union t3_wr *)wqe)->genbit.genbit = htobe64(genbit);
}
386
387/*
388 * T3 ULP2_TX commands
389 */
/* ULP2_TX adapter-memory operation codes. */
enum t3_utx_mem_op {
	T3_UTX_MEM_READ = 2,
	T3_UTX_MEM_WRITE = 3
};
394
395/* T3 MC7 RDMA TPT entry format */
396
/* TPT entry memory object types. */
enum tpt_mem_type {
	TPT_NON_SHARED_MR = 0x0,
	TPT_SHARED_MR = 0x1,
	TPT_MW = 0x2,
	TPT_MW_RELAXED_PROTECTION = 0x3
};
403
/* TPT addressing mode: zero-based vs VA-based tagged offsets. */
enum tpt_addr_type {
	TPT_ZBTO = 0,
	TPT_VATO = 1
};
408
/*
 * TPT permission bits.  Note the bit values differ from
 * enum t3_mem_perms — these are the on-chip TPT encodings.
 */
enum tpt_mem_perm {
	TPT_LOCAL_READ = 0x8,
	TPT_LOCAL_WRITE = 0x4,
	TPT_REMOTE_READ = 0x2,
	TPT_REMOTE_WRITE = 0x1
};
415
/*
 * On-chip MC7 RDMA TPT (translation and protection table) entry.
 * Field layouts are described by the S_TPT_*/M_TPT_* macros below.
 */
struct tpt_entry {
	__be32 valid_stag_pdid;
	__be32 flags_pagesize_qpid;

	__be32 rsvd_pbl_addr;
	__be32 len;
	__be32 va_hi;
	__be32 va_low_or_fbo;

	__be32 rsvd_bind_cnt_or_pstag;
	__be32 rsvd_pbl_size;
};
428
429#define S_TPT_VALID		31
430#define V_TPT_VALID(x)		((x) << S_TPT_VALID)
431#define F_TPT_VALID		V_TPT_VALID(1U)
432
433#define S_TPT_STAG_KEY		23
434#define M_TPT_STAG_KEY		0xFF
435#define V_TPT_STAG_KEY(x)	((x) << S_TPT_STAG_KEY)
436#define G_TPT_STAG_KEY(x)	(((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
437
438#define S_TPT_STAG_STATE	22
439#define V_TPT_STAG_STATE(x)	((x) << S_TPT_STAG_STATE)
440#define F_TPT_STAG_STATE	V_TPT_STAG_STATE(1U)
441
442#define S_TPT_STAG_TYPE		20
443#define M_TPT_STAG_TYPE		0x3
444#define V_TPT_STAG_TYPE(x)	((x) << S_TPT_STAG_TYPE)
445#define G_TPT_STAG_TYPE(x)	(((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
446
447#define S_TPT_PDID		0
448#define M_TPT_PDID		0xFFFFF
449#define V_TPT_PDID(x)		((x) << S_TPT_PDID)
450#define G_TPT_PDID(x)		(((x) >> S_TPT_PDID) & M_TPT_PDID)
451
452#define S_TPT_PERM		28
453#define M_TPT_PERM		0xF
454#define V_TPT_PERM(x)		((x) << S_TPT_PERM)
455#define G_TPT_PERM(x)		(((x) >> S_TPT_PERM) & M_TPT_PERM)
456
457#define S_TPT_REM_INV_DIS	27
458#define V_TPT_REM_INV_DIS(x)	((x) << S_TPT_REM_INV_DIS)
459#define F_TPT_REM_INV_DIS	V_TPT_REM_INV_DIS(1U)
460
461#define S_TPT_ADDR_TYPE		26
462#define V_TPT_ADDR_TYPE(x)	((x) << S_TPT_ADDR_TYPE)
463#define F_TPT_ADDR_TYPE		V_TPT_ADDR_TYPE(1U)
464
465#define S_TPT_MW_BIND_ENABLE	25
466#define V_TPT_MW_BIND_ENABLE(x)	((x) << S_TPT_MW_BIND_ENABLE)
467#define F_TPT_MW_BIND_ENABLE    V_TPT_MW_BIND_ENABLE(1U)
468
469#define S_TPT_PAGE_SIZE		20
470#define M_TPT_PAGE_SIZE		0x1F
471#define V_TPT_PAGE_SIZE(x)	((x) << S_TPT_PAGE_SIZE)
472#define G_TPT_PAGE_SIZE(x)	(((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
473
474#define S_TPT_PBL_ADDR		0
475#define M_TPT_PBL_ADDR		0x1FFFFFFF
476#define V_TPT_PBL_ADDR(x)	((x) << S_TPT_PBL_ADDR)
477#define G_TPT_PBL_ADDR(x)       (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
478
479#define S_TPT_QPID		0
480#define M_TPT_QPID		0xFFFFF
481#define V_TPT_QPID(x)		((x) << S_TPT_QPID)
482#define G_TPT_QPID(x)		(((x) >> S_TPT_QPID) & M_TPT_QPID)
483
484#define S_TPT_PSTAG		0
485#define M_TPT_PSTAG		0xFFFFFF
486#define V_TPT_PSTAG(x)		((x) << S_TPT_PSTAG)
487#define G_TPT_PSTAG(x)		(((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
488
489#define S_TPT_PBL_SIZE		0
490#define M_TPT_PBL_SIZE		0xFFFFF
491#define V_TPT_PBL_SIZE(x)	((x) << S_TPT_PBL_SIZE)
492#define G_TPT_PBL_SIZE(x)	(((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
493
494/*
495 * CQE defs
496 */
/*
 * Completion queue entry.  `header` packs QPID/type/status/opcode and
 * the generation bit (S_CQE_* macros below).  The union is interpreted
 * per CQE type: rcqe for RQ completions, scqe for SQ completions.
 */
struct t3_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			u32 wrid_hi;
			u32 wrid_low;
		} scqe;
	} u;
};
511
512#define S_CQE_OOO	  31
513#define M_CQE_OOO	  0x1
514#define G_CQE_OOO(x)	  ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
515#define V_CEQ_OOO(x)	  ((x)<<S_CQE_OOO)
516
517#define S_CQE_QPID        12
518#define M_CQE_QPID        0x7FFFF
519#define G_CQE_QPID(x)     ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
520#define V_CQE_QPID(x)	  ((x)<<S_CQE_QPID)
521
522#define S_CQE_SWCQE       11
523#define M_CQE_SWCQE       0x1
524#define G_CQE_SWCQE(x)    ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
525#define V_CQE_SWCQE(x)	  ((x)<<S_CQE_SWCQE)
526
527#define S_CQE_GENBIT      10
528#define M_CQE_GENBIT      0x1
529#define G_CQE_GENBIT(x)   (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
530#define V_CQE_GENBIT(x)	  ((x)<<S_CQE_GENBIT)
531
532#define S_CQE_STATUS      5
533#define M_CQE_STATUS      0x1F
534#define G_CQE_STATUS(x)   ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
535#define V_CQE_STATUS(x)   ((x)<<S_CQE_STATUS)
536
537#define S_CQE_TYPE        4
538#define M_CQE_TYPE        0x1
539#define G_CQE_TYPE(x)     ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
540#define V_CQE_TYPE(x)     ((x)<<S_CQE_TYPE)
541
542#define S_CQE_OPCODE      0
543#define M_CQE_OPCODE      0xF
544#define G_CQE_OPCODE(x)   ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
545#define V_CQE_OPCODE(x)   ((x)<<S_CQE_OPCODE)
546
547#define SW_CQE(x)         (G_CQE_SWCQE(be32toh((x).header)))
548#define CQE_OOO(x)        (G_CQE_OOO(be32toh((x).header)))
549#define CQE_QPID(x)       (G_CQE_QPID(be32toh((x).header)))
550#define CQE_GENBIT(x)     (G_CQE_GENBIT(be32toh((x).header)))
551#define CQE_TYPE(x)       (G_CQE_TYPE(be32toh((x).header)))
552#define SQ_TYPE(x)	  (CQE_TYPE((x)))
553#define RQ_TYPE(x)	  (!CQE_TYPE((x)))
554#define CQE_STATUS(x)     (G_CQE_STATUS(be32toh((x).header)))
555#define CQE_OPCODE(x)     (G_CQE_OPCODE(be32toh((x).header)))
556
557#define CQE_SEND_OPCODE(x)( \
558	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
559	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
560	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
561	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
562
563#define CQE_LEN(x)        (be32toh((x).len))
564
565/* used for RQ completion processing */
566#define CQE_WRID_STAG(x)  (be32toh((x).u.rcqe.stag))
567#define CQE_WRID_MSN(x)   (be32toh((x).u.rcqe.msn))
568
569/* used for SQ completion processing */
570#define CQE_WRID_SQ_WPTR(x)	((x).u.scqe.wrid_hi)
571#define CQE_WRID_WPTR(x)	((x).u.scqe.wrid_low)
572
573/* generic accessor macros */
574#define CQE_WRID_HI(x)		((x).u.scqe.wrid_hi)
575#define CQE_WRID_LOW(x)		((x).u.scqe.wrid_low)
576
577#define TPT_ERR_SUCCESS                     0x0
#define TPT_ERR_STAG                        0x1	 /* STAG invalid: either the */
						 /* STAG is off limits, is 0, */
						 /* or STAG_key mismatch */
581#define TPT_ERR_PDID                        0x2	 /* PDID mismatch */
582#define TPT_ERR_QPID                        0x3	 /* QPID mismatch */
583#define TPT_ERR_ACCESS                      0x4	 /* Invalid access right */
584#define TPT_ERR_WRAP                        0x5	 /* Wrap error */
#define TPT_ERR_BOUND                       0x6	 /* base and bounds violation */
586#define TPT_ERR_INVALIDATE_SHARED_MR        0x7	 /* attempt to invalidate a  */
587						 /* shared memory region */
588#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	 /* attempt to invalidate a  */
589						 /* shared memory region */
590#define TPT_ERR_ECC                         0x9	 /* ECC error detected */
591#define TPT_ERR_ECC_PSTAG                   0xA	 /* ECC error detected when  */
592						 /* reading PSTAG for a MW  */
593						 /* Invalidate */
594#define TPT_ERR_PBL_ADDR_BOUND              0xB	 /* pbl addr out of bounds:  */
595						 /* software error */
596#define TPT_ERR_SWFLUSH			    0xC	 /* SW FLUSHED */
597#define TPT_ERR_CRC                         0x10 /* CRC error */
598#define TPT_ERR_MARKER                      0x11 /* Marker error */
599#define TPT_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
600#define TPT_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
601#define TPT_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
602#define TPT_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
603#define TPT_ERR_OPCODE                      0x16 /* invalid rdma opcode */
604#define TPT_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number */
605#define TPT_ERR_MSN                         0x18 /* MSN error */
606#define TPT_ERR_TBIT                        0x19 /* tag bit not set correctly */
607#define TPT_ERR_MO                          0x1A /* MO not 0 for TERMINATE  */
608						 /* or READ_REQ */
609#define TPT_ERR_MSN_GAP                     0x1B
610#define TPT_ERR_MSN_RANGE                   0x1C
611#define TPT_ERR_IRD_OVERFLOW                0x1D
612#define TPT_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds:  */
613						 /* software error */
614#define TPT_ERR_INTERNAL_ERR                0x1F /* internal error (opcode  */
615						 /* mismatch) */
616
/* Software SQ descriptor: tracks a posted SQ WR until it completes. */
struct t3_swsq {
	uint64_t		wr_id;		/* consumer's WR id */
	struct t3_cqe		cqe;		/* saved/constructed completion */
	uint32_t		sq_wptr;
	__be32   		read_len;
	int			opcode;
	int			complete;	/* nonzero once completed */
	int			signaled;	/* nonzero if completion requested */
};
626
/* Software RQ descriptor: consumer WR id plus its PBL address. */
struct t3_swrq {
        __u64                   wr_id;
        __u32                   pbl_addr;
};
631
632/*
633 * A T3 WQ implements both the SQ and RQ.
634 */
struct t3_wq {
	union t3_wr *queue;		/* DMA accessable memory */
	bus_addr_t dma_addr;		/* DMA address for HW */
	u32 error;			/* 1 once we go to ERROR */
	u32 qpid;
	u32 wptr;			/* idx to next available WR slot */
	u32 size_log2;			/* total wq size */
	struct t3_swsq *sq;		/* SW SQ */
	struct t3_swsq *oldest_read;	/* tracks oldest pending read */
	u32 sq_wptr;			/* sq_wptr - sq_rptr == count of */
	u32 sq_rptr;			/* pending wrs */
	u32 sq_size_log2;		/* sq size */
        struct t3_swrq *rq;             /* SW RQ (holds consumer wr_ids */
	u32 rq_wptr;			/* rq_wptr - rq_rptr == count of */
	u32 rq_rptr;			/* pending wrs */
	struct t3_swrq *rq_oldest_wr;	/* oldest wr on the SW RQ */
	u32 rq_size_log2;		/* rq size */
	u32 rq_addr;			/* rq adapter address */
	void *doorbell;			/* kernel db */
	u64 udb;			/* user db if any */
	struct cxio_rdev *rdev;		/* owning RDMA device */
};
657
/*
 * Completion queue: HW CQE ring (`queue`) plus a software overflow
 * queue (`sw_queue`) with its own read/write pointers.
 */
struct t3_cq {
	u32 cqid;
	u32 rptr;
	u32 wptr;
	u32 size_log2;		/* ring holds 1 << size_log2 CQEs */
	bus_addr_t dma_addr;
	struct t3_cqe *queue;
	struct t3_cqe *sw_queue;
	u32 sw_rptr;
	u32 sw_wptr;
};
669
670#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
671					 CQE_GENBIT(*cqe))
672
/* CQ error status, stored immediately past the last CQE ring slot. */
struct t3_cq_status_page {
        u32 cq_err;
};
676
677static inline int cxio_cq_in_error(struct t3_cq *cq)
678{
679        return ((struct t3_cq_status_page *)
680                &cq->queue[1 << cq->size_log2])->cq_err;
681}
682
683static inline void cxio_set_cq_in_error(struct t3_cq *cq)
684{
685        ((struct t3_cq_status_page *)
686         &cq->queue[1 << cq->size_log2])->cq_err = 1;
687}
688
/*
 * Flag this WQ as being in error by writing flit 13 of the first WQE
 * slot.  NOTE(review): flit 13 coincides with T3_SQ_CQE_FLIT; the
 * exact consumer of this in-queue flag is not visible here — confirm
 * against the code that polls it before changing this index.
 */
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
	wq->queue->flit[13] = 1;
}
693
694static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
695{
696	struct t3_cqe *cqe;
697
698	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
699	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
700		return cqe;
701	return NULL;
702}
703
704static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
705{
706	struct t3_cqe *cqe;
707
708	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
709		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
710		return cqe;
711	}
712	return NULL;
713}
714
715static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
716{
717	struct t3_cqe *cqe;
718
719	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
720		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
721		return cqe;
722	}
723	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
724	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
725		return cqe;
726	return NULL;
727}
728
729#endif
730