1/*
2 * Copyright (c) 2007 Cisco Systems, Inc.  All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *	- Redistributions of source code must retain the above
15 *	  copyright notice, this list of conditions and the following
16 *	  disclaimer.
17 *
18 *	- Redistributions in binary form must reproduce the above
19 *	  copyright notice, this list of conditions and the following
20 *	  disclaimer in the documentation and/or other materials
21 *	  provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX4_QP_H
34#define MLX4_QP_H
35
36#include <linux/types.h>
37
38#include <linux/mlx4/device.h>
39
/* lkey value firmware treats as invalid (reserved lkey). */
#define MLX4_INVALID_LKEY	0x100

/* WQE data-segment size alignment, in bytes.
 * NOTE(review): assumed to match the hardware descriptor stride — confirm. */
#define	DS_SIZE_ALIGNMENT	16

/* Convert host-order WQE fields to the big-endian format the HW expects. */
#define	SET_BYTE_COUNT(byte_count) cpu_to_be32(byte_count)
#define	SET_LSO_MSS(mss_hdr_size) cpu_to_be32(mss_hdr_size)
/* Keeps the low 31 bits of a data-segment byte count; the top bit is
 * used separately (see MLX4_INLINE_SEG below). */
#define	DS_BYTE_COUNT_MASK	cpu_to_be32(0x7fffffff)
47
/*
 * Vendor-specific QP attribute mask bits, occupying the high bits
 * (28-30) of the same word as the standard ib_qp_attr_mask values.
 */
enum ib_m_qp_attr_mask {
	IB_M_EXT_CLASS_1	= 1 << 28,
	IB_M_EXT_CLASS_2	= 1 << 29,
	IB_M_EXT_CLASS_3	= 1 << 30,

	/* All vendor-owned bits above, combined. */
	IB_M_QP_MOD_VEND_MASK	= (IB_M_EXT_CLASS_1 |
				   IB_M_EXT_CLASS_2 |
				   IB_M_EXT_CLASS_3)
};
55
/*
 * Optional-parameter mask bits for MODIFY_QP: passed as the "optpar"
 * argument to mlx4_qp_modify() to tell firmware which optional QP
 * context fields are valid for this state transition.
 * Note: bit positions 11, 15, and 17-19 are not defined here.
 */
enum mlx4_qp_optpar {
	MLX4_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,	/* alternate path fields valid */
	MLX4_QP_OPTPAR_RRE = 1 << 1,		/* remote read enable — see MLX4_QP_BIT_RRE */
	MLX4_QP_OPTPAR_RAE = 1 << 2,		/* remote atomic enable */
	MLX4_QP_OPTPAR_RWE = 1 << 3,		/* remote write enable */
	MLX4_QP_OPTPAR_PKEY_INDEX = 1 << 4,
	MLX4_QP_OPTPAR_Q_KEY = 1 << 5,
	MLX4_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
	MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,	/* primary path fields valid */
	MLX4_QP_OPTPAR_SRA_MAX = 1 << 8,	/* max outstanding send RDMA/atomics */
	MLX4_QP_OPTPAR_RRA_MAX = 1 << 9,	/* max outstanding responder resources */
	MLX4_QP_OPTPAR_PM_STATE = 1 << 10,	/* path-migration state (MLX4_QP_PM_*) */
	MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12,
	MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13,
	MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
	MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16,
	MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20
};
74
/*
 * QP states as encoded in the firmware interface.  Names follow the
 * InfiniBand QP state machine: RST = reset, RTR = ready to receive,
 * RTS = ready to send, SQER = send-queue error, SQD = send-queue
 * drained.
 */
enum mlx4_qp_state {
	MLX4_QP_STATE_RST = 0,
	MLX4_QP_STATE_INIT = 1,
	MLX4_QP_STATE_RTR = 2,
	MLX4_QP_STATE_RTS = 3,
	MLX4_QP_STATE_SQER = 4,
	MLX4_QP_STATE_SQD = 5,
	MLX4_QP_STATE_ERR = 6,
	MLX4_QP_STATE_SQ_DRAINING = 7,
	MLX4_QP_NUM_STATE	/* count sentinel, not a real state */
};
86
/*
 * QP transport service types.  RC/UC/RD/UD are the IB transports,
 * XRC is extended reliable connected; MLX is presumably the raw
 * vendor transport used for special QPs — confirm against PRM.
 */
enum {
	MLX4_QP_ST_RC = 0x0,
	MLX4_QP_ST_UC = 0x1,
	MLX4_QP_ST_RD = 0x2,
	MLX4_QP_ST_UD = 0x3,
	MLX4_QP_ST_XRC = 0x6,
	MLX4_QP_ST_MLX = 0x7
};
95
/* Path-migration states (the PM_STATE field of the QP context). */
enum {
	MLX4_QP_PM_ARMED	= 0x0,
	MLX4_QP_PM_REARM	= 0x1,
	MLX4_QP_PM_MIGRATED	= 0x3
};
99
/*
 * Enable bits inside the QP context params1/params2 words.  The
 * names suggest the S?E bits gate requester-side RDMA read/write/
 * atomic and the R?E bits the responder side — confirm against PRM.
 */
enum {
	/* params1 */
	MLX4_QP_BIT_SRE = 1 << 15,
	MLX4_QP_BIT_SWE = 1 << 14,
	MLX4_QP_BIT_SAE = 1 << 13,
	/* params2 */
	MLX4_QP_BIT_RRE = 1 << 15,
	MLX4_QP_BIT_RWE = 1 << 14,
	MLX4_QP_BIT_RAE = 1 << 13,
	MLX4_QP_BIT_RIC = 1 << 4,	/* NOTE(review): invalidate-on-read? confirm */
	MLX4_QP_BIT_COLL_SYNC_RQ = 1 << 2,	/* collective sync, receive queue */
	MLX4_QP_BIT_COLL_SYNC_SQ = 1 << 1,	/* collective sync, send queue */
	MLX4_QP_BIT_COLL_MASTER = 1 << 0
};
114
/*
 * RSS configuration: hash-function selectors, per-protocol hash
 * enable bits (used in mlx4_rss_context.flags), and the offsets at
 * which RSS information lives inside the QP context.
 */
enum {
	MLX4_RSS_HASH_XOR = 0,	/* XOR-based hash */
	MLX4_RSS_HASH_TOP = 1,	/* Toeplitz hash */

	MLX4_RSS_UDP_IPV6 = 1 << 0,
	MLX4_RSS_UDP_IPV4 = 1 << 1,
	MLX4_RSS_TCP_IPV6 = 1 << 2,
	MLX4_RSS_IPV6 = 1 << 3,
	MLX4_RSS_TCP_IPV4 = 1 << 4,
	MLX4_RSS_IPV4 = 1 << 5,

	/* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
	MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24,
	/* offset of being RSS indirection QP within mlx4_qp_context.flags */
	MLX4_RSS_QPC_FLAG_OFFSET = 13,
};
131
/*
 * RSS context, overlaid on the QP context at
 * MLX4_RSS_OFFSET_IN_QPC_PRI_PATH.  Hardware-defined layout:
 * do not reorder or resize fields.
 */
struct mlx4_rss_context {
	__be32 base_qpn;	/* base QPN of the RSS indirection range */
	__be32 default_qpn;	/* presumably fallback QPN for unhashed traffic — confirm */
	u16 reserved;
	u8 hash_fn;		/* MLX4_RSS_HASH_XOR or MLX4_RSS_HASH_TOP */
	u8 flags;		/* MLX4_RSS_* per-protocol enable bits */
	__be32 rss_key[10];	/* 40-byte hash key */
	__be32 base_qpn_udp;	/* NOTE(review): base QPN for UDP RSS — confirm */
};
141
/*
 * Address path element of the QP context (primary and alternate).
 * Hardware-defined layout: do not reorder or resize fields.
 */
struct mlx4_qp_path {
	u8 fl;			/* MLX4_FL_* flag bits */
	u8 vlan_control;	/* MLX4_VLAN_CTRL_* bits */
	u8 disable_pkey_check;
	u8 pkey_index;		/* P_Key table index */
	u8 counter_index;
	u8 grh_mylmc;		/* NOTE(review): GRH-present bit plus LMC bits — confirm split */
	__be16 rlid;		/* remote LID */
	u8 ackto;		/* ACK timeout */
	u8 mgid_index;		/* NOTE(review): source GID table index — confirm */
	u8 static_rate;
	u8 hop_limit;		/* GRH hop limit */
	__be32 tclass_flowlabel; /* presumably tclass in high byte, flow label low — confirm */
	u8 rgid[16];		/* remote GID */
	u8 sched_queue;		/* schedule queue (see MLX4_QP_OPTPAR_SCHED_QUEUE) */
	u8 vlan_index;
	u8 feup;		/* MLX4_FEUP_* / MLX4_FSM_* / MLX4_FVL_* force bits */
	u8 fvl_rx;		/* MLX4_FVL_RX_* bits */
	u8 reserved4[2];
	u8 dmac[6];		/* destination MAC (Ethernet) */
};
163
/* Bit definitions for mlx4_qp_path.fl. */
enum { /* fl */
	MLX4_FL_CV = 1 << 6,			/* check VLAN */
	MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,	/* strip VLAN from CQE */
	MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,	/* source check on MC loopback */
	MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,	/* source check on UC loopback */
};
/* Bit definitions for mlx4_qp_path.vlan_control. */
enum { /* vlan_control */
	MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7,
	MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2,
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1,/* 802.1p priorty tag*/
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0
};

/* Bit definitions for mlx4_qp_path.feup. */
enum { /* feup */
	MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */
	MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */
	MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */
};

/* Bit definitions for mlx4_qp_path.fvl_rx. */
enum { /* fvl_rx */
	MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */
};
187
/*
 * QP context passed to firmware on MODIFY_QP.  Hardware-defined
 * layout: do not reorder or resize fields.
 */
struct mlx4_qp_context {
	__be32 flags;		/* misc flag bits; bit MLX4_RSS_QPC_FLAG_OFFSET marks an RSS QP */
	__be32 pd;		/* protection domain number */
	u8 mtu_msgmax;		/* MTU and max message size, packed */
	u8 rq_size_stride;	/* receive queue log-size/log-stride, packed */
	u8 sq_size_stride;	/* send queue log-size/log-stride, packed */
	u8 rlkey;		/* NOTE(review): reserved-lkey enable bit — confirm */
	__be32 usr_page;	/* UAR index */
	__be32 local_qpn;
	__be32 remote_qpn;
	struct mlx4_qp_path pri_path;	/* primary address path */
	struct mlx4_qp_path alt_path;	/* alternate (path-migration) path */
	__be32 params1;		/* MLX4_QP_BIT_S* enables etc. */
	u32 reserved1;
	__be32 next_send_psn;
	__be32 cqn_send;	/* send CQ number */
	u32 reserved2[2];
	__be32 last_acked_psn;
	__be32 ssn;		/* send sequence number */
	__be32 params2;		/* MLX4_QP_BIT_R* enables etc. */
	__be32 rnr_nextrecvpsn;	/* RNR timer + next receive PSN, packed */
	__be32 xrcd;		/* XRC domain */
	__be32 cqn_recv;	/* receive CQ number */
	__be64 db_rec_addr;	/* doorbell record address */
	__be32 qkey;
	__be32 srqn;		/* SRQ number, if attached */
	__be32 msn;
	__be16 rq_wqe_counter;
	__be16 sq_wqe_counter;
	u32 reserved3[2];
	__be32 param3;		/* e.g. MLX4_STRIP_VLAN */
	__be32 nummmcpeers_basemkey;
	u8 log_page_size;
	u8 reserved4[2];
	u8 mtt_base_addr_h;	/* MTT base address, high byte */
	__be32 mtt_base_addr_l;	/* MTT base address, low dword */
	u32 reserved5[10];
};
226
/*
 * Mailbox layout for the UPDATE_QP command: the mask words select
 * which fields of the embedded qp_context (and its address paths)
 * firmware should apply.  Bit positions are given by the
 * MLX4_UPD_QP_MASK_* and MLX4_UPD_QP_PATH_MASK_* enums below.
 */
struct mlx4_update_qp_context {
	__be64 qp_mask;
	__be64 primary_addr_path_mask;
	__be64 secondary_addr_path_mask;
	u64 reserved1;
	struct mlx4_qp_context qp_context;
	u64 reserved2[58];	/* pad mailbox to its fixed size */
};
235
/* Bit positions within mlx4_update_qp_context.qp_mask. */
enum {
	MLX4_UPD_QP_MASK_PM_STATE	= 32,
	MLX4_UPD_QP_MASK_VSD		= 33,
};
239
/*
 * Bit positions within mlx4_update_qp_context.primary_addr_path_mask /
 * secondary_addr_path_mask.  All values are offset by 32, i.e. they
 * live in the high dword of the 64-bit mask.  Note position 17 + 32
 * is not defined.
 */
enum {
	MLX4_UPD_QP_PATH_MASK_PKEY_INDEX = 0 + 32,
	MLX4_UPD_QP_PATH_MASK_FSM = 1 + 32,
	MLX4_UPD_QP_PATH_MASK_MAC_INDEX = 2 + 32,
	MLX4_UPD_QP_PATH_MASK_FVL = 3 + 32,
	MLX4_UPD_QP_PATH_MASK_CV = 4 + 32,
	MLX4_UPD_QP_PATH_MASK_VLAN_INDEX = 5 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN = 6 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED = 7 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P = 8 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED = 9 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED = 10 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P = 11 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED = 12 + 32,
	MLX4_UPD_QP_PATH_MASK_FEUP = 13 + 32,
	MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32,
	MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32,
	MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
};
261
/* Bit definitions for mlx4_qp_context.param3. */
enum { /* param3 */
	MLX4_STRIP_VLAN = 1 << 30	/* strip VLAN tag on receive */
};
265
266/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
267#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
268
/*
 * Flag bits used in mlx4_wqe_ctrl_seg fields.  Bit 31 must be shifted
 * as unsigned: "1 << 31" left-shifts a signed 1 into the sign bit,
 * which is undefined behavior in C.
 * NOTE(review): bit 6 appears three times (RR / FENCE / INS_VLAN);
 * presumably each applies to a different field of the control
 * segment — confirm against the PRM.
 */
enum {
	MLX4_WQE_CTRL_OWN = 1U << 31,	/* ownership bit in owner_opcode */
	MLX4_WQE_CTRL_NEC = 1 << 29,	/* no error completion (see MLX4_FW_VER_WQE_CTRL_NEC) */
	MLX4_WQE_CTRL_RR = 1 << 6,
	MLX4_WQE_CTRL_FENCE = 1 << 6,
	MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,	/* C bits: generate CQE */
	MLX4_WQE_CTRL_SOLICITED = 1 << 1,	/* SE: solicited event */
	MLX4_WQE_CTRL_IP_CSUM = 1 << 4,		/* offload IP checksum */
	MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,	/* offload TCP/UDP checksum */
	MLX4_WQE_CTRL_INS_VLAN = 1 << 6,	/* insert VLAN tag */
	MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,	/* SO: strong ordering */
	MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,	/* FL: force loopback */
};
282
/*
 * WQE control segment: first segment of every send WQE.
 * Hardware-defined layout: do not reorder or resize fields.
 */
struct mlx4_wqe_ctrl_seg {
	__be32 owner_opcode;	/* opcode plus MLX4_WQE_CTRL_OWN ownership bit */
	__be16 vlan_tag;	/* VLAN tag to insert — NOTE(review): confirm pairing with INS_VLAN */
	u8 ins_vlan;
	u8 fence_size;		/* presumably fence bit + WQE size in 16-byte units — confirm */
	/*
	 * High 24 bits are SRC remote buffer; low 8 bits are flags:
	 * [7]   SO (strong ordering)
	 * [5]   TCP/UDP checksum
	 * [4]   IP checksum
	 * [3:2] C (generate completion queue entry)
	 * [1]   SE (solicited event)
	 * [0]   FL (force loopback)
	 */
	union {
		__be32 srcrb_flags;
		__be16 srcrb_flags16[2];	/* same word, half-word access */
	};
	/*
	 * imm is immediate data for send/RDMA write w/ immediate;
	 * also invalidation key for send with invalidate; input
	 * modifier for WQEs on CCQs.
	 */
	__be32 imm;
};
308
/* Flag bits for mlx4_wqe_mlx_seg.flags (see layout comment there). */
enum {
	MLX4_WQE_MLX_VL15	= 1 << 17,
	MLX4_WQE_MLX_SLR	= 1 << 16
};
312
/*
 * Control segment for WQEs on MLX service-type QPs (presumably the
 * special/management QPs — see MLX4_QP_ST_MLX; confirm).
 * Hardware-defined layout: do not reorder or resize fields.
 */
struct mlx4_wqe_mlx_seg {
	u8 owner;	/* ownership bit, as in mlx4_wqe_ctrl_seg */
	u8 reserved1[2];
	u8 opcode;
	__be16 sched_prio;	/* scheduling priority */
	u8 reserved2;
	u8 size;	/* WQE size — NOTE(review): presumably in 16-byte units; confirm */
	/*
	 * [17]    VL15
	 * [16]    SLR
	 * [15:12] static rate
	 * [11:8]  SL
	 * [4]     ICRC
	 * [3:2]   C
	 * [0]     FL (force loopback)
	 */
	__be32 flags;
	__be16 rlid;	/* remote LID */
	u16 reserved3;
};
333
/*
 * Datagram (UD) WQE segment: destination address information.
 * Hardware-defined layout: do not reorder or resize fields.
 */
struct mlx4_wqe_datagram_seg {
	__be32 av[8];	/* address vector, 32 bytes */
	__be32 dqpn;	/* destination QP number */
	__be32 qkey;	/* destination Q_Key */
	__be16 vlan;	/* Ethernet VLAN — NOTE(review): confirm usage */
	u8 mac[6];	/* Ethernet destination MAC */
};
341
342struct mlx4_wqe_lso_seg {
343	__be32 mss_hdr_size;
344	__be32 header[0];
345};
346
/*
 * flags2 bits for mlx4_wqe_bind_seg.  Bit 31 must be shifted as
 * unsigned: "1 << 31" on a signed 1 is undefined behavior in C.
 */
enum mlx4_wqe_bind_seg_flags2 {
	MLX4_WQE_BIND_TYPE_2 = (1U << 31),	/* type-2 memory window */
	MLX4_WQE_BIND_ZERO_BASED = (1 << 30),	/* zero-based addressing */
};
350
/*
 * Memory-window bind WQE segment.
 * Hardware-defined layout: do not reorder or resize fields.
 */
struct mlx4_wqe_bind_seg {
	__be32 flags1;	/* access flags — presumably MLX4_WQE_FMR_AND_BIND_PERM_*; confirm */
	__be32 flags2;	/* MLX4_WQE_BIND_TYPE_2 / MLX4_WQE_BIND_ZERO_BASED */
	__be32 new_rkey;	/* rkey to assign to the window */
	__be32 lkey;	/* lkey of the region backing the window */
	__be64 addr;
	__be64 length;
};
359
/*
 * Access-permission bits for FMR and memory-window bind WQEs.
 * Bit 31 must be shifted as unsigned: "1 << 31" on a signed 1 is
 * undefined behavior in C.
 */
enum {
	MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
	MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ = 1 << 29,
	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE = 1 << 30,
	MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC = 1U << 31
};
367
/*
 * Fast memory registration (FMR) WQE segment.
 * Hardware-defined layout: do not reorder or resize fields.
 */
struct mlx4_wqe_fmr_seg {
	__be32 flags;		/* MLX4_WQE_FMR_* access bits */
	__be32 mem_key;		/* key of the region being registered */
	__be64 buf_list;	/* presumably DMA address of the page list — confirm */
	__be64 start_addr;	/* virtual start address of the region */
	__be64 reg_len;		/* length of the region */
	__be32 offset;
	__be32 page_size;	/* NOTE(review): presumably log2 encoding — confirm */
	u32 reserved[2];
};
378
/*
 * FMR extension segment.  NOTE(review): the app-tag/ref-tag field
 * names suggest T10-DIF style data-integrity parameters — confirm
 * against the PRM.  Hardware-defined layout: do not reorder fields.
 */
struct mlx4_wqe_fmr_ext_seg {
	u8 flags;
	u8 reserved;
	__be16 app_mask;
	__be16 wire_app_tag;
	__be16 mem_app_tag;
	__be32 wire_ref_tag_base;
	__be32 mem_ref_tag_base;
};
388
/*
 * Local-invalidate WQE segment: mem_key is the memory key to
 * invalidate.  Hardware-defined layout: do not reorder fields.
 */
struct mlx4_wqe_local_inval_seg {
	u64 reserved1;
	__be32 mem_key;
	u32 reserved2;
	u64 reserved3[2];
};
395
/*
 * Remote-address WQE segment for RDMA read/write and atomics.
 * Hardware-defined layout: do not reorder fields.
 */
struct mlx4_wqe_raddr_seg {
	__be64 raddr;	/* remote virtual address */
	__be32 rkey;	/* remote key */
	u32 reserved;
};
401
/*
 * Atomic operation operands: swap_add is the swap (compare-and-swap)
 * or add (fetch-and-add) value; compare is the compare value.
 */
struct mlx4_wqe_atomic_seg {
	__be64 swap_add;
	__be64 compare;
};
406
/*
 * Masked atomic operation operands: as mlx4_wqe_atomic_seg, with a
 * per-operand bit mask selecting which bits participate.
 */
struct mlx4_wqe_masked_atomic_seg {
	__be64 swap_add;
	__be64 compare;
	__be64 swap_add_mask;
	__be64 compare_mask;
};
413
/*
 * Scatter/gather (pointer) data segment of a WQE.
 * Hardware-defined layout: do not reorder fields.
 */
struct mlx4_wqe_data_seg {
	__be32 byte_count;	/* length; top bit has special meaning (see MLX4_INLINE_SEG) */
	__be32 lkey;		/* local key of the buffer's MR */
	__be64 addr;		/* buffer address */
};
419
/*
 * Inline-data constants.  MLX4_INLINE_SEG is the marker bit in a
 * segment's byte_count; it must be shifted as unsigned, since
 * "1 << 31" on a signed 1 is undefined behavior in C.
 */
enum {
	MLX4_INLINE_ALIGN = 64,		/* alignment unit for inline segments */
	MLX4_INLINE_SEG = 1U << 31,	/* marks a data segment as inline */
};
423
/*
 * Header of an inline data segment: byte_count with the
 * MLX4_INLINE_SEG bit set, followed directly by the inline bytes.
 */
struct mlx4_wqe_inline_seg {
	__be32 byte_count;
};
427
/*
 * Transition a QP from cur_state to new_state, passing the given
 * context and optional-parameter mask (enum mlx4_qp_optpar) to
 * firmware.  sqd_event requests a send-queue-drained event —
 * NOTE(review): confirm semantics against the implementation.
 * Presumably returns 0 on success or a negative errno — confirm.
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		int sqd_event, struct mlx4_qp *qp);
/*
 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
 struct mlx4_qp_context *context);
 */
/*
 * Drive a QP through the state machine to a ready state, writing the
 * state reached to *qp_state.  Presumably returns 0 on success or a
 * negative errno — confirm against the implementation.
 */
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		struct mlx4_qp_context *context, struct mlx4_qp *qp,
		enum mlx4_qp_state *qp_state);
439
440static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) {
441	return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
442}
443/*
444 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);*/
445
446#endif /* MLX4_QP_H */
447