1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef MLX4_H
35#define MLX4_H
36
37#include <stddef.h>
38
39#include <infiniband/driver.h>
40#include <infiniband/arch.h>
41
/*
 * Valgrind client-request support: when memcheck.h is available, pull in
 * the real VALGRIND_MAKE_MEM_(UN)DEFINED annotations; otherwise define
 * them away so call sites compile to nothing.
 */
#ifdef HAVE_VALGRIND_MEMCHECK_H

#  include <valgrind/memcheck.h>

#  if !defined(VALGRIND_MAKE_MEM_DEFINED) || !defined(VALGRIND_MAKE_MEM_UNDEFINED)
#    warning "Valgrind support requested, but VALGRIND_MAKE_MEM_(UN)DEFINED not available"
#  endif

#endif /* HAVE_VALGRIND_MEMCHECK_H */

/* No-op fallbacks when Valgrind support is absent or incomplete. */
#ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(addr,len)
#endif

#ifndef VALGRIND_MAKE_MEM_UNDEFINED
#  define VALGRIND_MAKE_MEM_UNDEFINED(addr,len)
#endif
59
/*
 * Memory-barrier fallbacks.  If the architecture headers (via
 * infiniband/arch.h) did not provide read/write barriers, fall back to a
 * full barrier mb().
 */
#ifndef rmb
#  define rmb() mb()
#endif

#ifndef wmb
#  define wmb() mb()
#endif

/*
 * wc_wmb(): barrier ordering writes to write-combining memory (used for
 * BlueFlame doorbell pages).  Per-arch implementations below; anything
 * else falls back to a plain write barrier.
 */
#ifndef wc_wmb

#if defined(__i386__)
#define wc_wmb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
#elif defined(__x86_64__)
#define wc_wmb() asm volatile("sfence" ::: "memory")
#elif defined(__ia64__)
#define wc_wmb() asm volatile("fwb" ::: "memory")
#else
#define wc_wmb() wmb()
#endif

#endif
81
/*
 * XRC and experimental create-QP support require the libibverbs
 * "more ops" extension; disable both when it is not present.
 */
#ifndef HAVE_IBV_MORE_OPS
#undef HAVE_IBV_XRC_OPS
#undef HAVE_IBV_CREATE_QP_EXP
#endif

/* Mark internal symbols as non-exported from the shared library. */
#define HIDDEN		__attribute__((visibility ("hidden")))

/* Prefix for this driver's log/error messages. */
#define PFX		"mlx4: "
90
/*
 * Type-safe min/max using GCC statement expressions and typeof:
 * each argument is evaluated exactly once (no side-effect duplication).
 */
#ifndef max
#define max(a,b) \
	({ typeof (a) _a = (a); \
	   typeof (b) _b = (b); \
	   _a > _b ? _a : _b; })
#endif

#ifndef min
#define min(a,b) \
	({ typeof (a) _a = (a); \
	   typeof (b) _b = (b); \
	   _a < _b ? _a : _b; })
#endif
104
enum {
	/* Offset applied when encoding the static rate in an address
	 * vector (see struct mlx4_av.stat_rate).  NOTE(review): value
	 * per device interface spec — confirm against the PRM. */
	MLX4_STAT_RATE_OFFSET		= 5
};
108
/* Sizing of the first level of the two-level QPN lookup table held in
 * struct mlx4_context.qp_table. */
enum {
	MLX4_QP_TABLE_BITS		= 8,
	MLX4_QP_TABLE_SIZE		= 1 << MLX4_QP_TABLE_BITS,
	MLX4_QP_TABLE_MASK		= MLX4_QP_TABLE_SIZE - 1
};
114
/* Sizing of the first level of the two-level XRC SRQ lookup table held
 * in struct mlx4_context.xrc_srq_table. */
enum {
	MLX4_XRC_SRQ_TABLE_BITS		= 8,
	MLX4_XRC_SRQ_TABLE_SIZE		= 1 << MLX4_XRC_SRQ_TABLE_BITS,
	MLX4_XRC_SRQ_TABLE_MASK		= MLX4_XRC_SRQ_TABLE_SIZE - 1
};
120
enum {
	/* Bit set in a QP number to mark it as an XRC QPN —
	 * NOTE(review): confirm against kernel driver usage. */
	MLX4_XRC_QPN_BIT		= (1 << 23)
};
124
/* Doorbell record classes; indexes struct mlx4_context.db_list. */
enum mlx4_db_type {
	MLX4_DB_TYPE_CQ,
	MLX4_DB_TYPE_RQ,
	MLX4_NUM_DB_TYPE	/* number of doorbell types (array bound) */
};
130
/*
 * Opcode encodings exchanged with the device: send-WQE opcodes,
 * receive-completion opcodes, and special CQE opcodes.  Values are part
 * of the hardware interface — do not change.
 */
enum {
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,
	MLX4_OPCODE_ATOMIC_FA		= 0x12,
	MLX4_OPCODE_ATOMIC_MASK_CS	= 0x14,
	MLX4_OPCODE_ATOMIC_MASK_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	/* opcodes reported in receive completions */
	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	/* special CQE opcodes */
	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};
157
enum {
	/* Maximum size in bytes of a single work-queue entry. */
	MLX4_MAX_WQE_SIZE = 1008
};
161
/* Driver-private device object wrapping the verbs ibv_device. */
struct mlx4_device {
	struct ibv_device		ibv_dev;	/* must stay first: to_mdev() relies on it */
	int				page_size;	/* host page size used for buffer sizing */
	int				driver_abi_ver;	/* kernel/user ABI version negotiated at open */
};
167
struct mlx4_db_page;	/* opaque; managed by the doorbell allocator */

/* Driver-private per-process context wrapping the verbs ibv_context. */
struct mlx4_context {
	struct ibv_context		ibv_ctx;	/* must stay first: to_mctx() relies on it */

	void			       *uar;		/* mapped doorbell (UAR) page */
	pthread_spinlock_t		uar_lock;	/* serializes UAR doorbell writes */

	void			       *bf_page;	/* BlueFlame page mapping — NOTE(review): NULL when unsupported; confirm */
	int				bf_buf_size;
	int				bf_offset;	/* current offset into bf_page */
	pthread_spinlock_t		bf_lock;	/* serializes BlueFlame use */

	/* Two-level QPN -> mlx4_qp lookup table; qp_table_mutex guards
	 * table population, per the _mutex naming. */
	struct {
		struct mlx4_qp	      **table;
		int			refcnt;
	}				qp_table[MLX4_QP_TABLE_SIZE];
	pthread_mutex_t			qp_table_mutex;
	int				num_qps;
	int				qp_table_shift;
	int				qp_table_mask;
	int				max_qp_wr;	/* device limit cached from query */
	int				max_sge;
	int				max_cqe;
	int				cqe_size;	/* CQE stride in bytes */

	/* Two-level XRC SRQ number -> mlx4_srq lookup table. */
	struct {
		struct mlx4_srq       **table;
		int			refcnt;
	}				xrc_srq_table[MLX4_XRC_SRQ_TABLE_SIZE];
	pthread_mutex_t			xrc_srq_table_mutex;
	int				num_xrc_srqs;
	int				xrc_srq_table_shift;
	int				xrc_srq_table_mask;

	/* Doorbell-record page lists, one per mlx4_db_type. */
	struct mlx4_db_page	       *db_list[MLX4_NUM_DB_TYPE];
	pthread_mutex_t			db_list_mutex;
};
206
/* A contiguous driver buffer (see mlx4_alloc_buf()/mlx4_free_buf()). */
struct mlx4_buf {
	void			       *buf;	/* start of the mapping */
	size_t				length;	/* size in bytes */
};
211
/* Driver-private protection domain wrapping the verbs ibv_pd. */
struct mlx4_pd {
	struct ibv_pd			ibv_pd;	/* must stay first: to_mpd() relies on it */
	uint32_t			pdn;	/* device PD number */
};
216
/* Driver-private completion queue wrapping the verbs ibv_cq. */
struct mlx4_cq {
	struct ibv_cq			ibv_cq;		/* must stay first: to_mcq() relies on it */
	struct mlx4_buf			buf;		/* CQE ring buffer */
	struct mlx4_buf			resize_buf;	/* replacement ring during resize */
	pthread_spinlock_t		lock;
	uint32_t			cqn;		/* device CQ number */
	uint32_t			cons_index;	/* next CQE to consume */
	uint32_t		       *set_ci_db;	/* consumer-index doorbell record */
	uint32_t		       *arm_db;		/* arm doorbell record */
	int				arm_sn;		/* arm sequence number */
	int				cqe_size;	/* CQE stride in bytes */
};
229
/* Driver-private shared receive queue wrapping the verbs ibv_srq. */
struct mlx4_srq {
	struct ibv_srq			ibv_srq;	/* must stay first: to_msrq() relies on it */
	struct mlx4_buf			buf;		/* WQE ring buffer */
	pthread_spinlock_t		lock;
	uint64_t		       *wrid;		/* caller wr_id per WQE slot */
	uint32_t			srqn;		/* device SRQ number */
	int				max;		/* number of WQE slots */
	int				max_gs;		/* max scatter entries per WQE */
	int				wqe_shift;	/* log2 of WQE stride */
	int				head;		/* free-list head */
	int				tail;		/* free-list tail */
	uint32_t		       *db;		/* doorbell record */
	uint16_t			counter;	/* post counter written to db */
};
244
/* One work queue (used for both the SQ and RQ of a QP). */
struct mlx4_wq {
	uint64_t		       *wrid;		/* caller wr_id per WQE slot */
	pthread_spinlock_t		lock;
	int				wqe_cnt;	/* total WQE slots (ring size) */
	int				max_post;	/* max outstanding posts allowed */
	unsigned			head;		/* producer index */
	unsigned			tail;		/* consumer index */
	int				max_gs;		/* max gather/scatter entries per WQE */
	int				wqe_shift;	/* log2 of WQE stride */
	int				offset;		/* byte offset of this queue in qp->buf */
};
256
/* Driver-private queue pair wrapping the verbs ibv_qp. */
struct mlx4_qp {
	struct ibv_qp			ibv_qp;		/* must stay first: to_mqp() relies on it */
	struct mlx4_buf			buf;		/* combined SQ+RQ buffer */
	int				max_inline_data;
	int				buf_size;	/* total size of buf in bytes */

	uint32_t			doorbell_qpn;	/* QPN value written to the doorbell */
	uint32_t			sq_signal_bits;	/* set when all SQ WRs are signaled */
	int				sq_spare_wqes;	/* extra SQ slots kept for ownership tracking */
	struct mlx4_wq			sq;		/* send queue */

	uint32_t		       *db;		/* RQ doorbell record */
	struct mlx4_wq			rq;		/* receive queue */
};
271
/*
 * Address vector in the device's native layout.  Field order, widths and
 * the reserved holes are part of the hardware interface — do not reorder
 * or repack.  NOTE(review): exact field semantics per ConnectX PRM;
 * confirm before changing.
 */
struct mlx4_av {
	uint32_t			port_pd;	/* port number and PD, packed */
	uint8_t				reserved1;
	uint8_t				g_slid;		/* GRH flag + source LID bits — TODO confirm */
	uint16_t			dlid;		/* destination LID */
	uint8_t				reserved2;
	uint8_t				gid_index;
	uint8_t				stat_rate;	/* encoded with MLX4_STAT_RATE_OFFSET */
	uint8_t				hop_limit;
	uint32_t			sl_tclass_flowlabel;	/* SL, traffic class, flow label packed */
	uint8_t				dgid[16];	/* destination GID */
	uint8_t				mac[8];		/* destination MAC (Ethernet transport) */
};
285
/* Driver-private address handle wrapping the verbs ibv_ah. */
struct mlx4_ah {
	struct ibv_ah			ibv_ah;	/* must stay first: to_mah() relies on it */
	struct mlx4_av			av;	/* hardware-format address vector */
	uint16_t			vlan;	/* VLAN tag (Ethernet transport) */
	uint8_t				mac[6];	/* destination MAC (Ethernet transport) */
	uint8_t				tagged;	/* nonzero when a VLAN tag applies — TODO confirm */
};
293
/* Driver-private XRC domain wrapping the verbs ibv_xrc_domain. */
struct mlx4_xrc_domain {
	struct ibv_xrc_domain		ibv_xrcd;	/* must stay first: to_mxrcd() relies on it */
	uint32_t			xrcdn;		/* device XRC domain number */
};
298
/*
 * Round val up to the next multiple of align.
 * align must be a nonzero power of two.
 */
static inline unsigned long align(unsigned long val, unsigned long align)
{
	unsigned long mask = align - 1;

	return (val + mask) & ~mask;
}
303
/*
 * container_of-style cast: recover the mlx4_<type> wrapper from a
 * pointer to its embedded ibv_<xxx> member.  Note: arithmetic on
 * (void *) is a GNU extension (treated as char *).
 */
#define to_mxxx(xxx, type)						\
	((struct mlx4_##type *)					\
	 ((void *) ib##xxx - offsetof(struct mlx4_##type, ibv_##xxx)))
307
308static inline struct mlx4_device *to_mdev(struct ibv_device *ibdev)
309{
310	return to_mxxx(dev, device);
311}
312
313static inline struct mlx4_context *to_mctx(struct ibv_context *ibctx)
314{
315	return to_mxxx(ctx, context);
316}
317
318static inline struct mlx4_pd *to_mpd(struct ibv_pd *ibpd)
319{
320	return to_mxxx(pd, pd);
321}
322
323static inline struct mlx4_cq *to_mcq(struct ibv_cq *ibcq)
324{
325	return to_mxxx(cq, cq);
326}
327
328static inline struct mlx4_srq *to_msrq(struct ibv_srq *ibsrq)
329{
330	return to_mxxx(srq, srq);
331}
332
333static inline struct mlx4_qp *to_mqp(struct ibv_qp *ibqp)
334{
335	return to_mxxx(qp, qp);
336}
337
338static inline struct mlx4_ah *to_mah(struct ibv_ah *ibah)
339{
340	return to_mxxx(ah, ah);
341}
342
#ifdef HAVE_IBV_XRC_OPS
/* Recover the mlx4_xrc_domain wrapper from its embedded ibv_xrc_domain. */
static inline struct mlx4_xrc_domain *to_mxrcd(struct ibv_xrc_domain *ibxrcd)
{
	return (struct mlx4_xrc_domain *)
		((char *) ibxrcd - offsetof(struct mlx4_xrc_domain, ibv_xrcd));
}
#endif
349
/* Buffer and doorbell-record allocation helpers. */
int mlx4_alloc_buf(struct mlx4_buf *buf, size_t size, int page_size);
void mlx4_free_buf(struct mlx4_buf *buf);

uint32_t *mlx4_alloc_db(struct mlx4_context *context, enum mlx4_db_type type);
void mlx4_free_db(struct mlx4_context *context, enum mlx4_db_type type, uint32_t *db);
355
/* Device/port query and PD / MR verbs implementations. */
int mlx4_query_device(struct ibv_context *context,
		       struct ibv_device_attr *attr);
int mlx4_query_port(struct ibv_context *context, uint8_t port,
		     struct ibv_port_attr *attr);

struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context);
int mlx4_free_pd(struct ibv_pd *pd);

struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr,
			    size_t length, enum ibv_access_flags access);
int mlx4_dereg_mr(struct ibv_mr *mr);
367
/* Completion queue verbs and internal CQ helpers
 * (__mlx4_cq_clean is the caller-holds-lock variant). */
struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
			       struct ibv_comp_channel *channel,
			       int comp_vector);
int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent,
		      int entry_size);
int mlx4_resize_cq(struct ibv_cq *cq, int cqe);
int mlx4_destroy_cq(struct ibv_cq *cq);
int mlx4_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
int mlx4_arm_cq(struct ibv_cq *cq, int solicited);
void mlx4_cq_event(struct ibv_cq *cq);
void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq);
void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq);
int mlx4_get_outstanding_cqes(struct mlx4_cq *cq);
void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int new_cqe);
382
/* SRQ verbs, WQE helpers, and the XRC SRQ number lookup table. */
struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
				 struct ibv_srq_init_attr *attr);
int mlx4_modify_srq(struct ibv_srq *srq,
		     struct ibv_srq_attr *attr,
		     enum ibv_srq_attr_mask mask);
int mlx4_query_srq(struct ibv_srq *srq,
			   struct ibv_srq_attr *attr);
int mlx4_destroy_srq(struct ibv_srq *srq);
int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
			struct mlx4_srq *srq);
void mlx4_free_srq_wqe(struct mlx4_srq *srq, int ind);
int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
		       struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr);
struct mlx4_srq *mlx4_find_xrc_srq(struct mlx4_context *ctx, uint32_t xrc_srqn);
int mlx4_store_xrc_srq(struct mlx4_context *ctx, uint32_t xrc_srqn,
		       struct mlx4_srq *srq);
void mlx4_clear_xrc_srq(struct mlx4_context *ctx, uint32_t xrc_srqn);
401
/* QP verbs, QP buffer/WQE helpers, the QPN lookup table,
 * and address-handle verbs. */
struct ibv_qp *mlx4_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
int mlx4_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		   enum ibv_qp_attr_mask attr_mask,
		   struct ibv_qp_init_attr *init_attr);
int mlx4_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		    enum ibv_qp_attr_mask attr_mask);
int mlx4_destroy_qp(struct ibv_qp *qp);
void mlx4_init_qp_indices(struct mlx4_qp *qp);
void mlx4_qp_init_sq_ownership(struct mlx4_qp *qp);
int mlx4_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
			  struct ibv_send_wr **bad_wr);
int mlx4_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
			  struct ibv_recv_wr **bad_wr);
void mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type,
			   struct mlx4_qp *qp);
int num_inline_segs(int data, enum ibv_qp_type type);
int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
		       enum ibv_qp_type type, struct mlx4_qp *qp);
void mlx4_set_sq_sizes(struct mlx4_qp *qp, struct ibv_qp_cap *cap,
		       enum ibv_qp_type type);
struct mlx4_qp *mlx4_find_qp(struct mlx4_context *ctx, uint32_t qpn);
int mlx4_store_qp(struct mlx4_context *ctx, uint32_t qpn, struct mlx4_qp *qp);
void mlx4_clear_qp(struct mlx4_context *ctx, uint32_t qpn);
struct ibv_ah *mlx4_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);
int mlx4_destroy_ah(struct ibv_ah *ah);
int mlx4_alloc_av(struct mlx4_pd *pd, struct ibv_ah_attr *attr,
		   struct mlx4_ah *ah);
void mlx4_free_av(struct mlx4_ah *ah);
/* XRC verbs — only compiled when libibverbs provides the XRC ops. */
#ifdef HAVE_IBV_XRC_OPS
struct ibv_srq *mlx4_create_xrc_srq(struct ibv_pd *pd,
				    struct ibv_xrc_domain *xrc_domain,
				    struct ibv_cq *xrc_cq,
				    struct ibv_srq_init_attr *attr);
struct ibv_xrc_domain *mlx4_open_xrc_domain(struct ibv_context *context,
					    int fd, int oflag);

int mlx4_close_xrc_domain(struct ibv_xrc_domain *d);
int mlx4_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
			   uint32_t *xrc_qp_num);
int mlx4_modify_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			   uint32_t xrc_qp_num,
			   struct ibv_qp_attr *attr,
			   int attr_mask);
int mlx4_query_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			  uint32_t xrc_qp_num,
			  struct ibv_qp_attr *attr,
			  int attr_mask,
			  struct ibv_qp_init_attr *init_attr);
int mlx4_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			uint32_t xrc_qp_num);
int mlx4_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			uint32_t xrc_qp_num);
#endif
455
456
457#endif /* MLX4_H */
458