/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)

struct mthca_buf_list {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

/*
 * Queue buffers are either one physically contiguous ("direct")
 * allocation or a list of page-sized chunks.
 */
union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};
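
/*
 * How consumers index into a mthca_buf depends on an is_direct flag
 * stored alongside the union (e.g. in mthca_cq_buf below).  Roughly
 * (a sketch of the pattern used by get_cqe_from_buf() in mthca_cq.c):
 *
 *	if (is_direct)
 *		return buf->direct.buf + offset;
 *	else
 *		return buf->page_list[offset >> PAGE_SHIFT].buf
 *			+ (offset & (PAGE_SIZE - 1));
 */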

struct mthca_uar {
	unsigned long pfn;
	int           index;
};

struct mthca_user_db_table;

struct mthca_ucontext {
	struct ib_ucontext          ibucontext;
	struct mthca_uar            uar;
	struct mthca_user_db_table *db_tab;
	int			    reg_mr_warned;
};

struct mthca_mtt;

struct mthca_mr {
	struct ib_mr      ibmr;
	struct ib_umem   *umem;
	struct mthca_mtt *mtt;
};

struct mthca_pd {
	struct ib_pd    ibpd;
	u32             pd_num;
	atomic_t        sqp_count;
	struct mthca_mr ntmr;
	int             privileged;
};

struct mthca_eq {
	struct mthca_dev      *dev;
	int                    eqn;
	u32                    eqn_mask;
	u32                    cons_index;
	u16                    msi_x_vector;
	u16                    msi_x_entry;
	int                    have_irq;
	int                    nent;
	struct mthca_buf_list *page_list;
	struct mthca_mr        mr;
	char		       irq_name[IB_DEVICE_NAME_MAX];
};

struct mthca_av;

enum mthca_ah_type {
	MTHCA_AH_ON_HCA,	/* AV lives in HCA-attached memory */
	MTHCA_AH_PCI_POOL,	/* AV allocated from a DMA pool */
	MTHCA_AH_KMALLOC	/* kmalloc() fallback */
};

struct mthca_ah {
	struct ib_ah       ibah;
	enum mthca_ah_type type;
	u32                key;
	struct mthca_av   *av;
	dma_addr_t         avdma;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the cq with the lower cqn locked first.
 * No other nesting should be done.
 *
 * Each struct mthca_cq/qp also has a ref count, protected by the
 * corresponding table lock.  The pointer from the cq/qp_table to the
 * struct counts as one reference.  This reference is also good for
 * access through the consumer API, so modifying the CQ/QP etc. doesn't
 * need to take another reference.  Access to a QP because of a
 * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table
 * - remove pointer and decrement ref count
 * - unlock cq/qp_table
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when a
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.  Similarly, the consumer is responsible
 * for ensuring that no CQ resize operations are pending when a CQ
 * is destroyed.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
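
/*
 * An illustrative sketch of the two flows above, modeled on the CQ
 * event and destroy paths (cf. mthca_cq.c).  The table layout and the
 * mthca_array_get()/mthca_array_clear() helpers come from mthca_dev.h
 * and are assumed here; this is documentation, not driver API:
 *
 *	// access because of a completion event
 *	spin_lock(&dev->cq_table.lock);
 *	cq = mthca_array_get(&dev->cq_table.cq,
 *			     cqn & (dev->limits.num_cqs - 1));
 *	if (cq)
 *		++cq->refcount;
 *	spin_unlock(&dev->cq_table.lock);
 *	if (!cq)
 *		return;				// CQ already destroyed
 *
 *	spin_lock(&cq->lock);
 *	// ... handle the event ...
 *	spin_unlock(&cq->lock);
 *
 *	spin_lock(&dev->cq_table.lock);
 *	if (!--cq->refcount)
 *		wake_up(&cq->wait);		// unblock a sleeping destroyer
 *	spin_unlock(&dev->cq_table.lock);
 *
 *	// destroy: unpublish the struct, then wait for users to drain
 *	spin_lock_irq(&dev->cq_table.lock);
 *	mthca_array_clear(&dev->cq_table.cq,
 *			  cq->cqn & (dev->limits.num_cqs - 1));
 *	--cq->refcount;
 *	spin_unlock_irq(&dev->cq_table.lock);
 *	wait_event(cq->wait, !cq->refcount);
 */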

struct mthca_cq_buf {
	union mthca_buf		queue;
	struct mthca_mr		mr;
	int			is_direct;
};

struct mthca_cq_resize {
	struct mthca_cq_buf	buf;
	int			cqe;
	enum {
		CQ_RESIZE_ALLOC,	/* new buffer allocated */
		CQ_RESIZE_READY,	/* HCA told about the new buffer */
		CQ_RESIZE_SWAPPED	/* polling switched to the new buffer */
	}			state;
};
182
183struct mthca_cq {
184	struct ib_cq		ibcq;
185	spinlock_t		lock;
186	int			refcount;
187	int			cqn;
188	u32			cons_index;
189	struct mthca_cq_buf	buf;
190	struct mthca_cq_resize *resize_buf;
191	int			is_kernel;
192
193	/* Next fields are Arbel only */
194	int			set_ci_db_index;
195	__be32		       *set_ci_db;
196	int			arm_db_index;
197	__be32		       *arm_db;
198	int			arm_sn;
199
200	wait_queue_head_t	wait;
201	struct mutex		mutex;
202};

struct mthca_srq {
	struct ib_srq		ibsrq;
	spinlock_t		lock;
	int			refcount;
	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;
	int			first_free;
	int			last_free;
	u16			counter;  /* Arbel only */
	int			db_index; /* Arbel only */
	__be32		       *db;       /* Arbel only */
	void		       *last;

	int			is_direct;
	u64		       *wrid;
	union mthca_buf		queue;
	struct mthca_mr		mr;

	wait_queue_head_t	wait;
	struct mutex		mutex;
};

struct mthca_wq {
	spinlock_t lock;
	int        max;
	unsigned   next_ind;
	unsigned   last_comp;
	unsigned   head;
	unsigned   tail;
	void      *last;
	int        max_gs;
	int        wqe_shift;

	int        db_index;	/* Arbel only */
	__be32    *db;
};
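
/*
 * The send and receive queues are rings of "max" entries, each
 * 1 << wqe_shift bytes wide; head and tail are free-running counters.
 * Roughly (a sketch of the pattern in mthca_post_send(), mthca_qp.c):
 *
 *	if (wq->head - wq->tail >= wq->max)
 *		// ... ring full, fail the post ...
 *	ind = wq->head & (wq->max - 1);
 *	wqe = get_send_wqe(qp, ind);	// helper local to mthca_qp.c
 */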

struct mthca_sqp {
	int             pkey_index;
	u32             qkey;
	u32             send_psn;
	struct ib_ud_header ud_header;
	int             header_buf_size;
	void           *header_buf;
	dma_addr_t      header_dma;
};

struct mthca_qp {
	struct ib_qp           ibqp;
	int                    refcount;
	u32                    qpn;
	int                    is_direct;
	u8                     port; /* for SQP and memfree use only */
	u8                     alt_port; /* for memfree use only */
	u8                     transport;
	u8                     state;
	u8                     atomic_rd_en;
	u8                     resp_depth;

	struct mthca_mr        mr;

	struct mthca_wq        rq;
	struct mthca_wq        sq;
	enum ib_sig_type       sq_policy;
	int                    send_wqe_offset;
	int                    max_inline_data;

	u64                   *wrid;
	union mthca_buf	       queue;

	wait_queue_head_t      wait;
	struct mutex	       mutex;
	struct mthca_sqp *sqp;
};

static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mthca_srq, ibsrq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}
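
/*
 * The to_m*() wrappers above recover the driver-private structure from
 * the embedded ib_* object handed back by the core verbs layer.  A
 * typical verbs entry point starts out like this (sketch; the real
 * callers live in mthca_provider.c and mthca_cq.c):
 *
 *	int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
 *			  struct ib_wc *entry)
 *	{
 *		struct mthca_cq *cq = to_mcq(ibcq);
 *		// ...
 *	}
 */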

#endif /* MTHCA_PROVIDER_H */