/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST  = 0,
	MTHCA_QP_STATE_INIT = 1,
	MTHCA_QP_STATE_RTR  = 2,
	MTHCA_QP_STATE_RTS  = 3,
	MTHCA_QP_STATE_SQE  = 4,
	MTHCA_QP_STATE_SQD  = 5,
	MTHCA_QP_STATE_ERR  = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC 	= 0x0,
	MTHCA_QP_ST_UC 	= 0x1,
	MTHCA_QP_ST_RD 	= 0x2,
	MTHCA_QP_ST_UD 	= 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;	/* Reserved on Tavor */
	u8     sq_size_stride;	/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

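/*
 * Special QPs (QP0 and QP1) occupy a reserved range of QPNs starting
 * at qp_table.sqp_start: one QP0 and one QP1 per port, so sqp_start
 * and sqp_start + 1 are the QP0s and sqp_start + 2 and sqp_start + 3
 * are the QP1s.
 */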
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

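/*
 * The WQE buffer is either a single contiguous ("direct") allocation
 * or a list of pages; in the indirect case a WQE is located by
 * splitting its byte offset into a page index and an offset within
 * that page.
 */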
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

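/*
 * Return a work queue to its initial empty state: head == tail == 0,
 * with last_comp pointing just before the first entry so the next
 * completion is expected at index 0.
 */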
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

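/*
 * Dispatch an asynchronous event to the QP's event handler.  The QP
 * table lock protects the QPN -> mthca_qp lookup and the refcount;
 * taking a reference under the lock keeps the QP from being freed
 * while the handler runs, and the wake_up lets anyone waiting to
 * destroy the QP proceed once the count drops.
 */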
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event %d for bogus QP %08x\n",
			   (int) event_type, qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0xf,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out_mailbox;
	if (status) {
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mthca_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state      =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num   =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout	    = context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;

out_mailbox:
	/* mailbox is still NULL if we took the RESET shortcut above */
	if (mailbox)
		mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len-1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}

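/*
 * Do the real work of a QP state transition: translate the attributes
 * selected by attr_mask into a hardware QP context, set the matching
 * opt_param_mask bits, and execute the MODIFY_QP firmware command.
 * Validation of the transition itself is the caller's job (see
 * mthca_modify_qp() below).
 */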
static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	u8 status;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN)
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY		&&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event, &status);
	if (err)
		goto out_mailbox;
	if (status) {
		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
			   cur_state, new_state, status);
		err = -EINVAL;
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);
out:
	return err;
}

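/*
 * Validate a requested QP transition and hand off to
 * __mthca_modify_qp().  Unless the caller supplies the current state
 * via IB_QP_CUR_STATE, it is sampled under both work queue locks so
 * it can't change underneath us.
 */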
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

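/*
 * Worked example (assuming the usual 16-byte next, remote-address and
 * data segments): a 256-byte RC descriptor leaves 256 - 16 - 16 = 224
 * bytes of gather space, i.e. room for 14 data segments.
 */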
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

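/*
 * Initialization shared by regular and special QPs: set up software
 * state, map firmware context memory, allocate the WQE buffer and
 * doorbells, and (on mem-free HCAs) pre-link each WQE's "next"
 * segment so the hardware can follow the ring.
 */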
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;
	struct mthca_next_seg *next;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	} else {
		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) % qp->rq.max) <<
						    qp->rq.wqe_shift) | 1);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

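/*
 * Translate the requested ib_qp_cap into actual queue sizes: round
 * work queue depths up to powers of two on mem-free HCAs and make
 * sure enough gather entries are reserved to back max_inline_data.
 */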
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
	u32 max_inline_data;

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr  > dev->limits.max_wqes ||
	    cap->max_recv_wr  > dev->limits.max_wqes ||
	    cap->max_send_sge > dev->limits.max_sg   ||
	    cap->max_recv_sge > dev->limits.max_sg)
		return -EINVAL;

	if (pd->ibpd.uobject &&
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	max_inline_data = pd->ibpd.uobject ? cap->max_inline_data : 0;

	/*
	 * For MLX transport we need 2 extra send gather entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

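/*
 * Lock the send and receive CQs in a globally consistent order (lower
 * CQN first) so that two threads locking the same pair of CQs can
 * never deadlock; mthca_unlock_cqs() mirrors the same order.
 */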
1328219820Sjeffstatic void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1329219820Sjeff{
1330219820Sjeff	if (send_cq == recv_cq)
1331219820Sjeff		spin_lock_irq(&send_cq->lock);
1332219820Sjeff	else if (send_cq->cqn < recv_cq->cqn) {
1333219820Sjeff		spin_lock_irq(&send_cq->lock);
1334219820Sjeff		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1335219820Sjeff	} else {
1336219820Sjeff		spin_lock_irq(&recv_cq->lock);
1337219820Sjeff		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1338219820Sjeff	}
1339219820Sjeff}
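
/*
 * Note on mthca_lock_cqs(): when the two CQs differ, the locks are
 * always taken in order of increasing CQN.  Every caller therefore
 * acquires the pair in the same global order, which prevents an AB-BA
 * deadlock between two QPs that attach the same CQs in opposite
 * send/receive roles.  spin_lock_nested() only informs lockdep that
 * taking a second lock of the same class here is intentional.
 */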

static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_unlock_irq(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
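
/*
 * mthca_unlock_cqs() mirrors mthca_lock_cqs(): the lock taken second
 * (the higher-CQN one) is released first, and spin_unlock_irq() is
 * paired with whichever lock originally disabled interrupts.
 */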

int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}
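
/*
 * Sketch of the teardown protocol this helper supports (inferred from
 * mthca_free_qp() below, not from separate documentation): the
 * refcount is read under qp_table.lock so a concurrent decrement in
 * the CQ polling path cannot be missed, and the freeing thread sleeps
 * with
 *
 *	wait_event(qp->wait, !get_qp_refcount(dev, qp));
 *
 * until the last in-flight reference to the QP is dropped.
 */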

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  1, 0, 0,
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  0,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
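
/*
 * For illustration, the data segment filled in above turns the packed
 * UD header into the first gather entry of the MLX WQE.  A simplified,
 * hypothetical caller (the real ones are the post-send paths below):
 *
 *	err = build_mlx_header(dev, sqp, ind, wr, mlx_seg, data_seg);
 *	if (!err)
 *		... append wr->sg_list entries after data_seg ...
 *
 * The header bytes live in sqp->header_buf at offset
 * ind * MTHCA_UD_HEADER_SIZE, which is why mthca_alloc_sqp() sizes the
 * buffer as sq.max * MTHCA_UD_HEADER_SIZE.
 */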

static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
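
/*
 * The second head - tail computation above is not redundant: wq->tail
 * is advanced by the CQ polling path under cq->lock, so when the
 * lockless check sees the queue as full, the count is re-read under
 * cq->lock to pick up completions that raced with this post before
 * overflow is finally declared.
 */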

static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}
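
/*
 * Semantics of the two layouts above, following the IB atomic verbs:
 * for IB_WR_ATOMIC_CMP_AND_SWP, swap_add carries the swap value and
 * compare the value matched against the target; for
 * IB_WR_ATOMIC_FETCH_AND_ADD, swap_add carries the addend and the
 * compare field is unused.
 */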

static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
	useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
	useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
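
/*
 * The difference between the two UD helpers above: Tavor WQEs
 * reference the address vector indirectly (lkey plus the DMA address
 * of the AV), while Arbel (mem-free) WQEs embed a full MTHCA_AV_SIZE
 * copy of the address vector inline in the segment.
 */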

int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, wr);
			wqe  += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1U << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				    MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
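
/*
 * Doorbell layout used at "out:" above, as assembled by this code
 * (a reading of the code, not a quote from a programming manual): the
 * first word is the byte offset of the first new WQE within the send
 * queue, or'ed with the fence flag f0 and first opcode op0; the
 * second word is (qpn << 8) | size0, where size0 is the first WQE's
 * size in 16-byte chunks.
 */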

int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int uninitialized_var(size0);
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->sq.max] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!nreq)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			wmb();

			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
		}
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	/*
	 * Make sure doorbells don't leak out of RQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
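
/*
 * The mid-loop doorbell above exists because, judging by the batching
 * constant, the count field in the Tavor receive doorbell
 * (qpn << 8 | nreq) is too narrow to describe arbitrarily large
 * batches; posting is therefore chunked so nreq never exceeds
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs per doorbell.
 */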

int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	u32 dbhi;
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
				((qp->sq.head & 0xffff) << 8) | f0 | op0;

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();

			mthca_write64(dbhi, (qp->qpn << 8) | size0,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_arbel_ud_seg(wqe, wr);
			wqe  += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1U << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();

		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
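
/*
 * Ordering recap for the Arbel send path above: the first wmb() makes
 * the WQEs visible before the doorbell record *qp->sq.db is updated,
 * the second wmb() makes the record visible before the MMIO doorbell
 * write, and mmiowb() keeps doorbells written under qp->sq.lock on
 * different CPUs from reaching the HCA out of order once the lock is
 * released.
 */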

int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs)
			mthca_set_data_seg_inval(wqe);

		qp->wrid[ind + qp->sq.max] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
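
/*
 * Unlike the Tavor receive path, the Arbel (mem-free) path above rings
 * no MMIO doorbell: after a wmb() orders the WQE writes, updating the
 * doorbell record *qp->rq.db is sufficient.  The extra segment written
 * by mthca_set_data_seg_inval() terminates scatter lists shorter than
 * rq.max_gs.
 */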

void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all receive WQEs generate a CQE, so we're always
	 * at the end of the doorbell chain.
	 */
	if (qp->ibqp.srq && !is_send) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}
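
/*
 * Bit layout assumed by the masks above, as inferred from this code:
 * the low 6 bits of ee_nds hold the WQE size in 16-byte chunks, so a
 * zero there marks the end of a doorbell chain, while the low 6 bits
 * of nda_op carry the opcode rather than address bits and are masked
 * off when the next-WQE address is reconstructed.
 */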

int mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
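
/*
 * Example of the sqp_start rounding above: with 8 reserved QPs,
 * (8 + 1) & ~1UL = 8, so QPs 8 and 9 become the port 1/port 2 SMI
 * QPs and 10 and 11 the GSI QPs; with 7 reserved, (7 + 1) & ~1UL = 8
 * as well, keeping the port 1 special QP number even as required.
 */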

void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}