/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
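
/*
 * An SRQ no larger than MTHCA_MAX_DIRECT_SRQ_SIZE is allocated as a
 * single physically contiguous ("direct") buffer; larger queues are
 * built from a list of pages, and get_wqe() below hides the
 * difference.
 */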

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits hold the descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

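	/*
	 * The low 6 bits of wqe_base_ds hold the descriptor size in
	 * 16-byte units: 1 << (srq->wqe_shift - 4) == ds / 16.
	 */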
	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
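	/*
	 * state_logsize_srqn packs log2 of the queue size into bits
	 * 31:24 above the SRQ number (e.g. a 256-entry SRQ numbered
	 * 0x42 gives 0x08000042); logstride_usrpage likewise packs
	 * log2(stride / 16) into bits 31:29 above the UAR index.
	 */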
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentinel value MTHCA_INVAL_LKEY
	 * (0x100).
	 */
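	/*
	 * Each free WQE stores the index of the next free WQE in its
	 * imm field (-1 terminates the list; see wqe_to_link()), and
	 * its nda_op field holds the byte offset of that next WQE in
	 * the format the hardware follows.
	 */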
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = cpu_to_be32(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

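	/*
	 * Allocate one extra WQE: the queue counts as full while the
	 * free list still holds one entry, so max_wr + 1 WQEs are
	 * needed for max_wr outstanding requests.  Memfree HCAs
	 * additionally require a power-of-2 number of WQEs.
	 */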
	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

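	/*
	 * Round the descriptor size up to the next power of 2 with a
	 * 64-byte floor: e.g. max_gs = 4 gives 16 + 4 * 16 = 80 bytes,
	 * so ds = 128 and wqe_shift = 7.
	 */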
	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		err = -ENOMEM;
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	/*
	 * Don't let cleanup clobber the error we're returning: report
	 * HW2SW_SRQ failures but preserve err.
	 */
	if (mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status))
		mthca_warn(dev, "HW2SW_SRQ failed during cleanup\n");
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

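	/*
	 * Remove the SRQ from the lookup table so no new events can
	 * reference it, drop the initial reference, and wait for any
	 * event handlers still holding a reference to finish.
	 */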
	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

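	/*
	 * Link the freed WQE in at the tail of the free list, updating
	 * both the old tail's software link (imm) and its hardware
	 * link (nda_op) so the chain the HCA follows stays consistent.
	 */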
	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = cpu_to_be32((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
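		/*
		 * A single Tavor receive doorbell can post at most
		 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs, so ring an
		 * intermediate doorbell and start a new chain when the
		 * limit is reached.
		 */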
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor (although
	 * they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= the actual
	 * max WQE descriptor size, rather than return the max_sg value
	 * given by the firmware (which is based on WQE sizes as
	 * multiples of 16, not powers of 2).
	 *
	 * If the SRQ implementation is changed for Tavor to be based
	 * on multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
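	/*
	 * For example, if the firmware reported a 1008-byte max
	 * descriptor, the largest power-of-2 size that fits is 512
	 * bytes, allowing (512 - 16) / 16 = 31 scatter entries with
	 * 16-byte next and data segments.
	 */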
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}