/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/hardirq.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

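/*
 * Dispatch a completion event to the consumer of CQ @cqn.  Called from
 * the EQ handler, so the radix tree lookup takes the table lock and the
 * reference count keeps the CQ alive until the handler returns;
 * mlx4_cq_free() waits on &cq->free for the count to drop to zero.
 */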
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	spin_lock(&cq_table->lock);
	cq = radix_tree_lookup(&cq_table->tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);
	spin_unlock(&cq_table->lock);

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

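/*
 * Dispatch an asynchronous event (e.g. a CQ error) to the consumer of
 * CQ @cqn, using the same lookup and refcount scheme as
 * mlx4_cq_completion() above.
 */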
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	spin_lock(&cq_table->lock);

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&cq_table->lock);

	if (!cq) {
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

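/*
 * Firmware command wrappers: SW2HW_CQ hands a software-built CQ context
 * to the HCA, MODIFY_CQ updates it in place, and HW2SW_CQ reclaims it
 * (passing no mailbox sets the opcode modifier to 1, which tells the
 * firmware not to return the context).
 */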
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

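/*
 * Tune event moderation on an active CQ: request a completion event
 * after @count CQEs or @period elapsed time units, whichever happens
 * first (MODIFY_CQ with opcode modifier 1).
 */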
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

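/*
 * Point an active CQ at a resized buffer described by @mtt (MODIFY_CQ
 * with opcode modifier 0).  The caller is expected to migrate any
 * unpolled CQEs and switch its consumer over to the new buffer.
 */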
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

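/*
 * Set the overrun-ignore (OI) flag so the HCA keeps writing CQEs even
 * when the consumer has not polled the CQ (MODIFY_CQ with opcode
 * modifier 3).
 */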
int mlx4_cq_ignore_overrun(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags |= cpu_to_be32(MLX4_CQ_FLAG_OI);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 3);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_ignore_overrun);

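/*
 * Reserve a CQ number and reference the ICM pages that back its
 * context and cMPT entries.  Multi-function devices go through
 * mlx4_cq_alloc_icm() below instead of calling this directly.
 */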
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
	if (err)
		goto err_put;

	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn);
	return err;
}

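/*
 * On a multi-function device the CQ number is obtained through the
 * wrapped ALLOC_RES command so the resource tracker can account for
 * it; otherwise allocate directly.
 */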
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

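/*
 * Undo __mlx4_cq_alloc_icm(): drop the ICM references and return the
 * CQ number to the bitmap.
 */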
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn);
}

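/*
 * Counterpart of mlx4_cq_alloc_icm(): multi-function devices release
 * the CQ number through the wrapped FREE_RES command, the native case
 * frees it directly.
 */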
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		__mlx4_cq_free_icm(dev, cqn);
	}
}

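/*
 * Pick the completion vector whose EQ currently serves the fewest CQs;
 * used when a CQ is allocated with MLX4_LEAST_ATTACHED_VECTOR to
 * spread CQs evenly across the completion vectors.
 */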
static int mlx4_find_least_loaded_vector(struct mlx4_priv *priv)
{
	int i;
	int index = 0;
	int min = priv->eq_table.eq[0].load;

	for (i = 1; i < priv->dev.caps.num_comp_vectors; i++) {
		if (priv->eq_table.eq[i].load < min) {
			index = i;
			min = priv->eq_table.eq[i].load;
		}
	}

	return index;
}

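/*
 * Allocate and activate a CQ: reserve a CQN and its ICM, publish the
 * CQ in the radix tree so events can be dispatched to it, then build
 * the hardware context (log size, UAR page, completion EQ, buffer
 * address, doorbell record) and hand it over with SW2HW_CQ.
 */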
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	cq->vector = (vector == MLX4_LEAST_ATTACHED_VECTOR) ?
		mlx4_find_least_loaded_vector(priv) : vector;

	if (cq->vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
		return -EINVAL;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags  |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[cq->vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	priv->eq_table.eq[cq->vector].load++;
	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	cq->eqn = priv->eq_table.eq[cq->vector].eqn;
	cq->irq = priv->eq_table.eq[cq->vector].irq;

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

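/*
 * Tear down a CQ in reverse order: reclaim the context from the
 * hardware, unpublish the CQ from the radix tree, wait for any event
 * handlers still holding a reference, then release the CQN and ICM.
 */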
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	priv->eq_table.eq[cq->vector].load--;
	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

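/*
 * One-time setup of the per-device CQ table.  Slaves only need the
 * radix tree; the CQN bitmap is managed on the master/native side.
 */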
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}