/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

/*
 * Hardware EQ context layout.  The 64-bit MTT base address is split
 * into high and low halves because it is only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	u8			log_eq_size;
	u8			reserved2[4];
	u8			eq_period;
	u8			reserved3;
	u8			eq_max_count;
	u8			reserved4[3];
	u8			intr;
	u8			log_page_size;
	u8			reserved5[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	u32			reserved6[2];
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved7[4];
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))
#define MLX4_CATAS_EVENT_MASK  (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)

struct mlx4_eqe {
	u8			reserved1;
	u8			type;
	u8			reserved2;
	u8			subtype;
	union {
		u32		raw[6];
		struct {
			__be32	cqn;
		} __attribute__((packed)) comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32	qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32	srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __attribute__((packed)) port_change;
	}			event;
	u8			reserved3[3];
	u8			owner;
} __attribute__((packed));

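/*
 * Ring the EQ doorbell: post the 24-bit consumer index and, if
 * req_not is set, request another event interrupt from the HCA.
 */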
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

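/*
 * Return the EQE at the current consumer index if hardware has
 * handed it to software, judged by comparing the EQE ownership bit
 * against the current pass over the (power-of-two sized) queue.
 */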
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

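/*
 * Poll one EQ: consume EQEs until ownership passes back to hardware,
 * dispatching each event to the CQ/QP/SRQ/command handlers, then
 * update the consumer index and rearm the EQ.
 */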
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
					    be32_to_cpu(eqe->event.port_change.port) >> 28);
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			/*
			 * Updating the consumer index mid-loop is a
			 * rare case, not the fast path, so the extra
			 * doorbell write is cheap.
			 */
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

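/*
 * INTx interrupt handler: clear the interrupt and poll all EQs
 * sharing the line, reporting whether any work was found.
 */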
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < MLX4_EQ_CATAS; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

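/* MSI-X interrupt handler: each vector services exactly one EQ. */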
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
{
	mlx4_handle_catas_err(dev_ptr);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

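/*
 * MAP_EQ firmware command: map (or unmap) the event types in
 * event_mask onto the given EQ number.
 */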
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
}

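/*
 * Return the doorbell address for an EQ, mapping the UAR page that
 * contains it on first use.  Each UAR page holds the doorbells for
 * four EQs.
 */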
static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev,
					       struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

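/*
 * Allocate and initialize one EQ: allocate the EQE pages and MTT
 * entries, map the doorbell, and hand the queue to firmware with
 * SW2HW_EQ.
 */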
static int __devinit mlx4_create_eq(struct mlx4_dev *dev, int nent,
				    u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

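/*
 * Tear down one EQ: return it to firmware with HW2SW_EQ, then free
 * its MTT entries, EQE pages, and EQ number.
 */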
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MLX4_EQ_CATAS; ++i)
		if (eq_table->eq[i].have_irq)
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
	if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
		free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
}

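/*
 * Map the interrupt clear register so that INTx interrupts can be
 * acknowledged in mlx4_interrupt().
 */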
static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int __devinit mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 64 bytes of context
	 * memory, or 2 KB total.
	 */
	priv->eq_table.icm_virt = icm_virt;
	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!priv->eq_table.icm_page)
		return -ENOMEM;
	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
		__free_page(priv->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->eq_table.icm_page);
	}

	return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(priv->eq_table.icm_page);
}

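/*
 * Set up the EQ table: create the completion, async, and (with
 * MSI-X) catastrophic-error EQs, request their interrupts, and map
 * the async and catastrophic event types onto them.
 */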
int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_free;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int  = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
			     &priv->eq_table.eq[MLX4_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
			     &priv->eq_table.eq[MLX4_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	if (dev->flags & MLX4_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
			[MLX4_EQ_ASYNC] = DRV_NAME " (async)",
			[MLX4_EQ_CATAS] = DRV_NAME " (catas)"
		};

		err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
				     &priv->eq_table.eq[MLX4_EQ_CATAS]);
		if (err)
			goto err_out_async;

		for (i = 0; i < MLX4_EQ_CATAS; ++i) {
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt,
					  0, eq_name[i], priv->eq_table.eq + i);
			if (err)
				goto err_out_catas;

			priv->eq_table.eq[i].have_irq = 1;
		}

		err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
				  mlx4_catas_interrupt, 0,
				  eq_name[MLX4_EQ_CATAS], dev);
		if (err)
			goto err_out_catas;

		priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
	} else {
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, DRV_NAME, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	for (i = 0; i < MLX4_EQ_CATAS; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	if (dev->flags & MLX4_FLAG_MSI_X) {
		err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
		if (err)
			mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
	}

	return 0;

err_out_catas:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

err_out_comp:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);

err_out_unmap:
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_free:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
	return err;
}

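/*
 * Undo mlx4_init_eq_table(): unmap the event masks, free IRQs and
 * EQs, and release the interrupt clear register and UAR mappings.
 */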
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (dev->flags & MLX4_FLAG_MSI_X)
		mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
			    priv->eq_table.eq[MLX4_EQ_CATAS].eqn);

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < MLX4_EQ_CATAS; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
	if (dev->flags & MLX4_FLAG_MSI_X)
		mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
}