1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * $Id: mthca_cmd.c,v 1.1.1.1 2007/08/03 18:52:32 Exp $
35 */
36
37#include <linux/completion.h>
38#include <linux/pci.h>
39#include <linux/errno.h>
40#include <linux/sched.h>
41#include <asm/io.h>
42#include <rdma/ib_mad.h>
43
44#include "mthca_dev.h"
45#include "mthca_config_reg.h"
46#include "mthca_cmd.h"
47#include "mthca_memfree.h"
48
/* Token used for every command posted in polling mode (no event completion). */
#define CMD_POLL_TOKEN 0xffff

/*
 * Byte offsets of the fields within the HCA Command Register (HCR),
 * plus bit positions within its final status/control dword.
 */
enum {
	HCR_IN_PARAM_OFFSET    = 0x00,
	HCR_IN_MODIFIER_OFFSET = 0x08,
	HCR_OUT_PARAM_OFFSET   = 0x0c,
	HCR_TOKEN_OFFSET       = 0x14,
	HCR_STATUS_OFFSET      = 0x18,

	HCR_OPMOD_SHIFT        = 12,	/* opcode-modifier field position */
	HCA_E_BIT              = 22,	/* 'e' bit: request event on completion */
	HCR_GO_BIT             = 23	/* 'go' bit: HCR ownership/busy flag */
};
62
/*
 * Firmware command opcodes.  Grouped by functional area; some opcodes
 * (e.g. debug commands) have no in-tree caller but are kept for
 * completeness against the Mellanox command set.
 */
enum {
	/* initialization and general commands */
	CMD_SYS_EN          = 0x1,
	CMD_SYS_DIS         = 0x2,
	CMD_MAP_FA          = 0xfff,
	CMD_UNMAP_FA        = 0xffe,
	CMD_RUN_FW          = 0xff6,
	CMD_MOD_STAT_CFG    = 0x34,
	CMD_QUERY_DEV_LIM   = 0x3,
	CMD_QUERY_FW        = 0x4,
	CMD_ENABLE_LAM      = 0xff8,
	CMD_DISABLE_LAM     = 0xff7,
	CMD_QUERY_DDR       = 0x5,
	CMD_QUERY_ADAPTER   = 0x6,
	CMD_INIT_HCA        = 0x7,
	CMD_CLOSE_HCA       = 0x8,
	CMD_INIT_IB         = 0x9,
	CMD_CLOSE_IB        = 0xa,
	CMD_QUERY_HCA       = 0xb,
	CMD_SET_IB          = 0xc,
	CMD_ACCESS_DDR      = 0x2e,
	CMD_MAP_ICM         = 0xffa,
	CMD_UNMAP_ICM       = 0xff9,
	CMD_MAP_ICM_AUX     = 0xffc,
	CMD_UNMAP_ICM_AUX   = 0xffb,
	CMD_SET_ICM_SIZE    = 0xffd,

	/* TPT commands */
	CMD_SW2HW_MPT 	    = 0xd,
	CMD_QUERY_MPT 	    = 0xe,
	CMD_HW2SW_MPT 	    = 0xf,
	CMD_READ_MTT        = 0x10,
	CMD_WRITE_MTT       = 0x11,
	CMD_SYNC_TPT        = 0x2f,

	/* EQ commands */
	CMD_MAP_EQ          = 0x12,
	CMD_SW2HW_EQ 	    = 0x13,
	CMD_HW2SW_EQ 	    = 0x14,
	CMD_QUERY_EQ        = 0x15,

	/* CQ commands */
	CMD_SW2HW_CQ 	    = 0x16,
	CMD_HW2SW_CQ 	    = 0x17,
	CMD_QUERY_CQ 	    = 0x18,
	CMD_RESIZE_CQ       = 0x2c,

	/* SRQ commands */
	CMD_SW2HW_SRQ 	    = 0x35,
	CMD_HW2SW_SRQ 	    = 0x36,
	CMD_QUERY_SRQ       = 0x37,
	CMD_ARM_SRQ         = 0x40,

	/* QP/EE commands */
	CMD_RST2INIT_QPEE   = 0x19,
	CMD_INIT2RTR_QPEE   = 0x1a,
	CMD_RTR2RTS_QPEE    = 0x1b,
	CMD_RTS2RTS_QPEE    = 0x1c,
	CMD_SQERR2RTS_QPEE  = 0x1d,
	CMD_2ERR_QPEE       = 0x1e,
	CMD_RTS2SQD_QPEE    = 0x1f,
	CMD_SQD2SQD_QPEE    = 0x38,
	CMD_SQD2RTS_QPEE    = 0x20,
	CMD_ERR2RST_QPEE    = 0x21,
	CMD_QUERY_QPEE      = 0x22,
	CMD_INIT2INIT_QPEE  = 0x2d,
	CMD_SUSPEND_QPEE    = 0x32,
	CMD_UNSUSPEND_QPEE  = 0x33,
	/* special QPs and management commands */
	CMD_CONF_SPECIAL_QP = 0x23,
	CMD_MAD_IFC         = 0x24,

	/* multicast commands */
	CMD_READ_MGM        = 0x25,
	CMD_WRITE_MGM       = 0x26,
	CMD_MGID_HASH       = 0x27,

	/* miscellaneous commands */
	CMD_DIAG_RPRT       = 0x30,
	CMD_NOP             = 0x31,

	/* debug commands */
	CMD_QUERY_DEBUG_MSG = 0x2a,
	CMD_SET_DEBUG_MSG   = 0x2b,
};
148
149/*
150 * According to Mellanox code, FW may be starved and never complete
151 * commands.  So we can't use strict timeouts described in PRM -- we
152 * just arbitrarily select 60 seconds for now.
153 */
enum {
	/* All classes collapsed to one value -- see the comment above. */
	CMD_TIME_CLASS_A = 60 * HZ,
	CMD_TIME_CLASS_B = 60 * HZ,
	CMD_TIME_CLASS_C = 60 * HZ
};

enum {
	/* How long to wait for the HCR 'go' bit to clear before giving up. */
	GO_BIT_TIMEOUT = HZ * 10
};
163
/*
 * Per-command bookkeeping for event-driven command execution.  The
 * entries live in dev->cmd.context[] and are chained into a free list
 * through 'next' (see mthca_cmd_use_events()/mthca_cmd_wait()).
 */
struct mthca_cmd_context {
	struct completion done;		/* signalled by mthca_cmd_event() */
	int               result;	/* completion result code */
	int               next;		/* index of next free context; -1 = tail */
	u64               out_param;	/* immediate output from the FW event */
	u16               token;	/* matches a command to its completion */
	u8                status;	/* FW status byte of the completion */
};
172
/*
 * If nonzero (and the FW advertises doorbell support in QUERY_FW),
 * post commands through the doorbell page instead of the HCR.
 */
static int fw_cmd_doorbell = 0;
module_param(fw_cmd_doorbell, int, 0644);
MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
		 "(and supported by FW)");
177
/*
 * Test the 'go' bit in the HCR status dword: nonzero means the HCR is
 * still owned by the firmware and must not be overwritten.  The mask
 * is byte-swapped (rather than swapping the value read) to match the
 * cpu_to_be32/__raw_writel convention used when posting commands.
 */
static inline int go_bit(struct mthca_dev *dev)
{
	return readl(dev->hcr + HCR_STATUS_OFFSET) &
		swab32(1 << HCR_GO_BIT);
}
183
/*
 * Post a command through the FW command doorbell page.  Each 32-bit
 * word is written big-endian at the offset the FW reported in
 * QUERY_FW (dev->cmd.dbell_offsets[]), with a write barrier after
 * every store to preserve device-visible ordering.  The 'go' bit and
 * the event bit are set in the same write as the opcode; the final
 * zero write at offs[7] ends the sequence (FW-mandated, presumably --
 * confirm against the PRM).  Doorbell posting is always event-driven,
 * so no completion status is read back here.
 */
static void mthca_cmd_post_dbell(struct mthca_dev *dev,
				 u64 in_param,
				 u64 out_param,
				 u32 in_modifier,
				 u8 op_modifier,
				 u16 op,
				 u16 token)
{
	void __iomem *ptr = dev->cmd.dbell_map;
	u16 *offs = dev->cmd.dbell_offsets;

	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),           ptr + offs[0]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  ptr + offs[1]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(in_modifier),              ptr + offs[2]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),          ptr + offs[3]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), ptr + offs[4]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32(token << 16),              ptr + offs[5]);
	wmb();
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
					       (1 << HCA_E_BIT)                 |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					        op),                      ptr + offs[6]);
	wmb();
	__raw_writel((__force u32) 0,                                     ptr + offs[7]);
	wmb();
}
215
/*
 * Write a command into the HCR.  In event mode, first yield-wait up
 * to GO_BIT_TIMEOUT for the previous command's 'go' bit to clear; if
 * it is still set afterwards (or immediately in polling mode, where
 * the caller serializes), fail with -EAGAIN.  The six parameter
 * dwords are written big-endian; the final dword setting 'go' (plus
 * the event bit when requested) hands ownership to the FW.
 */
static int mthca_cmd_post_hcr(struct mthca_dev *dev,
			      u64 in_param,
			      u64 out_param,
			      u32 in_modifier,
			      u8 op_modifier,
			      u16 op,
			      u16 token,
			      int event)
{
	if (event) {
		unsigned long end = jiffies + GO_BIT_TIMEOUT;

		while (go_bit(dev) && time_before(jiffies, end)) {
			set_current_state(TASK_RUNNING);
			schedule();
		}
	}

	if (go_bit(dev))
		return -EAGAIN;

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),           dev->hcr + 0 * 4);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  dev->hcr + 1 * 4);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),              dev->hcr + 2 * 4);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),          dev->hcr + 3 * 4);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),              dev->hcr + 5 * 4);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
					       (event ? (1 << HCA_E_BIT) : 0)   |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),                       dev->hcr + 6 * 4);

	return 0;
}
260
261static int mthca_cmd_post(struct mthca_dev *dev,
262			  u64 in_param,
263			  u64 out_param,
264			  u32 in_modifier,
265			  u8 op_modifier,
266			  u16 op,
267			  u16 token,
268			  int event)
269{
270	int err = 0;
271
272	mutex_lock(&dev->cmd.hcr_mutex);
273
274	if (event && dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS && fw_cmd_doorbell)
275		mthca_cmd_post_dbell(dev, in_param, out_param, in_modifier,
276					   op_modifier, op, token);
277	else
278		err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
279					 op_modifier, op, token, event);
280
281	mutex_unlock(&dev->cmd.hcr_mutex);
282	return err;
283}
284
/*
 * Execute a FW command in polling mode: post it with the fixed
 * CMD_POLL_TOKEN and yield-wait until the HCR 'go' bit clears or
 * 'timeout' jiffies elapse.  poll_sem serializes polled commands,
 * since there is only one HCR.  On success *status receives the FW
 * status byte; with out_is_imm set, the 64-bit immediate result is
 * read back from the HCR into *out_param.
 */
static int mthca_cmd_poll(struct mthca_dev *dev,
			  u64 in_param,
			  u64 *out_param,
			  int out_is_imm,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  unsigned long timeout,
			  u8 *status)
{
	int err = 0;
	unsigned long end;

	down(&dev->cmd.poll_sem);

	err = mthca_cmd_post(dev, in_param,
			     out_param ? *out_param : 0,
			     in_modifier, op_modifier,
			     op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = timeout + jiffies;
	while (go_bit(dev) && time_before(jiffies, end)) {
		set_current_state(TASK_RUNNING);
		schedule();
	}

	/* 'go' still set after the timeout: FW never completed the command. */
	if (go_bit(dev)) {
		err = -EBUSY;
		goto out;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));

	/* The FW status byte is the top 8 bits of the status dword. */
	*status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;

out:
	up(&dev->cmd.poll_sem);
	return err;
}
331
/*
 * Handle a FW command-completion event: look up the waiting context
 * by the low bits of the token, record the FW status and immediate
 * output, and wake the waiter in mthca_cmd_wait().
 */
void mthca_cmd_event(struct mthca_dev *dev,
		     u16 token,
		     u8  status,
		     u64 out_param)
{
	struct mthca_cmd_context *context =
		&dev->cmd.context[token & dev->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->result    = 0;
	context->status    = status;
	context->out_param = out_param;

	/*
	 * Advance the token so that if this command times out and the
	 * context is reused, a late completion of the old command fails
	 * the token check above instead of corrupting the new waiter.
	 */
	context->token += dev->cmd.token_mask + 1;

	complete(&context->done);
}
352
/*
 * Execute a FW command in event mode: take a free command context,
 * post the command with that context's token, and sleep until
 * mthca_cmd_event() completes it or 'timeout' expires.  event_sem
 * bounds the number of commands in flight to what the FW supports.
 * *status receives the FW status byte; with out_is_imm set, the
 * immediate result from the completion event is copied to *out_param.
 */
static int mthca_cmd_wait(struct mthca_dev *dev,
			  u64 in_param,
			  u64 *out_param,
			  int out_is_imm,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  unsigned long timeout,
			  u8 *status)
{
	int err = 0;
	struct mthca_cmd_context *context;

	down(&dev->cmd.event_sem);

	/* Pop a context off the free list. */
	spin_lock(&dev->cmd.context_lock);
	BUG_ON(dev->cmd.free_head < 0);
	context = &dev->cmd.context[dev->cmd.free_head];
	dev->cmd.free_head = context->next;
	spin_unlock(&dev->cmd.context_lock);

	init_completion(&context->done);

	err = mthca_cmd_post(dev, in_param,
			     out_param ? *out_param : 0,
			     in_modifier, op_modifier,
			     op, context->token, 1);
	if (err)
		goto out;

	if (!wait_for_completion_timeout(&context->done, timeout)) {
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err)
		goto out;

	*status = context->status;
	if (*status)
		mthca_dbg(dev, "Command %02x completed with status %02x\n",
			  op, *status);

	if (out_is_imm)
		*out_param = context->out_param;

out:
	/* Push the context back onto the free list. */
	spin_lock(&dev->cmd.context_lock);
	context->next = dev->cmd.free_head;
	dev->cmd.free_head = context - dev->cmd.context;
	spin_unlock(&dev->cmd.context_lock);

	up(&dev->cmd.event_sem);
	return err;
}
409
410/* Invoke a command with an output mailbox */
411static int mthca_cmd_box(struct mthca_dev *dev,
412			 u64 in_param,
413			 u64 out_param,
414			 u32 in_modifier,
415			 u8 op_modifier,
416			 u16 op,
417			 unsigned long timeout,
418			 u8 *status)
419{
420	if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
421		return mthca_cmd_wait(dev, in_param, &out_param, 0,
422				      in_modifier, op_modifier, op,
423				      timeout, status);
424	else
425		return mthca_cmd_poll(dev, in_param, &out_param, 0,
426				      in_modifier, op_modifier, op,
427				      timeout, status);
428}
429
430/* Invoke a command with no output parameter */
431static int mthca_cmd(struct mthca_dev *dev,
432		     u64 in_param,
433		     u32 in_modifier,
434		     u8 op_modifier,
435		     u16 op,
436		     unsigned long timeout,
437		     u8 *status)
438{
439	return mthca_cmd_box(dev, in_param, 0, in_modifier,
440			     op_modifier, op, timeout, status);
441}
442
443/*
444 * Invoke a command with an immediate output parameter (and copy the
445 * output into the caller's out_param pointer after the command
446 * executes).
447 */
448static int mthca_cmd_imm(struct mthca_dev *dev,
449			 u64 in_param,
450			 u64 *out_param,
451			 u32 in_modifier,
452			 u8 op_modifier,
453			 u16 op,
454			 unsigned long timeout,
455			 u8 *status)
456{
457	if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
458		return mthca_cmd_wait(dev, in_param, out_param, 1,
459				      in_modifier, op_modifier, op,
460				      timeout, status);
461	else
462		return mthca_cmd_poll(dev, in_param, out_param, 1,
463				      in_modifier, op_modifier, op,
464				      timeout, status);
465}
466
467int mthca_cmd_init(struct mthca_dev *dev)
468{
469	mutex_init(&dev->cmd.hcr_mutex);
470	sema_init(&dev->cmd.poll_sem, 1);
471	dev->cmd.flags = 0;
472
473	dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE,
474			   MTHCA_HCR_SIZE);
475	if (!dev->hcr) {
476		mthca_err(dev, "Couldn't map command register.");
477		return -ENOMEM;
478	}
479
480	dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
481					MTHCA_MAILBOX_SIZE,
482					MTHCA_MAILBOX_SIZE, 0);
483	if (!dev->cmd.pool) {
484		iounmap(dev->hcr);
485		return -ENOMEM;
486	}
487
488	return 0;
489}
490
/*
 * Undo mthca_cmd_init(): destroy the mailbox pool, unmap the HCR,
 * and, when commands were posted through doorbells, unmap the
 * doorbell page as well.
 */
void mthca_cmd_cleanup(struct mthca_dev *dev)
{
	pci_pool_destroy(dev->cmd.pool);
	iounmap(dev->hcr);
	if (dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS)
		iounmap(dev->cmd.dbell_map);
}
498
499/*
500 * Switch to using events to issue FW commands (should be called after
501 * event queue to command events has been initialized).
502 */
int mthca_cmd_use_events(struct mthca_dev *dev)
{
	int i;

	dev->cmd.context = kmalloc(dev->cmd.max_cmds *
				   sizeof (struct mthca_cmd_context),
				   GFP_KERNEL);
	if (!dev->cmd.context)
		return -ENOMEM;

	/* Chain every context into the free list; token starts at the index. */
	for (i = 0; i < dev->cmd.max_cmds; ++i) {
		dev->cmd.context[i].token = i;
		dev->cmd.context[i].next = i + 1;
	}

	dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
	dev->cmd.free_head = 0;

	/* Allow up to max_cmds commands in flight concurrently. */
	sema_init(&dev->cmd.event_sem, dev->cmd.max_cmds);
	spin_lock_init(&dev->cmd.context_lock);

	/* token_mask = (smallest power of two >= max_cmds) - 1 */
	for (dev->cmd.token_mask = 1;
	     dev->cmd.token_mask < dev->cmd.max_cmds;
	     dev->cmd.token_mask <<= 1)
		; /* nothing */
	--dev->cmd.token_mask;

	dev->cmd.flags |= MTHCA_CMD_USE_EVENTS;

	/* Hold poll_sem while events are in use, blocking the polling path. */
	down(&dev->cmd.poll_sem);

	return 0;
}
536
537/*
538 * Switch back to polling (used when shutting down the device)
539 */
void mthca_cmd_use_polling(struct mthca_dev *dev)
{
	int i;

	dev->cmd.flags &= ~MTHCA_CMD_USE_EVENTS;

	/* Take every event slot, i.e. wait for outstanding commands to drain. */
	for (i = 0; i < dev->cmd.max_cmds; ++i)
		down(&dev->cmd.event_sem);

	kfree(dev->cmd.context);

	/* Release poll_sem (held since mthca_cmd_use_events()). */
	up(&dev->cmd.poll_sem);
}
553
554struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
555					  gfp_t gfp_mask)
556{
557	struct mthca_mailbox *mailbox;
558
559	mailbox = kmalloc(sizeof *mailbox, gfp_mask);
560	if (!mailbox)
561		return ERR_PTR(-ENOMEM);
562
563	mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
564	if (!mailbox->buf) {
565		kfree(mailbox);
566		return ERR_PTR(-ENOMEM);
567	}
568
569	return mailbox;
570}
571
572void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
573{
574	if (!mailbox)
575		return;
576
577	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
578	kfree(mailbox);
579}
580
581int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
582{
583	u64 out;
584	int ret;
585
586	ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status);
587
588	if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
589		mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, "
590			   "sladdr=%d, SPD source=%s\n",
591			   (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
592			   (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM");
593
594	return ret;
595}
596
597int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
598{
599	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status);
600}
601
/*
 * Common worker for MAP_FA / MAP_ICM / MAP_ICM_AUX: walk the ICM
 * chunk list and hand (virtual address, physical|log2size) page pairs
 * to the FW, batching as many 16-byte entries as fit in one mailbox
 * per command.  virt == -1 means no virtual addresses are supplied
 * (the MAP_FA case).  tc/ts accumulate the chunk count and total KB
 * for the final debug summary.
 */
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
			 u64 virt, u8 *status)
{
	struct mthca_mailbox *mailbox;
	struct mthca_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mthca_icm_first(icm, &iter);
	     !mthca_icm_last(&iter);
	     mthca_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
		if (lg < MTHCA_ICM_PAGE_SHIFT) {
			mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				   MTHCA_ICM_PAGE_SIZE,
				   (unsigned long long) mthca_icm_addr(&iter),
				   mthca_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}
		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			/* Low bits of the physical address carry the log2 page size. */
			pages[nent * 2 + 1] =
				cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
					    (lg - MTHCA_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			/* Mailbox full (16 bytes per entry): flush this batch. */
			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
						CMD_TIME_CLASS_B, status);
				if (err || *status)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
				CMD_TIME_CLASS_B, status);

	switch (op) {
	case CMD_MAP_FA:
		mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case CMD_MAP_ICM_AUX:
		mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case CMD_MAP_ICM:
		mthca_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			  tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
680
681int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
682{
683	return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status);
684}
685
686int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
687{
688	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
689}
690
691int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
692{
693	return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
694}
695
/*
 * Map the FW command doorbell region, provided all eight doorbell
 * offsets reported by QUERY_FW fall within a single page.  'base' is
 * the device address of the region; its BAR 2 offset is derived by
 * masking with the BAR length.  On success MTHCA_CMD_POST_DOORBELLS
 * is set; on any failure we silently fall back to HCR posting.
 */
static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
{
	unsigned long addr;
	u16 max_off = 0;
	int i;

	for (i = 0; i < 8; ++i)
		max_off = max(max_off, dev->cmd.dbell_offsets[i]);

	if ((base & PAGE_MASK) != ((base + max_off) & PAGE_MASK)) {
		mthca_warn(dev, "Firmware doorbell region at 0x%016llx, "
			   "length 0x%x crosses a page boundary\n",
			   (unsigned long long) base, max_off);
		return;
	}

	addr = pci_resource_start(dev->pdev, 2) +
		((pci_resource_len(dev->pdev, 2) - 1) & base);
	dev->cmd.dbell_map = ioremap(addr, max_off + sizeof(u32));
	if (!dev->cmd.dbell_map)
		return;

	dev->cmd.flags |= MTHCA_CMD_POST_DOORBELLS;
	mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
}
721
/*
 * QUERY_FW: read the firmware version, the supported command queue
 * depth, the catastrophic error buffer location, optional
 * command-doorbell support, and the FW memory layout (ICM page count
 * and EQ register bases on memfree HCAs, a fixed DDR range on Tavor).
 */
int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u32 *outbox;
	u64 base;
	u32 tmp;
	int err = 0;
	u8 lg;
	int i;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38

#define QUERY_FW_CMD_DB_EN_OFFSET      0x10
#define QUERY_FW_CMD_DB_OFFSET         0x50
#define QUERY_FW_CMD_DB_BASE           0x60

#define QUERY_FW_START_OFFSET          0x20
#define QUERY_FW_END_OFFSET            0x28

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
			    CMD_TIME_CLASS_A, status);

	if (err)
		goto out;

	MTHCA_GET(dev->fw_ver,   outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) |
		((dev->fw_ver & 0xffff0000ull) >> 16) |
		((dev->fw_ver & 0x0000ffffull) << 16);

	/* Command queue depth is reported as a log2 value. */
	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	dev->cmd.max_cmds = 1 << lg;

	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);

	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);

	mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
		  (unsigned long long) dev->catas_err.addr, dev->catas_err.size);

	MTHCA_GET(tmp, outbox, QUERY_FW_CMD_DB_EN_OFFSET);
	if (tmp & 0x1) {
		mthca_dbg(dev, "FW supports commands through doorbells\n");

		/* Read the doorbell base and the per-dword offsets (16 bits each). */
		MTHCA_GET(base, outbox, QUERY_FW_CMD_DB_BASE);
		for (i = 0; i < MTHCA_CMD_NUM_DBELL_DWORDS; ++i)
			MTHCA_GET(dev->cmd.dbell_offsets[i], outbox,
				  QUERY_FW_CMD_DB_OFFSET + (i << 1));

		mthca_setup_cmd_doorbells(dev, base);
	}

	if (mthca_is_memfree(dev)) {
		MTHCA_GET(dev->fw.arbel.fw_pages,       outbox, QUERY_FW_SIZE_OFFSET);
		MTHCA_GET(dev->fw.arbel.clr_int_base,   outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_arm_base,    outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
		mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);

		/*
		 * Round up number of system pages needed in case
		 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
		 */
		dev->fw.arbel.fw_pages =
			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
				(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);

		mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
			  (unsigned long long) dev->fw.arbel.clr_int_base,
			  (unsigned long long) dev->fw.arbel.eq_arm_base,
			  (unsigned long long) dev->fw.arbel.eq_set_ci_base);
	} else {
		MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
		MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);

		mthca_dbg(dev, "FW size %d KB (start %llx, end %llx)\n",
			  (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
			  (unsigned long long) dev->fw.tavor.fw_start,
			  (unsigned long long) dev->fw.tavor.fw_end);
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
827
/*
 * ENABLE_LAM: enable HCA-attached local memory and read back the DDR
 * address range plus the info byte.  MTHCA_CMD_STAT_LAM_NOT_PRE is
 * not treated as a failure here; the caller inspects *status.
 */
int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u8 info;
	u32 *outbox;
	int err = 0;

#define ENABLE_LAM_OUT_SIZE         0x100
#define ENABLE_LAM_START_OFFSET     0x00
#define ENABLE_LAM_END_OFFSET       0x08
#define ENABLE_LAM_INFO_OFFSET      0x13

#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
#define ENABLE_LAM_INFO_ECC_MASK    0x3

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
			    CMD_TIME_CLASS_C, status);

	if (err)
		goto out;

	/* No pre-configured LAM: nothing to read back, let caller decide. */
	if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
		goto out;

	MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
	MTHCA_GET(dev->ddr_end,   outbox, ENABLE_LAM_END_OFFSET);
	MTHCA_GET(info,           outbox, ENABLE_LAM_INFO_OFFSET);

	/* Cross-check FW's hidden-memory flag against what PCI config showed. */
	if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) !=
	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		mthca_info(dev, "FW reports that HCA-attached memory "
			   "is %s hidden; does not match PCI config\n",
			   (info & ENABLE_LAM_INFO_HIDDEN_FLAG) ?
			   "" : "not");
	}
	if (info & ENABLE_LAM_INFO_HIDDEN_FLAG)
		mthca_dbg(dev, "HCA-attached memory is hidden.\n");

	mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n",
		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
		  (unsigned long long) dev->ddr_start,
		  (unsigned long long) dev->ddr_end);

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
880
881int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
882{
883	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
884}
885
/*
 * QUERY_DDR: read the HCA-attached memory (DDR) address range and the
 * info byte, and sanity-check the FW's hidden-memory flag against the
 * PCI configuration.  Output layout mirrors ENABLE_LAM.
 */
int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u8 info;
	u32 *outbox;
	int err = 0;

#define QUERY_DDR_OUT_SIZE         0x100
#define QUERY_DDR_START_OFFSET     0x00
#define QUERY_DDR_END_OFFSET       0x08
#define QUERY_DDR_INFO_OFFSET      0x13

#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
#define QUERY_DDR_INFO_ECC_MASK    0x3

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
			    CMD_TIME_CLASS_A, status);

	if (err)
		goto out;

	MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET);
	MTHCA_GET(dev->ddr_end,   outbox, QUERY_DDR_END_OFFSET);
	MTHCA_GET(info,           outbox, QUERY_DDR_INFO_OFFSET);

	/* Cross-check FW's hidden-memory flag against what PCI config showed. */
	if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) !=
	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		mthca_info(dev, "FW reports that HCA-attached memory "
			   "is %s hidden; does not match PCI config\n",
			   (info & QUERY_DDR_INFO_HIDDEN_FLAG) ?
			   "" : "not");
	}
	if (info & QUERY_DDR_INFO_HIDDEN_FLAG)
		mthca_dbg(dev, "HCA-attached memory is hidden.\n");

	mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n",
		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
		  (unsigned long long) dev->ddr_start,
		  (unsigned long long) dev->ddr_end);

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
935
936int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
937			struct mthca_dev_lim *dev_lim, u8 *status)
938{
939	struct mthca_mailbox *mailbox;
940	u32 *outbox;
941	u8 field;
942	u16 size;
943	u16 stat_rate;
944	int err;
945
946#define QUERY_DEV_LIM_OUT_SIZE             0x100
947#define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET     0x10
948#define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET      0x11
949#define QUERY_DEV_LIM_RSVD_QP_OFFSET        0x12
950#define QUERY_DEV_LIM_MAX_QP_OFFSET         0x13
951#define QUERY_DEV_LIM_RSVD_SRQ_OFFSET       0x14
952#define QUERY_DEV_LIM_MAX_SRQ_OFFSET        0x15
953#define QUERY_DEV_LIM_RSVD_EEC_OFFSET       0x16
954#define QUERY_DEV_LIM_MAX_EEC_OFFSET        0x17
955#define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET      0x19
956#define QUERY_DEV_LIM_RSVD_CQ_OFFSET        0x1a
957#define QUERY_DEV_LIM_MAX_CQ_OFFSET         0x1b
958#define QUERY_DEV_LIM_MAX_MPT_OFFSET        0x1d
959#define QUERY_DEV_LIM_RSVD_EQ_OFFSET        0x1e
960#define QUERY_DEV_LIM_MAX_EQ_OFFSET         0x1f
961#define QUERY_DEV_LIM_RSVD_MTT_OFFSET       0x20
962#define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET     0x21
963#define QUERY_DEV_LIM_RSVD_MRW_OFFSET       0x22
964#define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET    0x23
965#define QUERY_DEV_LIM_MAX_AV_OFFSET         0x27
966#define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET     0x29
967#define QUERY_DEV_LIM_MAX_RES_QP_OFFSET     0x2b
968#define QUERY_DEV_LIM_MAX_RDMA_OFFSET       0x2f
969#define QUERY_DEV_LIM_RSZ_SRQ_OFFSET        0x33
970#define QUERY_DEV_LIM_ACK_DELAY_OFFSET      0x35
971#define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
972#define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
973#define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
974#define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET   0x3c
975#define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
976#define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
977#define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
978#define QUERY_DEV_LIM_UAR_SZ_OFFSET         0x49
979#define QUERY_DEV_LIM_PAGE_SZ_OFFSET        0x4b
980#define QUERY_DEV_LIM_MAX_SG_OFFSET         0x51
981#define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET    0x52
982#define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET      0x55
983#define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56
984#define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET     0x61
985#define QUERY_DEV_LIM_RSVD_MCG_OFFSET       0x62
986#define QUERY_DEV_LIM_MAX_MCG_OFFSET        0x63
987#define QUERY_DEV_LIM_RSVD_PD_OFFSET        0x64
988#define QUERY_DEV_LIM_MAX_PD_OFFSET         0x65
989#define QUERY_DEV_LIM_RSVD_RDD_OFFSET       0x66
990#define QUERY_DEV_LIM_MAX_RDD_OFFSET        0x67
991#define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET   0x80
992#define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET   0x82
993#define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET  0x84
994#define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET  0x86
995#define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET   0x88
996#define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET   0x8a
997#define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET   0x8c
998#define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET   0x8e
999#define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET   0x90
1000#define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET   0x92
1001#define QUERY_DEV_LIM_PBL_SZ_OFFSET         0x96
1002#define QUERY_DEV_LIM_BMME_FLAGS_OFFSET     0x97
1003#define QUERY_DEV_LIM_RSVD_LKEY_OFFSET      0x98
1004#define QUERY_DEV_LIM_LAMR_OFFSET           0x9f
1005#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET     0xa0
1006
1007	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1008	if (IS_ERR(mailbox))
1009		return PTR_ERR(mailbox);
1010	outbox = mailbox->buf;
1011
1012	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
1013			    CMD_TIME_CLASS_A, status);
1014
1015	if (err)
1016		goto out;
1017
1018	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
1019	dev_lim->reserved_qps = 1 << (field & 0xf);
1020	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
1021	dev_lim->max_qps = 1 << (field & 0x1f);
1022	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET);
1023	dev_lim->reserved_srqs = 1 << (field >> 4);
1024	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET);
1025	dev_lim->max_srqs = 1 << (field & 0x1f);
1026	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET);
1027	dev_lim->reserved_eecs = 1 << (field & 0xf);
1028	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET);
1029	dev_lim->max_eecs = 1 << (field & 0x1f);
1030	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET);
1031	dev_lim->max_cq_sz = 1 << field;
1032	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET);
1033	dev_lim->reserved_cqs = 1 << (field & 0xf);
1034	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET);
1035	dev_lim->max_cqs = 1 << (field & 0x1f);
1036	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET);
1037	dev_lim->max_mpts = 1 << (field & 0x3f);
1038	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET);
1039	dev_lim->reserved_eqs = 1 << (field & 0xf);
1040	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET);
1041	dev_lim->max_eqs = 1 << (field & 0x7);
1042	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
1043	if (mthca_is_memfree(dev))
1044		dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
1045					       MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE;
1046	else
1047		dev_lim->reserved_mtts = 1 << (field >> 4);
1048	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
1049	dev_lim->max_mrw_sz = 1 << field;
1050	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET);
1051	dev_lim->reserved_mrws = 1 << (field & 0xf);
1052	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET);
1053	dev_lim->max_mtt_seg = 1 << (field & 0x3f);
1054	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET);
1055	dev_lim->max_requester_per_qp = 1 << (field & 0x3f);
1056	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET);
1057	dev_lim->max_responder_per_qp = 1 << (field & 0x3f);
1058	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET);
1059	dev_lim->max_rdma_global = 1 << (field & 0x3f);
1060	MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET);
1061	dev_lim->local_ca_ack_delay = field & 0x1f;
1062	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET);
1063	dev_lim->max_mtu        = field >> 4;
1064	dev_lim->max_port_width = field & 0xf;
1065	MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET);
1066	dev_lim->max_vl    = field >> 4;
1067	dev_lim->num_ports = field & 0xf;
1068	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
1069	dev_lim->max_gids = 1 << (field & 0xf);
1070	MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
1071	dev_lim->stat_rate_support = stat_rate;
1072	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
1073	dev_lim->max_pkeys = 1 << (field & 0xf);
1074	MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
1075	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET);
1076	dev_lim->reserved_uars = field >> 4;
1077	MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET);
1078	dev_lim->uar_size = 1 << ((field & 0x3f) + 20);
1079	MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET);
1080	dev_lim->min_page_sz = 1 << field;
1081	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET);
1082	dev_lim->max_sg = field;
1083
1084	MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET);
1085	dev_lim->max_desc_sz = size;
1086
1087	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET);
1088	dev_lim->max_qp_per_mcg = 1 << field;
1089	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET);
1090	dev_lim->reserved_mgms = field & 0xf;
1091	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET);
1092	dev_lim->max_mcgs = 1 << field;
1093	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET);
1094	dev_lim->reserved_pds = field >> 4;
1095	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
1096	dev_lim->max_pds = 1 << (field & 0x3f);
1097	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET);
1098	dev_lim->reserved_rdds = field >> 4;
1099	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET);
1100	dev_lim->max_rdds = 1 << (field & 0x3f);
1101
1102	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET);
1103	dev_lim->eec_entry_sz = size;
1104	MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET);
1105	dev_lim->qpc_entry_sz = size;
1106	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET);
1107	dev_lim->eeec_entry_sz = size;
1108	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET);
1109	dev_lim->eqpc_entry_sz = size;
1110	MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET);
1111	dev_lim->eqc_entry_sz = size;
1112	MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET);
1113	dev_lim->cqc_entry_sz = size;
1114	MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET);
1115	dev_lim->srq_entry_sz = size;
1116	MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
1117	dev_lim->uar_scratch_entry_sz = size;
1118
1119	if (mthca_is_memfree(dev)) {
1120		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1121		dev_lim->max_srq_sz = 1 << field;
1122		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1123		dev_lim->max_qp_sz = 1 << field;
1124		MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
1125		dev_lim->hca.arbel.resize_srq = field & 1;
1126		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
1127		dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
1128		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
1129		dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz);
1130		MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
1131		dev_lim->mpt_entry_sz = size;
1132		MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
1133		dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f);
1134		MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox,
1135			  QUERY_DEV_LIM_BMME_FLAGS_OFFSET);
1136		MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox,
1137			  QUERY_DEV_LIM_RSVD_LKEY_OFFSET);
1138		MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET);
1139		dev_lim->hca.arbel.lam_required = field & 1;
1140		MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox,
1141			  QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET);
1142
1143		if (dev_lim->hca.arbel.bmme_flags & 1)
1144			mthca_dbg(dev, "Base MM extensions: yes "
1145				  "(flags %d, max PBL %d, rsvd L_Key %08x)\n",
1146				  dev_lim->hca.arbel.bmme_flags,
1147				  dev_lim->hca.arbel.max_pbl_sz,
1148				  dev_lim->hca.arbel.reserved_lkey);
1149		else
1150			mthca_dbg(dev, "Base MM extensions: no\n");
1151
1152		mthca_dbg(dev, "Max ICM size %lld MB\n",
1153			  (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20);
1154	} else {
1155		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1156		dev_lim->max_srq_sz = (1 << field) - 1;
1157		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1158		dev_lim->max_qp_sz = (1 << field) - 1;
1159		MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
1160		dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
1161		dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
1162	}
1163
1164	mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1165		  dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
1166	mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1167		  dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
1168	mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1169		  dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
1170	mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
1171		  dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz);
1172	mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
1173		  dev_lim->reserved_mrws, dev_lim->reserved_mtts);
1174	mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1175		  dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
1176	mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
1177		  dev_lim->max_pds, dev_lim->reserved_mgms);
1178	mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1179		  dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
1180
1181	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
1182
1183out:
1184	mthca_free_mailbox(dev, mailbox);
1185	return err;
1186}
1187
1188static void get_board_id(void *vsd, char *board_id)
1189{
1190	int i;
1191
1192#define VSD_OFFSET_SIG1		0x00
1193#define VSD_OFFSET_SIG2		0xde
1194#define VSD_OFFSET_MLX_BOARD_ID	0xd0
1195#define VSD_OFFSET_TS_BOARD_ID	0x20
1196
1197#define VSD_SIGNATURE_TOPSPIN	0x5ad
1198
1199	memset(board_id, 0, MTHCA_BOARD_ID_LEN);
1200
1201	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1202	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1203		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
1204	} else {
1205		/*
1206		 * The board ID is a string but the firmware byte
1207		 * swaps each 4-byte word before passing it back to
1208		 * us.  Therefore we need to swab it before printing.
1209		 */
1210		for (i = 0; i < 4; ++i)
1211			((u32 *) board_id)[i] =
1212				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1213	}
1214}
1215
1216int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1217			struct mthca_adapter *adapter, u8 *status)
1218{
1219	struct mthca_mailbox *mailbox;
1220	u32 *outbox;
1221	int err;
1222
1223#define QUERY_ADAPTER_OUT_SIZE             0x100
1224#define QUERY_ADAPTER_VENDOR_ID_OFFSET     0x00
1225#define QUERY_ADAPTER_DEVICE_ID_OFFSET     0x04
1226#define QUERY_ADAPTER_REVISION_ID_OFFSET   0x08
1227#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
1228#define QUERY_ADAPTER_VSD_OFFSET           0x20
1229
1230	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1231	if (IS_ERR(mailbox))
1232		return PTR_ERR(mailbox);
1233	outbox = mailbox->buf;
1234
1235	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
1236			    CMD_TIME_CLASS_A, status);
1237
1238	if (err)
1239		goto out;
1240
1241	MTHCA_GET(adapter->vendor_id, outbox,   QUERY_ADAPTER_VENDOR_ID_OFFSET);
1242	MTHCA_GET(adapter->device_id, outbox,   QUERY_ADAPTER_DEVICE_ID_OFFSET);
1243	MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
1244	MTHCA_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
1245
1246	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1247		     adapter->board_id);
1248
1249out:
1250	mthca_free_mailbox(dev, mailbox);
1251	return err;
1252}
1253
/*
 * INIT_HCA: hand the firmware the HCA-wide configuration -- the base
 * addresses and log-sizes of the context tables (QPC/EEC/CQC/EQC/RDB),
 * multicast and translation (TPT) table layout, and UAR parameters --
 * and start the HCA.  The layout of the 0x200-byte input mailbox
 * follows the firmware's INIT_HCA command format (offsets below).
 */
int mthca_INIT_HCA(struct mthca_dev *dev,
		   struct mthca_init_hca_param *param,
		   u8 *status)
{
	struct mthca_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE             	 0x200
#define INIT_HCA_FLAGS1_OFFSET           0x00c
#define INIT_HCA_FLAGS2_OFFSET           0x014
#define INIT_HCA_QPC_OFFSET          	 0x020
#define  INIT_HCA_QPC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x10)
#define  INIT_HCA_LOG_QP_OFFSET      	 (INIT_HCA_QPC_OFFSET + 0x17)
#define  INIT_HCA_EEC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x20)
#define  INIT_HCA_LOG_EEC_OFFSET     	 (INIT_HCA_QPC_OFFSET + 0x27)
#define  INIT_HCA_SRQC_BASE_OFFSET   	 (INIT_HCA_QPC_OFFSET + 0x28)
#define  INIT_HCA_LOG_SRQ_OFFSET     	 (INIT_HCA_QPC_OFFSET + 0x2f)
#define  INIT_HCA_CQC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x30)
#define  INIT_HCA_LOG_CQ_OFFSET      	 (INIT_HCA_QPC_OFFSET + 0x37)
#define  INIT_HCA_EQPC_BASE_OFFSET   	 (INIT_HCA_QPC_OFFSET + 0x40)
#define  INIT_HCA_EEEC_BASE_OFFSET   	 (INIT_HCA_QPC_OFFSET + 0x50)
#define  INIT_HCA_EQC_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x60)
#define  INIT_HCA_LOG_EQ_OFFSET      	 (INIT_HCA_QPC_OFFSET + 0x67)
#define  INIT_HCA_RDB_BASE_OFFSET    	 (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_UDAV_OFFSET         	 0x0b0
#define  INIT_HCA_UDAV_LKEY_OFFSET   	 (INIT_HCA_UDAV_OFFSET + 0x0)
#define  INIT_HCA_UDAV_PD_OFFSET     	 (INIT_HCA_UDAV_OFFSET + 0x4)
#define INIT_HCA_MCAST_OFFSET        	 0x0c0
#define  INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
#define  INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define  INIT_HCA_MC_HASH_SZ_OFFSET      (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET              0x0f0
#define  INIT_HCA_MPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x00)
#define  INIT_HCA_MTT_SEG_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x09)
#define  INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
#define  INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_UAR_OFFSET              0x120
#define  INIT_HCA_UAR_BASE_OFFSET        (INIT_HCA_UAR_OFFSET + 0x00)
#define  INIT_HCA_UARC_SZ_OFFSET         (INIT_HCA_UAR_OFFSET + 0x09)
#define  INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
#define  INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
#define  INIT_HCA_UAR_CTX_BASE_OFFSET    (INIT_HCA_UAR_OFFSET + 0x18)

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	/* NOTE(review): flags1 bit 0 presumably enables the Sinai
	 * performance optimization -- set only when the device was
	 * detected as Sinai-optimizable. */
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		MTHCA_PUT(inbox, 0x1, INIT_HCA_FLAGS1_OFFSET);

	/* Tell the firmware the host's byte order via bit 1 of flags2:
	 * clear for little-endian hosts, set for big-endian. */
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);

	/* We leave wqe_quota, responder_exu, etc as 0 (default) */

	/* QPC/EEC/CQC/EQC/RDB attributes */

	MTHCA_PUT(inbox, param->qpc_base,     INIT_HCA_QPC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_qps,  INIT_HCA_LOG_QP_OFFSET);
	MTHCA_PUT(inbox, param->eec_base,     INIT_HCA_EEC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET);
	MTHCA_PUT(inbox, param->srqc_base,    INIT_HCA_SRQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
	MTHCA_PUT(inbox, param->cqc_base,     INIT_HCA_CQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_cqs,  INIT_HCA_LOG_CQ_OFFSET);
	MTHCA_PUT(inbox, param->eqpc_base,    INIT_HCA_EQPC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->eeec_base,    INIT_HCA_EEEC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->eqc_base,     INIT_HCA_EQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_eqs,  INIT_HCA_LOG_EQ_OFFSET);
	MTHCA_PUT(inbox, param->rdb_base,     INIT_HCA_RDB_BASE_OFFSET);

	/* UD AV attributes */

	/* multicast attributes */

	MTHCA_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
	MTHCA_PUT(inbox, param->mc_hash_sz,      INIT_HCA_MC_HASH_SZ_OFFSET);
	MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

	/* TPT attributes */

	MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
	/* MTT segment size is only meaningful for Tavor-mode devices. */
	if (!mthca_is_memfree(dev))
		MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
	MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);

	/* UAR attributes */
	{
		/* UAR page size is encoded as log2(page size in KB) - 2,
		 * i.e. PAGE_SHIFT - 12. */
		u8 uar_page_sz = PAGE_SHIFT - 12;
		MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	}

	MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);

	/* The UAR context parameters exist only on mem-free (Arbel) HCAs. */
	if (mthca_is_memfree(dev)) {
		MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
		MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
		MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
	}

	err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);

	mthca_free_mailbox(dev, mailbox);
	return err;
}
1374
1375int mthca_INIT_IB(struct mthca_dev *dev,
1376		  struct mthca_init_ib_param *param,
1377		  int port, u8 *status)
1378{
1379	struct mthca_mailbox *mailbox;
1380	u32 *inbox;
1381	int err;
1382	u32 flags;
1383
1384#define INIT_IB_IN_SIZE          56
1385#define INIT_IB_FLAGS_OFFSET     0x00
1386#define INIT_IB_FLAG_SIG         (1 << 18)
1387#define INIT_IB_FLAG_NG          (1 << 17)
1388#define INIT_IB_FLAG_G0          (1 << 16)
1389#define INIT_IB_VL_SHIFT         4
1390#define INIT_IB_PORT_WIDTH_SHIFT 8
1391#define INIT_IB_MTU_SHIFT        12
1392#define INIT_IB_MAX_GID_OFFSET   0x06
1393#define INIT_IB_MAX_PKEY_OFFSET  0x0a
1394#define INIT_IB_GUID0_OFFSET     0x10
1395#define INIT_IB_NODE_GUID_OFFSET 0x18
1396#define INIT_IB_SI_GUID_OFFSET   0x20
1397
1398	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1399	if (IS_ERR(mailbox))
1400		return PTR_ERR(mailbox);
1401	inbox = mailbox->buf;
1402
1403	memset(inbox, 0, INIT_IB_IN_SIZE);
1404
1405	flags = 0;
1406	flags |= param->set_guid0     ? INIT_IB_FLAG_G0  : 0;
1407	flags |= param->set_node_guid ? INIT_IB_FLAG_NG  : 0;
1408	flags |= param->set_si_guid   ? INIT_IB_FLAG_SIG : 0;
1409	flags |= param->vl_cap << INIT_IB_VL_SHIFT;
1410	flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
1411	flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
1412	MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
1413
1414	MTHCA_PUT(inbox, param->gid_cap,   INIT_IB_MAX_GID_OFFSET);
1415	MTHCA_PUT(inbox, param->pkey_cap,  INIT_IB_MAX_PKEY_OFFSET);
1416	MTHCA_PUT(inbox, param->guid0,     INIT_IB_GUID0_OFFSET);
1417	MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
1418	MTHCA_PUT(inbox, param->si_guid,   INIT_IB_SI_GUID_OFFSET);
1419
1420	err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
1421			CMD_TIME_CLASS_A, status);
1422
1423	mthca_free_mailbox(dev, mailbox);
1424	return err;
1425}
1426
/* CLOSE_IB: shut down IB port @port (no mailbox needed). */
int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
{
	return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status);
}
1431
/*
 * CLOSE_HCA: stop the HCA.  @panic is passed through as the command's
 * op_modifier (non-zero when closing after a catastrophic error).
 */
int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
{
	return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, HZ, status);
}
1436
1437int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1438		 int port, u8 *status)
1439{
1440	struct mthca_mailbox *mailbox;
1441	u32 *inbox;
1442	int err;
1443	u32 flags = 0;
1444
1445#define SET_IB_IN_SIZE         0x40
1446#define SET_IB_FLAGS_OFFSET    0x00
1447#define SET_IB_FLAG_SIG        (1 << 18)
1448#define SET_IB_FLAG_RQK        (1 <<  0)
1449#define SET_IB_CAP_MASK_OFFSET 0x04
1450#define SET_IB_SI_GUID_OFFSET  0x08
1451
1452	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1453	if (IS_ERR(mailbox))
1454		return PTR_ERR(mailbox);
1455	inbox = mailbox->buf;
1456
1457	memset(inbox, 0, SET_IB_IN_SIZE);
1458
1459	flags |= param->set_si_guid     ? SET_IB_FLAG_SIG : 0;
1460	flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0;
1461	MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
1462
1463	MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
1464	MTHCA_PUT(inbox, param->si_guid,  SET_IB_SI_GUID_OFFSET);
1465
1466	err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
1467			CMD_TIME_CLASS_B, status);
1468
1469	mthca_free_mailbox(dev, mailbox);
1470	return err;
1471}
1472
/* MAP_ICM: map the chunks of @icm into ICM space starting at @virt. */
int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
{
	return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
}
1477
1478int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
1479{
1480	struct mthca_mailbox *mailbox;
1481	__be64 *inbox;
1482	int err;
1483
1484	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1485	if (IS_ERR(mailbox))
1486		return PTR_ERR(mailbox);
1487	inbox = mailbox->buf;
1488
1489	inbox[0] = cpu_to_be64(virt);
1490	inbox[1] = cpu_to_be64(dma_addr);
1491
1492	err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
1493			CMD_TIME_CLASS_B, status);
1494
1495	mthca_free_mailbox(dev, mailbox);
1496
1497	if (!err)
1498		mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
1499			  (unsigned long long) dma_addr, (unsigned long long) virt);
1500
1501	return err;
1502}
1503
/* UNMAP_ICM: unmap @page_count ICM pages starting at ICM address @virt. */
int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
{
	mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n",
		  page_count, (unsigned long long) virt);

	return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
}
1511
/*
 * MAP_ICM_AUX: map @icm as the firmware's auxiliary ICM area.  The aux
 * area has no caller-chosen virtual address, hence virt == -1.
 */
int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
{
	return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status);
}
1516
/* UNMAP_ICM_AUX: release the entire auxiliary ICM mapping. */
int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
}
1521
1522int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
1523		       u8 *status)
1524{
1525	int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
1526				CMD_TIME_CLASS_A, status);
1527
1528	if (ret || status)
1529		return ret;
1530
1531	/*
1532	 * Round up number of system pages needed in case
1533	 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
1534	 */
1535	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
1536		(PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
1537
1538	return 0;
1539}
1540
/* SW2HW_MPT: hand MPT entry @mpt_index (contents in @mailbox) to the HCA. */
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int mpt_index, u8 *status)
{
	return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
			 CMD_TIME_CLASS_B, status);
}
1547
1548int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1549		    int mpt_index, u8 *status)
1550{
1551	return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
1552			     !mailbox, CMD_HW2SW_MPT,
1553			     CMD_TIME_CLASS_B, status);
1554}
1555
/* WRITE_MTT: write @num_mtt MTT entries from @mailbox into the MTT table. */
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int num_mtt, u8 *status)
{
	return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
			 CMD_TIME_CLASS_B, status);
}
1562
/* SYNC_TPT: synchronize the TPT after MPT/MTT updates (no parameters). */
int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
}
1567
1568int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
1569		 int eq_num, u8 *status)
1570{
1571	mthca_dbg(dev, "%s mask %016llx for eqn %d\n",
1572		  unmap ? "Clearing" : "Setting",
1573		  (unsigned long long) event_mask, eq_num);
1574	return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
1575			 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
1576}
1577
/* SW2HW_EQ: hand EQ context @eq_num (contents in @mailbox) to the HCA. */
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int eq_num, u8 *status)
{
	return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
			 CMD_TIME_CLASS_A, status);
}
1584
/* HW2SW_EQ: take EQ @eq_num back from the HCA; its context is written
 * into @mailbox. */
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int eq_num, u8 *status)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
			     CMD_HW2SW_EQ,
			     CMD_TIME_CLASS_A, status);
}
1592
/* SW2HW_CQ: hand CQ context @cq_num (contents in @mailbox) to the HCA. */
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int cq_num, u8 *status)
{
	return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
			CMD_TIME_CLASS_A, status);
}
1599
/* HW2SW_CQ: take CQ @cq_num back from the HCA; its context is written
 * into @mailbox. */
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		   int cq_num, u8 *status)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
			     CMD_HW2SW_CQ,
			     CMD_TIME_CLASS_A, status);
}
1607
1608int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
1609		    u8 *status)
1610{
1611	struct mthca_mailbox *mailbox;
1612	__be32 *inbox;
1613	int err;
1614
1615#define RESIZE_CQ_IN_SIZE		0x40
1616#define RESIZE_CQ_LOG_SIZE_OFFSET	0x0c
1617#define RESIZE_CQ_LKEY_OFFSET		0x1c
1618
1619	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1620	if (IS_ERR(mailbox))
1621		return PTR_ERR(mailbox);
1622	inbox = mailbox->buf;
1623
1624	memset(inbox, 0, RESIZE_CQ_IN_SIZE);
1625	/*
1626	 * Leave start address fields zeroed out -- mthca assumes that
1627	 * MRs for CQs always start at virtual address 0.
1628	 */
1629	MTHCA_PUT(inbox, log_size, RESIZE_CQ_LOG_SIZE_OFFSET);
1630	MTHCA_PUT(inbox, lkey,     RESIZE_CQ_LKEY_OFFSET);
1631
1632	err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
1633			CMD_TIME_CLASS_B, status);
1634
1635	mthca_free_mailbox(dev, mailbox);
1636	return err;
1637}
1638
/* SW2HW_SRQ: hand SRQ context @srq_num (contents in @mailbox) to the HCA. */
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num, u8 *status)
{
	return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
			CMD_TIME_CLASS_A, status);
}
1645
/* HW2SW_SRQ: take SRQ @srq_num back from the HCA; its context is
 * written into @mailbox. */
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num, u8 *status)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
			     CMD_HW2SW_SRQ,
			     CMD_TIME_CLASS_A, status);
}
1653
/* QUERY_SRQ: read the current context of SRQ @num into @mailbox. */
int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
		    struct mthca_mailbox *mailbox, u8 *status)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
			     CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
}
1660
/* ARM_SRQ: set the limit of SRQ @srq_num to @limit. */
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
{
	return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
			 CMD_TIME_CLASS_B, status);
}
1666
/*
 * MODIFY_QP: move QP (or EE context, when @is_ee) number @num from
 * state @cur to state @next.  The firmware has a distinct command per
 * legal transition, looked up in the op[][] table below; @mailbox
 * carries the new QP context for non-reset transitions, and @optmask
 * selects which optional context fields to apply.
 */
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
		    enum ib_qp_state next, u32 num, int is_ee,
		    struct mthca_mailbox *mailbox, u32 optmask,
		    u8 *status)
{
	/*
	 * op[cur][next] maps a state transition to its firmware command.
	 * Unlisted transitions are 0, which is never a valid command;
	 * any->reset is special-cased via CMD_ERR2RST_QPEE below.
	 */
	static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
		[IB_QPS_RESET] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_INIT]	= CMD_RST2INIT_QPEE,
		},
		[IB_QPS_INIT]  = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_INIT]	= CMD_INIT2INIT_QPEE,
			[IB_QPS_RTR]	= CMD_INIT2RTR_QPEE,
		},
		[IB_QPS_RTR]   = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_RTR2RTS_QPEE,
		},
		[IB_QPS_RTS]   = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_RTS2RTS_QPEE,
			[IB_QPS_SQD]	= CMD_RTS2SQD_QPEE,
		},
		[IB_QPS_SQD] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_SQD2RTS_QPEE,
			[IB_QPS_SQD]	= CMD_SQD2SQD_QPEE,
		},
		[IB_QPS_SQE] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
			[IB_QPS_RTS]	= CMD_SQERR2RTS_QPEE,
		},
		[IB_QPS_ERR] = {
			[IB_QPS_RESET]	= CMD_ERR2RST_QPEE,
			[IB_QPS_ERR]	= CMD_2ERR_QPEE,
		}
	};

	u8 op_mod = 0;
	int my_mailbox = 0;	/* set when we allocated the mailbox ourselves */
	int err;

	if (op[cur][next] == CMD_ERR2RST_QPEE) {
		/* Transition to reset: no input context needed. */
		op_mod = 3;	/* don't write outbox, any->reset */

		/* For debugging */
		if (!mailbox) {
			/* Best effort: if allocation fails, fall back to
			 * op_mod 3 and no context dump. */
			mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
			if (!IS_ERR(mailbox)) {
				my_mailbox = 1;
				op_mod     = 2;	/* write outbox, any->reset */
			} else
				mailbox = NULL;
		}

		/* Bit 24 of the input modifier selects EE vs QP. */
		err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
				    (!!is_ee << 24) | num, op_mod,
				    op[cur][next], CMD_TIME_CLASS_C, status);

		/* Deliberately dead debug code: change "0" to "1" to dump
		 * the QP context returned by the firmware. */
		if (0 && mailbox) {
			int i;
			mthca_dbg(dev, "Dumping QP context:\n");
			printk(" %08x\n", be32_to_cpup(mailbox->buf));
			for (i = 0; i < 0x100 / 4; ++i) {
				if (i % 8 == 0)
					printk("[%02x] ", i * 4);
				printk(" %08x",
				       be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
				if ((i + 1) % 8 == 0)
					printk("\n");
			}
		}

		if (my_mailbox)
			mthca_free_mailbox(dev, mailbox);
	} else {
		/* Deliberately dead debug code: change "0" to "1" to dump
		 * the QP context we are about to send. */
		if (0) {
			int i;
			mthca_dbg(dev, "Dumping QP context:\n");
			printk("  opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
			for (i = 0; i < 0x100 / 4; ++i) {
				if (i % 8 == 0)
					printk("  [%02x] ", i * 4);
				printk(" %08x",
				       be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
				if ((i + 1) % 8 == 0)
					printk("\n");
			}
		}

		err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
				op_mod, op[cur][next], CMD_TIME_CLASS_C, status);
	}

	return err;
}
1770
/* QUERY_QPEE: read the context of QP (or EE, when @is_ee) @num into
 * @mailbox; bit 24 of the input modifier selects EE vs QP. */
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
		   struct mthca_mailbox *mailbox, u8 *status)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
			     CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
}
1777
1778int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
1779			  u8 *status)
1780{
1781	u8 op_mod;
1782
1783	switch (type) {
1784	case IB_QPT_SMI:
1785		op_mod = 0;
1786		break;
1787	case IB_QPT_GSI:
1788		op_mod = 1;
1789		break;
1790	case IB_QPT_RAW_IPV6:
1791		op_mod = 2;
1792		break;
1793	case IB_QPT_RAW_ETY:
1794		op_mod = 3;
1795		break;
1796	default:
1797		return -EINVAL;
1798	}
1799
1800	return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
1801			 CMD_TIME_CLASS_B, status);
1802}
1803
/*
 * MAD_IFC: pass a 256-byte MAD in @in_mad to the firmware's internal
 * SMA/GSA for @port and copy the 256-byte reply into @response_mad.
 * When @in_wc (and optionally @in_grh) are given, the receive-side
 * context is appended to the inbox so the firmware can validate keys
 * and generate traps; @ignore_mkey/@ignore_bkey suppress those checks.
 */
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
		  int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		  void *in_mad, void *response_mad, u8 *status)
{
	struct mthca_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

#define MAD_IFC_BOX_SIZE      0x400
#define MAD_IFC_MY_QPN_OFFSET 0x100
#define MAD_IFC_RQPN_OFFSET   0x108
#define MAD_IFC_SL_OFFSET     0x10c
#define MAD_IFC_G_PATH_OFFSET 0x10d
#define MAD_IFC_RLID_OFFSET   0x10e
#define MAD_IFC_PKEY_OFFSET   0x112
#define MAD_IFC_GRH_OFFSET    0x140

	inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(outmailbox)) {
		mthca_free_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	/* The MAD itself occupies the first 256 bytes of the inbox. */
	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;	/* skip M_Key check */
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;	/* skip B_Key check */

	if (in_wc) {
		u8 val;

		/* The second 256 bytes describe the receive context. */
		memset(inbox + 256, 0, 256);

		MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
		MTHCA_PUT(inbox, in_wc->src_qp,     MAD_IFC_RQPN_OFFSET);

		/* SL goes in the high nibble. */
		val = in_wc->sl << 4;
		MTHCA_PUT(inbox, val,               MAD_IFC_SL_OFFSET);

		/* Path bits, with bit 7 flagging the presence of a GRH. */
		val = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		MTHCA_PUT(inbox, val,               MAD_IFC_G_PATH_OFFSET);

		MTHCA_PUT(inbox, in_wc->slid,       MAD_IFC_RLID_OFFSET);
		MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);

		if (in_grh)
			memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);

		/* op_modifier bit 2: the extended receive context is valid. */
		op_modifier |= 0x4;

		/* Source LID rides in the upper 16 bits of the input modifier. */
		in_modifier |= in_wc->slid << 16;
	}

	err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
			    in_modifier, op_modifier,
			    CMD_MAD_IFC, CMD_TIME_CLASS_C, status);

	/* Only copy the response if both the transport and firmware succeeded. */
	if (!err && !*status)
		memcpy(response_mad, outmailbox->buf, 256);

	mthca_free_mailbox(dev, inmailbox);
	mthca_free_mailbox(dev, outmailbox);
	return err;
}
1882
/* READ_MGM: read multicast group entry @index into @mailbox. */
int mthca_READ_MGM(struct mthca_dev *dev, int index,
		   struct mthca_mailbox *mailbox, u8 *status)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
			     CMD_READ_MGM, CMD_TIME_CLASS_A, status);
}
1889
/* WRITE_MGM: write multicast group entry @index from @mailbox. */
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
		    struct mthca_mailbox *mailbox, u8 *status)
{
	return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
			 CMD_TIME_CLASS_A, status);
}
1896
1897int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1898		    u16 *hash, u8 *status)
1899{
1900	u64 imm;
1901	int err;
1902
1903	err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
1904			    CMD_TIME_CLASS_A, status);
1905
1906	*hash = imm;
1907	return err;
1908}
1909
/* NOP: issue a no-op command (100 ms timeout), used to verify that the
 * command interface is alive. */
int mthca_NOP(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100), status);
}
1914