• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/mlx4/
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/mlx4/cmd.h>
36#include <linux/cache.h>
37
38#include "fw.h"
39#include "icm.h"
40
/* Range of firmware command-interface revisions this driver supports. */
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	/* First revision that provides the per-port QUERY_PORT command. */
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

/*
 * Deliberately never defined: a MLX4_GET/MLX4_PUT on an operand of an
 * unsupported size expands to a call to one of these, so the mistake is
 * caught at link time instead of silently corrupting a mailbox.
 */
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

/* 0444: readable in sysfs, not writable at runtime. */
/* NOTE(review): declared int but registered as a bool parameter; later
 * kernels require the variable itself to be bool — confirm before rebasing. */
static int enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
53
/*
 * MLX4_GET() - read a big-endian field out of a command mailbox.
 * @dest:   lvalue receiving the value in CPU byte order; sizeof(dest)
 *          (1/2/4/8) selects the conversion width.
 * @source: base pointer of the mailbox buffer.
 * @offset: byte offset of the field within the buffer.
 *
 * Any other operand size resolves to a call to the undefined symbol
 * __buggy_use_of_MLX4_GET(), i.e. a link-time error.
 */
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

/*
 * MLX4_PUT() - store a CPU-order value into a mailbox in big-endian.
 * sizeof(source) (1/2/4/8) selects the conversion width; an unsupported
 * size fails at link time via __buggy_use_of_MLX4_PUT().
 */
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2:	*(__be16 *) __d = cpu_to_be16(source); break; \
		case 4:	*(__be32 *) __d = cpu_to_be32(source); break; \
		case 8:	*(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
77
78static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
79{
80	static const char *fname[] = {
81		[ 0] = "RC transport",
82		[ 1] = "UC transport",
83		[ 2] = "UD transport",
84		[ 3] = "XRC transport",
85		[ 4] = "reliable multicast",
86		[ 5] = "FCoIB support",
87		[ 6] = "SRQ support",
88		[ 7] = "IPoIB checksum offload",
89		[ 8] = "P_Key violation counter",
90		[ 9] = "Q_Key violation counter",
91		[10] = "VMM",
92		[12] = "DPDP",
93		[15] = "Big LSO headers",
94		[16] = "MW support",
95		[17] = "APM support",
96		[18] = "Atomic ops support",
97		[19] = "Raw multicast support",
98		[20] = "Address vector port checking support",
99		[21] = "UD multicast support",
100		[24] = "Demand paging support",
101		[25] = "Router support"
102	};
103	int i;
104
105	mlx4_dbg(dev, "DEV_CAP flags:\n");
106	for (i = 0; i < ARRAY_SIZE(fname); ++i)
107		if (fname[i] && (flags & (1 << i)))
108			mlx4_dbg(dev, "    %s\n", fname[i]);
109}
110
111int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
112{
113	struct mlx4_cmd_mailbox *mailbox;
114	u32 *inbox;
115	int err = 0;
116
117#define MOD_STAT_CFG_IN_SIZE		0x100
118
119#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
120#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003
121
122	mailbox = mlx4_alloc_cmd_mailbox(dev);
123	if (IS_ERR(mailbox))
124		return PTR_ERR(mailbox);
125	inbox = mailbox->buf;
126
127	memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
128
129	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
130	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
131
132	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
133			MLX4_CMD_TIME_CLASS_A);
134
135	mlx4_free_cmd_mailbox(dev, mailbox);
136	return err;
137}
138
139int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
140{
141	struct mlx4_cmd_mailbox *mailbox;
142	u32 *outbox;
143	u8 field;
144	u16 size;
145	u16 stat_rate;
146	int err;
147	int i;
148
149#define QUERY_DEV_CAP_OUT_SIZE		       0x100
150#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
151#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
152#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
153#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
154#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
155#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
156#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
157#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
158#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
159#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
160#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
161#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
162#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
163#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
164#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
165#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
166#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
167#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
168#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
169#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
170#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
171#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
172#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
173#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
174#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
175#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
176#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
177#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
178#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
179#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
180#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
181#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
182#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
183#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
184#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
185#define QUERY_DEV_CAP_BF_OFFSET			0x4c
186#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
187#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
188#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
189#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
190#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
191#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
192#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
193#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
194#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
195#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
196#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
197#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
198#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
199#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
200#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
201#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
202#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
203#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
204#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
205#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
206#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
207#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
208#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
209#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
210#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
211
212	mailbox = mlx4_alloc_cmd_mailbox(dev);
213	if (IS_ERR(mailbox))
214		return PTR_ERR(mailbox);
215	outbox = mailbox->buf;
216
217	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
218			   MLX4_CMD_TIME_CLASS_A);
219	if (err)
220		goto out;
221
222	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
223	dev_cap->reserved_qps = 1 << (field & 0xf);
224	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
225	dev_cap->max_qps = 1 << (field & 0x1f);
226	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
227	dev_cap->reserved_srqs = 1 << (field >> 4);
228	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
229	dev_cap->max_srqs = 1 << (field & 0x1f);
230	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
231	dev_cap->max_cq_sz = 1 << field;
232	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
233	dev_cap->reserved_cqs = 1 << (field & 0xf);
234	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
235	dev_cap->max_cqs = 1 << (field & 0x1f);
236	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
237	dev_cap->max_mpts = 1 << (field & 0x3f);
238	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
239	dev_cap->reserved_eqs = field & 0xf;
240	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
241	dev_cap->max_eqs = 1 << (field & 0xf);
242	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
243	dev_cap->reserved_mtts = 1 << (field >> 4);
244	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
245	dev_cap->max_mrw_sz = 1 << field;
246	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
247	dev_cap->reserved_mrws = 1 << (field & 0xf);
248	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
249	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
250	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
251	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
252	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
253	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
254	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
255	field &= 0x1f;
256	if (!field)
257		dev_cap->max_gso_sz = 0;
258	else
259		dev_cap->max_gso_sz = 1 << field;
260
261	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
262	dev_cap->max_rdma_global = 1 << (field & 0x3f);
263	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
264	dev_cap->local_ca_ack_delay = field & 0x1f;
265	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
266	dev_cap->num_ports = field & 0xf;
267	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
268	dev_cap->max_msg_sz = 1 << (field & 0x1f);
269	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
270	dev_cap->stat_rate_support = stat_rate;
271	MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
272	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
273	dev_cap->reserved_uars = field >> 4;
274	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
275	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
276	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
277	dev_cap->min_page_sz = 1 << field;
278
279	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
280	if (field & 0x80) {
281		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
282		dev_cap->bf_reg_size = 1 << (field & 0x1f);
283		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
284		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
285		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
286			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
287	} else {
288		dev_cap->bf_reg_size = 0;
289		mlx4_dbg(dev, "BlueFlame not available\n");
290	}
291
292	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
293	dev_cap->max_sq_sg = field;
294	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
295	dev_cap->max_sq_desc_sz = size;
296
297	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
298	dev_cap->max_qp_per_mcg = 1 << field;
299	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
300	dev_cap->reserved_mgms = field & 0xf;
301	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
302	dev_cap->max_mcgs = 1 << field;
303	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
304	dev_cap->reserved_pds = field >> 4;
305	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
306	dev_cap->max_pds = 1 << (field & 0x3f);
307
308	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
309	dev_cap->rdmarc_entry_sz = size;
310	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
311	dev_cap->qpc_entry_sz = size;
312	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
313	dev_cap->aux_entry_sz = size;
314	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
315	dev_cap->altc_entry_sz = size;
316	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
317	dev_cap->eqc_entry_sz = size;
318	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
319	dev_cap->cqc_entry_sz = size;
320	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
321	dev_cap->srq_entry_sz = size;
322	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
323	dev_cap->cmpt_entry_sz = size;
324	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
325	dev_cap->mtt_entry_sz = size;
326	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
327	dev_cap->dmpt_entry_sz = size;
328
329	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
330	dev_cap->max_srq_sz = 1 << field;
331	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
332	dev_cap->max_qp_sz = 1 << field;
333	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
334	dev_cap->resize_srq = field & 1;
335	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
336	dev_cap->max_rq_sg = field;
337	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
338	dev_cap->max_rq_desc_sz = size;
339
340	MLX4_GET(dev_cap->bmme_flags, outbox,
341		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
342	MLX4_GET(dev_cap->reserved_lkey, outbox,
343		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
344	MLX4_GET(dev_cap->max_icm_sz, outbox,
345		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
346
347	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
348		for (i = 1; i <= dev_cap->num_ports; ++i) {
349			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
350			dev_cap->max_vl[i]	   = field >> 4;
351			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
352			dev_cap->ib_mtu[i]	   = field >> 4;
353			dev_cap->max_port_width[i] = field & 0xf;
354			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
355			dev_cap->max_gids[i]	   = 1 << (field & 0xf);
356			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
357			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
358		}
359	} else {
360#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
361#define QUERY_PORT_MTU_OFFSET			0x01
362#define QUERY_PORT_ETH_MTU_OFFSET		0x02
363#define QUERY_PORT_WIDTH_OFFSET			0x06
364#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
365#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
366#define QUERY_PORT_MAX_VL_OFFSET		0x0b
367#define QUERY_PORT_MAC_OFFSET			0x10
368
369		for (i = 1; i <= dev_cap->num_ports; ++i) {
370			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
371					   MLX4_CMD_TIME_CLASS_B);
372			if (err)
373				goto out;
374
375			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
376			dev_cap->supported_port_types[i] = field & 3;
377			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
378			dev_cap->ib_mtu[i]	   = field & 0xf;
379			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
380			dev_cap->max_port_width[i] = field & 0xf;
381			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
382			dev_cap->max_gids[i]	   = 1 << (field >> 4);
383			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
384			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
385			dev_cap->max_vl[i]	   = field & 0xf;
386			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
387			dev_cap->log_max_macs[i]  = field & 0xf;
388			dev_cap->log_max_vlans[i] = field >> 4;
389			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
390			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
391		}
392	}
393
394	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
395		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
396
397	/*
398	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
399	 * we can't use any EQs whose doorbell falls on that page,
400	 * even if the EQ itself isn't reserved.
401	 */
402	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
403				    dev_cap->reserved_eqs);
404
405	mlx4_dbg(dev, "Max ICM size %lld MB\n",
406		 (unsigned long long) dev_cap->max_icm_sz >> 20);
407	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
408		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
409	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
410		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
411	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
412		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
413	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
414		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
415	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
416		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
417	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
418		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
419	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
420		 dev_cap->max_pds, dev_cap->reserved_mgms);
421	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
422		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
423	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
424		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
425		 dev_cap->max_port_width[1]);
426	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
427		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
428	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
429		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
430	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
431
432	dump_dev_cap_flags(dev, dev_cap->flags);
433
434out:
435	mlx4_free_cmd_mailbox(dev, mailbox);
436	return err;
437}
438
/*
 * Map host memory into the device via @op (MAP_FA, MAP_ICM or
 * MAP_ICM_AUX): walk the ICM chunk list and hand the firmware
 * batches of (device-virtual, physical | log2-size) page pairs.
 * @virt is the starting device-virtual address, or -1 for commands
 * that take none.  Returns 0 or a negative errno.
 */
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;	/* entries queued in the current mailbox */
	int i;
	int err = 0;
	int ts = 0, tc = 0;	/* total KB / total page count, for the log */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				   MLX4_ICM_PAGE_SIZE,
				   (unsigned long long) mlx4_icm_addr(&iter),
				   mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		/* Split the chunk into 2^lg-byte pages, one entry each. */
		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			/* Low bits of the address carry log2 size above
			 * the base ICM page size. */
			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			/* Mailbox holds 16 bytes per entry; flush when full. */
			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
						MLX4_CMD_TIME_CLASS_B);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	/* Flush the final, partially filled mailbox. */
	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			  tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
518
519int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
520{
521	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
522}
523
524int mlx4_UNMAP_FA(struct mlx4_dev *dev)
525{
526	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
527}
528
529
530int mlx4_RUN_FW(struct mlx4_dev *dev)
531{
532	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
533}
534
/*
 * QUERY_FW: read the firmware version and command-interface revision,
 * the maximum number of outstanding commands, the catastrophic-error
 * buffer location and the clear-interrupt register location.
 * Fails with -ENODEV when the interface revision is out of range.
 */
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported "
			 "command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	/* Revisions before NEW_PORT_CMDS lack the QUERY_PORT command. */
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	/* Max outstanding commands is log2-encoded. */
	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
	/* Decode the BAR number from bits 6 and up of the raw field. */
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	/* Same BAR-number encoding as catas_bar above. */
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
635
636static void get_board_id(void *vsd, char *board_id)
637{
638	int i;
639
640#define VSD_OFFSET_SIG1		0x00
641#define VSD_OFFSET_SIG2		0xde
642#define VSD_OFFSET_MLX_BOARD_ID	0xd0
643#define VSD_OFFSET_TS_BOARD_ID	0x20
644
645#define VSD_SIGNATURE_TOPSPIN	0x5ad
646
647	memset(board_id, 0, MLX4_BOARD_ID_LEN);
648
649	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
650	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
651		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
652	} else {
653		/*
654		 * The board ID is a string but the firmware byte
655		 * swaps each 4-byte word before passing it back to
656		 * us.  Therefore we need to swab it before printing.
657		 */
658		for (i = 0; i < 4; ++i)
659			((u32 *) board_id)[i] =
660				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
661	}
662}
663
664int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
665{
666	struct mlx4_cmd_mailbox *mailbox;
667	u32 *outbox;
668	int err;
669
670#define QUERY_ADAPTER_OUT_SIZE             0x100
671#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
672#define QUERY_ADAPTER_VSD_OFFSET           0x20
673
674	mailbox = mlx4_alloc_cmd_mailbox(dev);
675	if (IS_ERR(mailbox))
676		return PTR_ERR(mailbox);
677	outbox = mailbox->buf;
678
679	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
680			   MLX4_CMD_TIME_CLASS_A);
681	if (err)
682		goto out;
683
684	MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
685
686	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
687		     adapter->board_id);
688
689out:
690	mlx4_free_cmd_mailbox(dev, mailbox);
691	return err;
692}
693
/*
 * INIT_HCA: hand the firmware the ICM layout chosen by the driver
 * (context base addresses and log2 counts from @param), the global
 * flags, and the UAR parameters, then bring the HCA up.
 * Returns 0 or a negative errno from the command layer.
 */
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define	 INIT_HCA_VERSION		 2
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_QPC_OFFSET		 0x020
#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	/* Encode log2(cacheline) relative to 16 bytes in bits 7:5. */
	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		(ilog2(cache_line_size()) - 4) << 5;

	/* Set flags bit 1 to match the host byte order. */
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* multicast attributes */

	MLX4_PUT(inbox, param->mc_base,		INIT_HCA_MC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_mc_hash_sz,  INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	/* UAR page size is encoded as log2 relative to 4 KB. */
	MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* INIT_HCA can be slow; allow a 10 second timeout. */
	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
804
/*
 * INIT_PORT: bring up one HCA port.  Old firmware
 * (MLX4_FLAG_OLD_PORT_CMDS) takes a mailbox describing the port's
 * VL/width/MTU/GID/P_Key limits; new firmware takes no parameters.
 * Returns 0 or a negative errno.
 */
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		memset(inbox, 0, INIT_PORT_IN_SIZE);

		/* Pack VL cap and port-width cap into the flags word. */
		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);

		/* MTU is given in bytes: 128 << (IB MTU enum value). */
		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
858
859int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
860{
861	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
862}
863EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
864
865int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
866{
867	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
868}
869
870int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
871{
872	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
873			       MLX4_CMD_SET_ICM_SIZE,
874			       MLX4_CMD_TIME_CLASS_A);
875	if (ret)
876		return ret;
877
878	/*
879	 * Round up number of system pages needed in case
880	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
881	 */
882	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
883		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
884
885	return 0;
886}
887
888int mlx4_NOP(struct mlx4_dev *dev)
889{
890	/* Input modifier of 0x1f means "finish as soon as possible." */
891	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
892}
893