/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define	LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
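
/*
 * Link-time assertions: these helpers are declared but never defined, so
 * any MLX4_GET()/MLX4_PUT() invocation whose operand is not 1, 2, 4 or 8
 * bytes wide survives to link time and fails with an unresolved symbol.
 */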
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static int enable_qos;
module_param(enable_qos, int, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");

#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2:	*(__be16 *) __d = cpu_to_be16(source); break; \
		case 4:	*(__be32 *) __d = cpu_to_be32(source); break; \
		case 8:	*(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
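
/*
 * Illustrative usage (offsets and variables here are hypothetical): the
 * accessors dispatch on the size of their CPU-side operand, so
 *
 *	u8  byte;  MLX4_GET(byte, outbox, 0x13);	- no byte swap
 *	u16 word;  MLX4_GET(word, outbox, 0x06);	- be16_to_cpup()
 *	u32 dword = 42; MLX4_PUT(inbox, dword, 0x10);	- cpu_to_be32()
 *
 * Any other operand size falls through to the link-time assertion above.
 */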

static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[10] = "VMM",
		[12] = "DPDP",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[44] = "Cross-channel (sync_qp) operations support",
		[48] = "Counters support",
		[59] = "Port management change event support",
		[60] = "eSwitch support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "FSM (MAC anti-spoofing) support",
		[5] = "VST (control vlan insertion/stripping) support",
		[6] = "Dynamic QP updates support",
		[7] = "Loopback source checks support",
		[8] = "Device managed flow steering IPoIB support",
		[9] = "ETS configuration support",
		[10] = "ETH backplane autoneg report",
		[11] = "Ethernet Flow control statistics support",
		[12] = "Recoverable error events support",
		[13] = "Time stamping support",
		[14] = "Report driver version to FW support"
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags2:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			MLX4_CMD_QUERY_FUNC,
			MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		func->bus, func->device, func->function, func->physical_function,
		func->max_eq, func->rsvd_eqs, func->rsvd_uars);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field, port;
	u32	size;
	int	err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc
#define QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET	0xd

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c

#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC	0x40
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_PROPS_DEF_COUNTER	0x20

#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)

	if (vhcr->op_modifier == 1) {
		port = vhcr->in_modifier; /* phys-port = logical-port */
		MLX4_PUT(outbox->buf, port, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		field = 0;
		/* ensure that phy_wqe_gid bit is not set */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);

		/* ensure force vlan and force mac bits are not set
		 * and that default counter bit is set
		 */
		field = QUERY_FUNC_CAP_PROPS_DEF_COUNTER; /* def counter */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* there is always a valid counter to report: the default counter, or else the sink counter */
		field = mlx4_get_default_counter_index(dev, slave, vhcr->in_modifier);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		size = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);

	} else if (vhcr->op_modifier == 0) {
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = dev->caps.num_ports;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
	} else
		err = -EINVAL;

	return err;
}

int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32			*outbox;
	u8			field, op_modifier;
	u32			size;
	int			err = 0, quotas = 0;
	u32			in_modifier;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	in_modifier = op_modifier ? gen_or_port :
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;

		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (field & QUERY_FUNC_CAP_PROPS_DEF_COUNTER) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);
		func_cap->def_counter_index = field;
	} else {
		func_cap->def_counter_index = MLX4_SINK_COUNTER_INDEX;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */
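	/* Illustrative reading (made-up numbers): with num_mtts = 65536 and
	 * reserved_mtts = 16, 65520 MTTs are available for allocation, yet
	 * an allocated MTT may still get an index below 16.
	 */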

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE		       0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_SYNC_QP_OFFSET		0x42
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET	0x6c
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET		0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_ETS_CFG_OFFSET		0x9c
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0

	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SYNC_QP_OFFSET);
	dev_cap->sync_qp = field & 0x10;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;

	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETS_CFG_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	if (field32 & (1 << 8))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
	if (field32 & (1 << 13))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;

	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_basic_counters, outbox,
			 QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET);
	/* FW reports 256, but the real value is 255 */
	dev_cap->max_basic_counters = min_t(u32, dev_cap->max_basic_counters, 255);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT)
		MLX4_GET(dev_cap->max_extended_counters, outbox,
			 QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
			dev_cap->max_vl[i]	   = field >> 4;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
			dev_cap->ib_mtu[i]	   = field >> 4;
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
			dev_cap->max_gids[i]	   = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
		}
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
			dev_cap->supported_port_types[i] = field & 3;
			dev_cap->suggested_type[i] = (field >> 3) & 1;
			dev_cap->default_sense[i] = (field >> 4) & 1;
			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->ib_mtu[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i]	   = 1 << (field >> 4);
			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
			dev_cap->log_max_macs[i]  = field & 0xf;
			dev_cap->log_max_vlans[i] = field >> 4;
			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
			dev_cap->trans_type[i] = field32 >> 24;
			dev_cap->vendor_oui[i] = field32 & 0xffffff;
			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
		}
	}

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
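	/*
	 * Example with made-up numbers: if reserved_uars = 8, those UAR
	 * pages carry the doorbells of EQs 0..31 (4 per page), so
	 * reserved_eqs is raised to at least 32 below.
	 */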
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
		 dev_cap->max_port_width[1]);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max basic counters: %d\n", dev_cap->max_basic_counters);
	mlx4_dbg(dev, "Max extended counters: %d\n", dev_cap->max_extended_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability unconditionally to slaves */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}
	return 0;
}

int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		/* report the slave's vport default MAC instead of the physical MAC */
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32			*outbox;
	u16			field;
	int			err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			    MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
	pages = mailbox->buf;
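	/*
	 * Each 16-byte mailbox entry is a pair of big-endian u64s:
	 * pages[2n] holds the HCA virtual address (skipped when virt == -1,
	 * i.e. for MAP_FA) and pages[2n + 1] holds the physical address
	 * with the chunk's log2 page size (relative to MLX4_ICM_PAGE_SHIFT)
	 * folded into its low bits.
	 */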

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
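		/*
		 * Worked example (hypothetical chunk): addr = 0xa0000 and
		 * size = 0x40000 give ffs(0xe0000) - 1 = 17, i.e. the chunk
		 * is passed as two naturally aligned 128 KB (1 << 17) pages.
		 */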
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				   MLX4_ICM_PAGE_SIZE,
				   (unsigned long long) mlx4_icm_addr(&iter),
				   mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
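			/* running totals for the debug print below:
			 * ts counts KB, tc counts chunks
			 */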
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
						MLX4_CMD_TIME_CLASS_B,
						MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			  tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}


int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
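	/*
	 * e.g. raw 0x0002_13ec_0023 (major 2, subminor 0x13ec, minor 0x23)
	 * becomes 0x0002_0023_13ec, printed below as FW version 2.35.5100.
	 */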
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;


	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
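	/* The raw value encodes the BAR number above bit 6; the BARs are
	 * 64 bits wide, so doubling it yields the PCI BAR index (0, 2, 4).
	 */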
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, (unsigned long long)fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, (unsigned long long)fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
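	/*
	 * e.g. with 16 KB system pages (PAGE_SHIFT = 14) and 4 KB ICM
	 * pages, fw_pages = 10 rounds up to 12 and becomes 3 system pages.
	 */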
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}

static void get_board_id(void *vsd, char *board_id, char *vsdstr)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20
#define VSD_LEN			0xd0

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(vsdstr, 0, MLX4_VSD_LEN);

	for (i = 0; i < VSD_LEN / 4; i++)
		((u32 *)vsdstr)[i] =
			swab32(*(u32 *)(vsd + i * 4));

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
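		/* e.g. a stored word "MT_0" arrives as "0_TM"; swab32()
		 * restores the original byte order.
		 */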
1375		for (i = 0; i < 4; ++i)
1376			((u32 *) board_id)[i] =
1377				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1378	}
1379}
1380
1381int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1382{
1383	struct mlx4_cmd_mailbox *mailbox;
1384	u32 *outbox;
1385	int err;
1386
1387#define QUERY_ADAPTER_OUT_SIZE             0x100
1388#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
1389#define QUERY_ADAPTER_VSD_OFFSET           0x20
1390#define QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET 0x1e
1391
1392	mailbox = mlx4_alloc_cmd_mailbox(dev);
1393	if (IS_ERR(mailbox))
1394		return PTR_ERR(mailbox);
1395	outbox = mailbox->buf;
1396
1397	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1398			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1399	if (err)
1400		goto out;
1401
1402	MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
1403
1404	adapter->vsd_vendor_id = be16_to_cpup((u16 *)outbox +
1405				QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET / 2);
1406
1407	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1408		     adapter->board_id, adapter->vsd);
1409
1410out:
1411	mlx4_free_cmd_mailbox(dev, mailbox);
1412	return err;
1413}
1414
1415int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1416{
1417	struct mlx4_cmd_mailbox *mailbox;
1418	__be32 *inbox;
1419	u32 mw_enable;
1420	int err;
1421
1422#define INIT_HCA_IN_SIZE		 0x200
1423#define INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE 64
1424#define INIT_HCA_VERSION_OFFSET		 0x000
1425#define	 INIT_HCA_VERSION		 2
1426#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
1427#define INIT_HCA_FLAGS_OFFSET		 0x014
1428#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1429#define INIT_HCA_QPC_OFFSET		 0x020
1430#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
1431#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
1432#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
1433#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
1434#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
1435#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
1436#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
1437#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
1438#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
1439#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
1440#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
1441#define	INIT_HCA_NUM_SYS_EQS_OFFSET	(INIT_HCA_QPC_OFFSET + 0x6a)
1442#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
1443#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
1444#define INIT_HCA_MCAST_OFFSET		 0x0c0
1445#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
1446#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1447#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
1448#define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
1449#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1450#define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
1451#define  INIT_HCA_DRIVER_VERSION_OFFSET   0x140
1452#define  INIT_HCA_FS_PARAM_OFFSET         0x1d0
1453#define  INIT_HCA_FS_BASE_OFFSET          (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1454#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1455#define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1456#define  INIT_HCA_FS_ETH_BITS_OFFSET      (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1457#define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1458#define  INIT_HCA_FS_IB_BITS_OFFSET       (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1459#define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1460#define INIT_HCA_TPT_OFFSET		 0x0f0
1461#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
1462#define  INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
1463#define  INIT_HCA_TPT_MW_ENABLE		 (1 << 31)
1464#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
1465#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
1466#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
1467#define INIT_HCA_UAR_OFFSET		 0x120
1468#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
1469#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

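	/* Bits 7:5 encode log2(cache line size) - 4, i.e. the cache line
	 * size relative to 16 bytes; bit 4 presumably tells the firmware
	 * to honor the reported value.
	 */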
	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);

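	/* Flags bit 1 reports the host byte order to the device: cleared
	 * for little-endian hosts, set for big-endian ones.
	 */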
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* Enable fast drop performance optimization */
	if (dev->caps.fast_drop)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 7);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
		strncpy((u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET,
			DRV_NAME_FOR_FW,
			INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE - 1);
		mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n",
			 (u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET);
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with UDP unicast and TCP unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with UDP unicast and TCP unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	mw_enable = param->mw_enable ? INIT_HCA_TPT_MW_ENABLE : 0;
	MLX4_PUT(inbox, mw_enable,	   INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	u32 mw_enable;
	int err;
	u8 byte_field;

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

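	/* Recover the steering mode from the flags word and the UC
	 * steering byte that INIT_HCA programmed.
	 */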
	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}
	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz,  outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-byte EQEs enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-byte CQEs enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(mw_enable,	    outbox, INIT_HCA_TPT_MW_OFFSET);
	param->mw_enable = (mw_enable & INIT_HCA_TPT_MW_ENABLE) ==
			   INIT_HCA_TPT_MW_ENABLE;
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}

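/* The master reference-counts INIT_PORT per port: the real command is only
 * issued when the first reference is taken. For IB ports it is further
 * deferred until the paravirtualized QP0 is ready (see check_qp0_state()
 * above).
 */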
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		memset(inbox, 0, INIT_PORT_IN_SIZE);

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);

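		/* ib_mtu_cap holds the IB MTU enum (1 = 256 bytes, ...,
		 * 5 = 4096 bytes), so 128 << enum yields the MTU in bytes.
		 */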
		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);

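/* Mirror of mlx4_INIT_PORT_wrapper: drop the slave's reference and only
 * issue the real CLOSE_PORT once the last reference on the port is gone.
 */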
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	    (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}

int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
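	 * For example, with 4 KiB ICM pages on a 16 KiB-page host, 5 aux
	 * pages round up to ALIGN(5, 4) = 8 ICM pages, i.e. 8 >> 2 = 2
	 * system pages.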
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}

int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

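/* Read an arbitrary set of diagnostic counters: DIAG_RPRT fills the
 * mailbox, then each requested dword is fetched at the caller-supplied
 * offset.
 */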
int mlx4_query_diag_counters(struct mlx4_dev *dev, int array_length,
			     u8 op_modifier, u32 in_offset[],
			     u32 counter_out[])
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int ret;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	ret = mlx4_cmd_box(dev, 0, mailbox->dma, 0, op_modifier,
			   MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	for (i = 0; i < array_length; i++) {
		if (in_offset[i] > MLX4_MAILBOX_SIZE - sizeof(u32)) {
			ret = -EINVAL;
			goto out;
		}

		MLX4_GET(counter_out[i], outbox, in_offset[i]);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_query_diag_counters);

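/* MOD_STAT_CFG is a privileged command; slaves are never allowed to issue
 * it, so the wrapper unconditionally rejects the request.
 */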
int mlx4_MOD_STAT_CFG_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}

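/* Wake-on-LAN configuration is tunneled through MOD_STAT_CFG: in_mod
 * selects setup mode 5 with the port in bits 15:8, and op_mod 0x3 reads
 * the config while 0x1 writes it.
 */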
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);

enum {
	ADD_TO_MCG = 0x26,
};

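/* Worker for firmware-initiated operation requests: drain the pending
 * requests with GET_OP_REQ, perform each one on the firmware's behalf and
 * acknowledge it with a status code.
 */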
void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv, opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type_m;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	u8 rem_mcg;
	u8 prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n", err);
			goto out;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type_m = type >> 12;
		type &= 0xfff;

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *) ((u8 *) (outbox) + GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) & MGM_QPN_MASK;
			rem_mcg = ((u8 *) (&mgm->members_count))[0] & 1;
			prot = ((u8 *) (&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp, mgm->gid, prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp, mgm->gid, mgm->gid[5], 0, prot, NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
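		/* Acknowledge the request: the (positive) status in err is
		 * handed back to the firmware in the in_modifier, together
		 * with the request token.
		 */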
		err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), 1,
			       MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n", err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}