mlx5_main.c revision 329209
/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c 329209 2018-02-13 15:13:15Z hselasky $
 */

#define	LINUXKPI_PARAM_PREFIX mlx5_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#include "fs_core.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
#endif
MODULE_VERSION(mlx5, 1);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 3");

#define NUMA_NO_NODE       -1

struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

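/*
 * Interfaces (e.g. the Ethernet and Infiniband ULPs) register on
 * intf_list; probed core devices sit on dev_list.  Each (interface,
 * device) pairing is tracked by one mlx5_device_context holding the
 * opaque pointer returned by the interface's add() callback.
 */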
struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

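/*
 * Performance profiles, selected by the prof_sel module parameter.
 * Profile 2 (the default) additionally pre-populates the MR cache:
 * each .mr_cache[i] entry gives the preallocation size and low-water
 * limit of one cache bucket.
 */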
static struct mlx5_profile profiles[] = {
	[0] = {
		.mask           = 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 17,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
	},
	[3] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 17,
	},
};

static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

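/*
 * MSI-X layout: the first MLX5_EQ_VEC_COMP_BASE vectors carry the
 * async EQs (commands, pages, events); everything above that is a
 * completion vector.  We ask for one completion vector per CPU per
 * port, clamped to what log_max_eq allows.
 */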
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int i;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
	priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);
	if (priv->msix_arr == NULL || priv->irq_info == NULL) {
		kfree(priv->irq_info);
		kfree(priv->msix_arr);
		return -ENOMEM;
	}

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8      rsvd[15];
};

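/*
 * CAP_MASK(pos, size) builds a mask of `size' consecutive bits starting
 * at bit `pos'.  MLX5_CAP_BITS_RW_MASK collects the capability bits the
 * driver is allowed to modify through SET_HCA_CAP.
 */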
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

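/*
 * The firmware encodes the pkey table size as log2(size / 128): a
 * table of 128 << n entries maps to the return value n, e.g. 4096
 * entries encode as 5.
 */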
static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
		return 0;
	}
}

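/*
 * QUERY_HCA_CAP op_mod encoding: bit 0 selects current (1) vs. maximum
 * (0) capabilities; the remaining bits select the capability type.
 */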
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
		       enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		return err;

	err = mlx5_cmd_status_to_err_v2(out);

	return err;
}

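/*
 * Query both the maximum and the currently active general caps, start
 * from the current set, and adjust: cap the pkey table at 128 entries,
 * apply the profile's log_max_qp, disable the command-interface
 * checksum, enable drain_sigerr, and advertise our UAR page size,
 * before pushing the result back with SET_HCA_CAP.
 */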
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
	if (err)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

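/*
 * If the device can serve 8-byte atomic requests in host endianness,
 * switch it away from the big-endian default; otherwise leave the
 * atomic caps untouched.
 */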
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (!MLX5_CAP_GEN(dev, atomic))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
				 HCA_CAP_OPMOD_GET_MAX);
	if (err)
		return err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
				 HCA_CAP_OPMOD_GET_CUR);
	if (err)
		return err;

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);

	kfree(set_ctx);
	return err;
}

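/*
 * Report the host's byte order to the firmware through the
 * HOST_ENDIANNESS access register; Ethernet-only (non-RoCE)
 * configurations skip the write.
 */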
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
					&he_out, sizeof(he_out),
					MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					       out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					       out, sizeof(out));
}

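/*
 * Negotiate the ISSI (interface step sequence ID) with the firmware.
 * QUERY_ISSI returns a bitmask of supported levels; we prefer level 1
 * and fall back to 0.  Firmware old enough to reject the command
 * outright (BAD_OP) only speaks ISSI 0.
 */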
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
	int err;
	u32 sup_issi;

	memset(query_in, 0, sizeof(query_in));
	memset(query_out, 0, sizeof(query_out));

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
					 query_out, sizeof(query_out));
	if (err) {
		if (((struct mlx5_outbox_hdr *)query_out)->status ==
		    MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		printf("mlx5_core: ERR: ""failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		memset(set_in, 0, sizeof(set_in));
		memset(set_out, 0, sizeof(set_out));

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
						 set_out, sizeof(set_out));
		if (err) {
			printf("mlx5_core: ERR: ""failed to set ISSI=1\n");
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
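
/*
 * Usage sketch (hypothetical caller): a ULP binding a CQ to completion
 * vector `vector' would look up the EQ first, e.g.:
 *
 *	int eqn, irqn;
 *
 *	if (mlx5_vector2eqn(mdev, vector, &eqn, &irqn) == 0)
 *		MLX5_SET(cqc, cqc, c_eqn, eqn);
 *
 * (cqc/c_eqn refer to the CQ context as laid out in mlx5_ifc; the
 * snippet is illustrative only.)
 */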

int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == eq_ix) {
			int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;

			snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
				 "%s-%d", name, eq_ix);

			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

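/*
 * Create one completion EQ per MSI-X completion vector, each with
 * MLX5_COMP_EQ_SIZE entries and named "mlx5_comp<n>" after its vector.
 */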
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

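/*
 * Bit 31 of the "initializing" word in the initialization segment
 * stays set while the firmware is still booting; wait_fw_init() polls
 * it until it clears or max_wait_mili expires.
 */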
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	u64 end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}

static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->pdev = pdev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = pci_enable_device(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg = ioremap(pci_resource_start(dev->pdev, 0),
			    sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}
	device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load, clear any previous indication of internal error;
	 * the device is up.
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
		goto err_unmap;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_core_enable_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""query board id failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "flow steering init %d\n", err);
		goto err_init_tables;
	}

	return 0;

err_init_tables:
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	unmap_bf_area(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		return err;
	}

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

err_unmap:
	iounmap(dev->iseg);

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);

err_disable:
	pci_disable_device(dev->pdev);

err_dbg:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	return err;
}

static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_cleanup_fs(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	unmap_bf_area(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		return;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	pci_disable_device(dev->pdev);
}

static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

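/*
 * Look up the per-protocol device an interface exposed for this core
 * device; returns NULL when no registered interface matches.  E.g.
 * (assuming the usual protocol constants from dev/mlx5/driver.h):
 *
 *	void *edev;
 *
 *	edev = mlx5_get_protocol_dev(mdev, MLX5_INTERFACE_PROTOCOL_ETH);
 *
 * The snippet is illustrative only.
 */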
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
			    unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	priv = &dev->priv;
	if (id)
		priv->pci_dev_data = id->driver_data;

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
		printf("mlx5_core: WARN: ""selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[prof_sel];
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	err = mlx5_dev_init(dev, pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_dev_init failed %d\n", err);
		goto out;
	}

	err = mlx5_register_device(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_register_device failed %d\n", err);
		goto out_init;
	}

	return 0;

out_init:
	mlx5_dev_cleanup(dev);
out:
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_unregister_device(dev);
	mlx5_dev_cleanup(dev);
	kfree(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one
};

static int __init init(void)
{
	int err;

	mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
	if (!mlx5_core_wq) {
		err = -ENOMEM;
		goto err_debug;
	}
	mlx5_health_init();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_health;

	return 0;

err_health:
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
err_debug:
	return err;
}

static void __exit cleanup(void)
{
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
}

module_init(init);
module_exit(cleanup);

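/*
 * Mark the device as being in internal error and complete all
 * outstanding commands with an error so their waiters can unwind.
 */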
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
	if (dev->state != MLX5_DEVICE_STATE_UP)
		return;

	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mlx5_trigger_cmd_completions(dev);
}
EXPORT_SYMBOL(mlx5_enter_error_state);
