/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c 329200 2018-02-13 14:37:21Z hselasky $
 */

#define	LINUXKPI_PARAM_PREFIX mlx5_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#include "fs_core.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
#endif
MODULE_VERSION(mlx5, 1);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 3");

#define NUMA_NO_NODE       -1

struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};

static struct mlx5_profile profiles[] = {
	[0] = {
		.mask           = 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 17,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
	},
	[3] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 17,
	},
};

static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

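/*
 * Request one completion vector per online CPU and port, on top of the
 * MLX5_EQ_VEC_COMP_BASE vectors reserved for the control EQs (pages,
 * command and async events), capped by the number of EQs the device
 * exposes through log_max_eq.
 */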
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int i;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	/*
	 * No NULL checks: under FreeBSD's linuxkpi, kzalloc(GFP_KERNEL)
	 * maps to M_WAITOK and sleeps rather than failing.
	 */
	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);

	priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

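/* The firmware encodes the P_Key table size as log2(size / 128). */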
static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
		return 0;
	}
}

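/*
 * Query one capability page from firmware and cache it in
 * hca_caps_max[] or hca_caps_cur[], depending on whether the maximum
 * supported or the currently enabled values were requested.
 */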
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
		       enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		return err;

	err = mlx5_cmd_status_to_err_v2(out);

	return err;
}

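/*
 * Read the maximum and current general capabilities, apply the selected
 * profile and driver overrides on top of the current values, and write
 * the result back with SET_HCA_CAP before the HCA is initialized.
 */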
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
	if (err)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

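/*
 * Report the host's endianness to firmware through the HOST_ENDIANNESS
 * access register; Ethernet-only devices without RoCE skip this step.
 */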
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}

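/*
 * Negotiate the ISSI (interface step sequence ID) with firmware: query
 * the supported mask and switch to ISSI 1 when available. Firmware
 * that rejects the query with BAD_OP only speaks ISSI 0.
 */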
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
	int err;
	u32 sup_issi;

	memset(query_in, 0, sizeof(query_in));
	memset(query_out, 0, sizeof(query_out));

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
					 query_out, sizeof(query_out));
	if (err) {
		if (((struct mlx5_outbox_hdr *)query_out)->status ==
		    MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		printf("mlx5_core: ERR: ""failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		memset(set_in, 0, sizeof(set_in));
		memset(set_out, 0, sizeof(set_out));

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
						 set_out, sizeof(set_out));
		if (err) {
			printf("mlx5_core: ERR: ""failed to set ISSI=1\n");
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == eq_ix) {
			int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;

			snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
				 "%s-%d", name, eq_ix);

			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

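/*
 * Create one completion EQ of MLX5_COMP_EQ_SIZE entries for each
 * MSI-X completion vector granted by mlx5_enable_msix().
 */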
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

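/*
 * Map all of BAR 0 write-combining so that BlueFlame doorbells can
 * write small WQEs directly to the device instead of having it DMA
 * them from host memory.
 */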
static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

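/*
 * Firmware keeps bit 31 of the "initializing" dword in the init
 * segment set while it is still booting; wait_fw_init() polls it
 * until the bit clears or the timeout expires.
 */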
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	u64 end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}

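/*
 * Bring the device up: PCI resources and DMA masks, the command
 * interface, firmware readiness, HCA enable, ISSI negotiation,
 * boot/init pages, capabilities, interrupts, EQs, UARs and finally
 * flow steering. The error labels below unwind in reverse order.
 */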
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->pdev = pdev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = pci_enable_device(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg = ioremap(pci_resource_start(dev->pdev, 0),
			    sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}
	device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load, clear any previous indication of internal error;
	 * the device is up.
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
		goto err_unmap;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_core_enable_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = handle_hca_cap(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""query board id failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "flow steering init %d\n", err);
		goto err_init_tables;
	}

	return 0;

err_init_tables:
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	unmap_bf_area(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		return err;
	}

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

err_unmap:
	iounmap(dev->iseg);

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);

err_disable:
	pci_disable_device(dev->pdev);

err_dbg:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	return err;
}

static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_cleanup_fs(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	unmap_bf_area(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		return;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	pci_disable_device(dev->pdev);
}

static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

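/*
 * Protocol interfaces (e.g. the mlx5en Ethernet and mlx5ib Infiniband
 * ULPs) register here; a registered interface is attached via its
 * add() callback to every core device, both existing and future ones.
 */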
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
			    unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	priv = &dev->priv;
	if (id)
		priv->pci_dev_data = id->driver_data;

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
		printf("mlx5_core: WARN: ""selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[prof_sel];
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	err = mlx5_dev_init(dev, pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_dev_init failed %d\n", err);
		goto out;
	}

	err = mlx5_register_device(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_register_device failed %d\n", err);
		goto out_init;
	}

	return 0;

out_init:
	mlx5_dev_cleanup(dev);
out:
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_unregister_device(dev);
	mlx5_dev_cleanup(dev);
	kfree(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one
};

static int __init init(void)
{
	int err;

	mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
	if (!mlx5_core_wq) {
		err = -ENOMEM;
		goto err_debug;
	}
	mlx5_health_init();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_health;

	return 0;

err_health:
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
err_debug:
	return err;
}

static void __exit cleanup(void)
{
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
}

module_init(init);
module_exit(cleanup);

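/*
 * Mark the device as being in internal error and fail all outstanding
 * commands so that their waiters are released.
 */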
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
	if (dev->state != MLX5_DEVICE_STATE_UP)
		return;

	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mlx5_trigger_cmd_completions(dev);
}
EXPORT_SYMBOL(mlx5_enter_error_state);