Lines matching refs:dev in sys/dev/mlx5/mlx5_core/mlx5_main.c (FreeBSD releng/11.0)

25  * $FreeBSD: releng/11.0/sys/dev/mlx5/mlx5_core/mlx5_main.c 300676 2016-05-25 12:03:21Z hselasky $
38 #include <dev/mlx5/driver.h>
39 #include <dev/mlx5/cq.h>
40 #include <dev/mlx5/qp.h>
41 #include <dev/mlx5/srq.h>
43 #include <dev/mlx5/mlx5_ifc.h>
165 device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
168 device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
175 device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
178 device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
183 dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
192 device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
198 device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");
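The DMA warnings and errors at lines 165-183 come from the driver's DMA capability setup. A hedged sketch of the likely shape of that fallback path (the function name set_dma_caps follows the Linux mlx5 driver this port tracks): try a 64-bit mask, fall back to 32-bit, then cap the maximum DMA segment size.

static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	/* Prefer 64-bit DMA addressing; fall back to 32-bit if refused. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: " "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: " "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	/* Same two-step fallback for the coherent (consistent) mask. */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	/* Cap any single DMA segment at 2 GB (line 183 above). */
	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}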
208 static int mlx5_enable_msix(struct mlx5_core_dev *dev)
210 struct mlx5_priv *priv = &dev->priv;
212 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
216 nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
229 nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
240 static void mlx5_disable_msix(struct mlx5_core_dev *dev)
242 struct mlx5_priv *priv = &dev->priv;
244 pci_disable_msix(dev->pdev);
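mlx5_enable_msix (lines 208-229) sizes the MSI-X table from the port count and online CPU count, clamps it to what log_max_eq allows, and records how many completion vectors were actually granted. A hedged sketch of that computation; the reserved-vector constant MLX5_EQ_VEC_COMP_BASE and the msix_arr/num_comp_vectors fields follow the Linux mlx5 core.

static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;			/* line 210 */
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);	/* line 212 */
	int nvec;
	int i;

	/* One completion vector per CPU per port, plus the reserved
	 * async/command/page-request vectors below MLX5_EQ_VEC_COMP_BASE. */
	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;				/* line 216 */
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
	if (!priv->msix_arr)
		return -ENOMEM;

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	/* Accept anything from one completion vector up to the full request. */
	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);	/* line 229 */
	if (nvec < 0) {
		kfree(priv->msix_arr);
		return nvec;
	}

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
	return 0;
}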
283 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
297 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
303 mlx5_core_warn(dev,
313 memcpy(dev->hca_caps_max[cap_type], hca_caps,
317 memcpy(dev->hca_caps_cur[cap_type], hca_caps,
321 mlx5_core_warn(dev,
322 "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
332 static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
340 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
349 static int handle_hca_cap(struct mlx5_core_dev *dev)
352 struct mlx5_profile *prof = dev->profile;
359 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
363 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
369 memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
372 mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
373 mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
388 err = set_caps(dev, set_ctx, set_sz);
395 static int set_hca_ctrl(struct mlx5_core_dev *dev)
403 err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
409 static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
417 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
421 static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
430 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
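ENABLE_HCA and DISABLE_HCA (lines 409-430) are thin commands: a zeroed input with only the opcode set, run through the status-checking wrapper. A minimal hedged sketch of the enable side, assuming the mlx5_ifc layout names used by the Linux driver:

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));	/* line 417 */
}

The disable side at line 430 has the same shape, built around the DISABLE_HCA opcode instead.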
434 static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
448 err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
470 err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
477 dev->issi = 1;
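mlx5_core_set_issi first queries which ISSI (issue/interface step sequence ID) versions the firmware supports (line 448) and only then switches to ISSI 1 (lines 470, 477). A hedged sketch of that negotiation, again assuming the mlx5_ifc command layouts from the Linux driver; the real error handling, which inspects the command syndrome before falling back to ISSI 0, is condensed here.

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
					 query_out, sizeof(query_out));	/* line 448 */
	if (err) {
		/* Older firmware may not implement QUERY_ISSI at all;
		 * the driver then stays on ISSI 0. */
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
						 set_out, sizeof(set_out));	/* line 470 */
		if (err)
			return err;

		dev->issi = 1;	/* line 477 */
		return 0;
	}

	/* ISSI 0 remains the fallback when ISSI 1 is not advertised. */
	return (sup_issi & (1 << 0)) ? 0 : -EOPNOTSUPP;
}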
488 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
490 struct mlx5_eq_table *table = &dev->priv.eq_table;
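mlx5_vector2eqn resolves a completion-vector index into the EQ number and IRQ that alloc_comp_eqs (further down) recorded. A hedged sketch of the lookup over the completion-EQ list, following the Linux mlx5 core:

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;	/* line 490 */
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}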
509 int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
511 struct mlx5_priv *priv = &dev->priv;
533 static void free_comp_eqs(struct mlx5_core_dev *dev)
535 struct mlx5_eq_table *table = &dev->priv.eq_table;
542 if (mlx5_destroy_unmap_eq(dev, eq))
543 mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
551 static int alloc_comp_eqs(struct mlx5_core_dev *dev)
553 struct mlx5_eq_table *table = &dev->priv.eq_table;
568 err = mlx5_create_map_eq(dev, eq,
570 name, &dev->priv.uuari.uars[0]);
575 mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
585 free_comp_eqs(dev);
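alloc_comp_eqs creates one completion EQ per granted MSI-X completion vector, names it, and queues it on the table that mlx5_vector2eqn and free_comp_eqs walk. A hedged sketch of the loop around the mlx5_create_map_eq call at lines 568-570; constants such as MLX5_COMP_EQ_SIZE and MLX5_MAX_IRQ_NAME follow the Linux driver.

static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;	/* line 553 */
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		/* Completion vectors sit above the reserved async vectors. */
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE,
					 MLX5_COMP_EQ_SIZE, 0,
					 name, &dev->priv.uuari.uars[0]);	/* lines 568-570 */
		if (err) {
			kfree(eq);
			goto clean;
		}

		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);	/* line 575 */
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);	/* line 585 */
	return err;
}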
589 static int map_bf_area(struct mlx5_core_dev *dev)
591 resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
592 resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
594 dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
596 return dev->priv.bf_mapping ? 0 : -ENOMEM;
599 static void unmap_bf_area(struct mlx5_core_dev *dev)
601 if (dev->priv.bf_mapping)
602 io_mapping_free(dev->priv.bf_mapping);
605 static inline int fw_initializing(struct mlx5_core_dev *dev)
607 return ioread32be(&dev->iseg->initializing) >> 31;
610 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
615 while (fw_initializing(dev)) {
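fw_initializing (line 607) reads the top bit of the big-endian "initializing" word in the firmware's init segment; wait_fw_init polls it until the bit clears or a deadline passes. A hedged sketch of the likely shape of that loop; the poll-interval constant FW_INIT_WAIT_MS is named here only as an assumption.

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {		/* line 615 */
		if (time_after(jiffies, end)) {
			/* Firmware never came out of its init state. */
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);	/* assumed poll interval */
	}

	return err;
}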
626 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
628 struct mlx5_priv *priv = &dev->priv;
631 dev->pdev = pdev;
632 pci_set_drvdata(dev->pdev, dev);
633 strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
644 device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
650 device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
658 device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
662 dev->iseg = ioremap(pci_resource_start(dev->pdev, 0),
663 sizeof(*dev->iseg));
664 if (!dev->iseg) {
666 device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
669 device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
671 err = mlx5_cmd_init(dev);
673 device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
677 err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
679 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
683 mlx5_pagealloc_init(dev);
685 err = mlx5_core_enable_hca(dev);
687 device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
691 err = mlx5_core_set_issi(dev);
693 device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
697 err = mlx5_pagealloc_start(dev);
699 device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
703 err = mlx5_satisfy_startup_pages(dev, 1);
705 device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
709 err = set_hca_ctrl(dev);
711 device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
715 err = handle_hca_cap(dev);
717 device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
721 err = mlx5_satisfy_startup_pages(dev, 0);
723 device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
727 err = mlx5_cmd_init_hca(dev);
729 device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
733 mlx5_start_health_poll(dev);
735 err = mlx5_query_hca_caps(dev);
737 device_printf((&pdev->dev)->bsddev, "ERR: ""query hca failed\n");
741 err = mlx5_query_board_id(dev);
743 device_printf((&pdev->dev)->bsddev, "ERR: ""query board id failed\n");
747 err = mlx5_enable_msix(dev);
749 device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
753 err = mlx5_eq_init(dev);
755 device_printf((&pdev->dev)->bsddev, "ERR: ""failed to initialize eq\n");
759 err = mlx5_alloc_uuars(dev, &priv->uuari);
761 device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
765 err = mlx5_start_eqs(dev);
767 device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
771 err = alloc_comp_eqs(dev);
773 device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
777 if (map_bf_area(dev))
778 device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");
782 mlx5_init_cq_table(dev);
783 mlx5_init_qp_table(dev);
784 mlx5_init_srq_table(dev);
785 mlx5_init_mr_table(dev);
790 mlx5_stop_eqs(dev);
793 mlx5_free_uuars(dev, &priv->uuari);
796 mlx5_eq_cleanup(dev);
799 mlx5_disable_msix(dev);
802 mlx5_stop_health_poll(dev);
803 if (mlx5_cmd_teardown_hca(dev)) {
804 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
809 mlx5_reclaim_startup_pages(dev);
812 mlx5_pagealloc_stop(dev);
815 mlx5_core_disable_hca(dev);
818 mlx5_pagealloc_cleanup(dev);
820 mlx5_cmd_cleanup(dev);
823 iounmap(dev->iseg);
826 pci_clear_master(dev->pdev);
827 release_bar(dev->pdev);
830 pci_disable_device(dev->pdev);
836 static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
838 struct mlx5_priv *priv = &dev->priv;
840 mlx5_cleanup_mr_table(dev);
841 mlx5_cleanup_srq_table(dev);
842 mlx5_cleanup_qp_table(dev);
843 mlx5_cleanup_cq_table(dev);
844 unmap_bf_area(dev);
845 free_comp_eqs(dev);
846 mlx5_stop_eqs(dev);
847 mlx5_free_uuars(dev, &priv->uuari);
848 mlx5_eq_cleanup(dev);
849 mlx5_disable_msix(dev);
850 mlx5_stop_health_poll(dev);
851 if (mlx5_cmd_teardown_hca(dev)) {
852 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
855 mlx5_pagealloc_stop(dev);
856 mlx5_reclaim_startup_pages(dev);
857 mlx5_core_disable_hca(dev);
858 mlx5_pagealloc_cleanup(dev);
859 mlx5_cmd_cleanup(dev);
860 iounmap(dev->iseg);
861 pci_clear_master(dev->pdev);
862 release_bar(dev->pdev);
863 pci_disable_device(dev->pdev);
869 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
874 dev_ctx->context = intf->add(dev);
888 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
896 intf->remove(dev, dev_ctx->context);
901 static int mlx5_register_device(struct mlx5_core_dev *dev)
903 struct mlx5_priv *priv = &dev->priv;
914 static void mlx5_unregister_device(struct mlx5_core_dev *dev)
916 struct mlx5_priv *priv = &dev->priv;
977 static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
980 struct mlx5_priv *priv = &dev->priv;
988 dev_ctx->intf->event(dev, dev_ctx->context, event, param);
994 void (*event)(struct mlx5_core_dev *dev,
1003 struct mlx5_core_dev *dev;
1007 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1008 priv = &dev->priv;
1014 dev->profile = &profiles[prof_sel];
1015 dev->event = mlx5_core_event;
1019 err = mlx5_dev_init(dev, pdev);
1021 device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_dev_init failed %d\n", err);
1025 err = mlx5_register_device(dev);
1027 device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_register_device failed %d\n", err);
1035 mlx5_dev_cleanup(dev);
1037 kfree(dev);
1043 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1045 mlx5_unregister_device(dev);
1046 mlx5_dev_cleanup(dev);
1047 kfree(dev);
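init_one and remove_one (lines 1019-1047) are the probe/remove pair; they are wired to the PCI bus through a pci_driver table elsewhere in this file. A minimal hedged sketch of that glue, with DRIVER_NAME and mlx5_core_pci_table used only as placeholders for the real macros and ID table.

static struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,		/* placeholder for the driver-name macro */
	.id_table	= mlx5_core_pci_table,	/* placeholder for the supported PCI IDs */
	.probe		= init_one,		/* lines 1019-1037: mlx5_dev_init + mlx5_register_device */
	.remove		= remove_one		/* lines 1043-1047: unregister, cleanup, kfree */
};

static int __init mlx5_init(void)
{
	return pci_register_driver(&mlx5_core_driver);
}

static void __exit mlx5_cleanup(void)
{
	pci_unregister_driver(&mlx5_core_driver);
}

module_init(mlx5_init);
module_exit(mlx5_cleanup);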