Lines Matching refs:dev

40 #include <dev/mlx4/cmd.h>
41 #include <dev/mlx4/qp.h>
50 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
52 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
57 qp = __mlx4_qp_lookup(dev, qpn);
64 mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
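
The event path above (lines 50-64) looks the QP up and logs a miss at debug level. The locking and reference counting around the lookup are not visible in these matches, so the following stand-alone sketch fills them in as an assumption: find the QP under the table lock, pin it before dropping the lock, deliver the event, then unpin. All names here are hypothetical.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct qp {
        atomic_int refcount;
        void (*event)(struct qp *qp, int event_type);
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for __mlx4_qp_lookup(); no QP registered in this sketch. */
    static struct qp *lookup(unsigned qpn) { (void)qpn; return NULL; }

    static void qp_event_sketch(unsigned qpn, int event_type)
    {
        struct qp *qp;

        /* Find and pin the QP under the table lock... */
        pthread_mutex_lock(&table_lock);
        qp = lookup(qpn);
        if (qp)
            atomic_fetch_add(&qp->refcount, 1);
        pthread_mutex_unlock(&table_lock);

        /* ...a miss is only worth a debug message. */
        if (!qp) {
            fprintf(stderr, "async event for non-existent QP %08x\n", qpn);
            return;
        }
        /* Deliver outside the lock, then drop the pin. */
        qp->event(qp, event_type);
        atomic_fetch_sub(&qp->refcount, 1);
    }

    int main(void)
    {
        qp_event_sketch(0x41, 1);
        return 0;
    }
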
75 static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
79 u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
82 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
83 qp->qpn <= dev->phys_caps.base_sqpn + 1;
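
Lines 75-83 show the classification predicate: a QP is the "real" QP0 when its number lies in [base_sqpn, base_sqpn + 1], and the master's proxy QP0 when it lies in the PF's 8-wide proxy slot. A minimal stand-alone sketch of that arithmetic, with hypothetical base values:

    #include <stdio.h>

    /* Hypothetical values; the real ones come from dev->phys_caps. */
    #define BASE_SQPN       0x40u   /* base of the real special QPs */
    #define BASE_PROXY_SQPN 0x80u   /* base of the proxy SQP space  */
    #define MASTER_FUNC     0u      /* PF function number           */

    /* Mirrors the range checks in is_master_qp0() (lines 79-83). */
    static void classify_qp0(unsigned qpn, int *real_qp0, int *proxy_qp0)
    {
        unsigned pf_proxy = BASE_PROXY_SQPN + 8 * MASTER_FUNC;

        *real_qp0  = qpn >= BASE_SQPN && qpn <= BASE_SQPN + 1;
        *proxy_qp0 = qpn >= pf_proxy && qpn <= pf_proxy + 1;
    }

    int main(void)
    {
        int r, p;
        classify_qp0(0x41, &r, &p);
        printf("real=%d proxy=%d\n", r, p);   /* prints real=1 proxy=0 */
        return 0;
    }
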
88 static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
134 struct mlx4_priv *priv = mlx4_priv(dev);
146 ret = mlx4_cmd(dev, 0, qp->qpn, 2,
148 if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
150 is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
160 mailbox = mlx4_alloc_cmd_mailbox(dev);
165 u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
173 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
175 cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));
183 ret = mlx4_cmd(dev, mailbox->dma,
188 if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
205 mlx4_free_cmd_mailbox(dev, mailbox);
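
Lines 134-205 outline two command shapes inside __mlx4_qp_modify(): a transition into RESET is a mailbox-less command (line 146, op_mod 2), while every other transition allocates a mailbox and ships the MTT address and QP context, adding the RoCE entropy word when the device reports MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 (lines 173-175). A compilable sketch of that dispatch, with stand-in command helpers:

    #include <stdio.h>

    enum qp_state { QP_RST, QP_INIT, QP_RTR, QP_RTS, QP_ERR };

    /* Stand-ins for the firmware commands. */
    static int cmd_2rst(int qpn) { printf("2RST_QP qpn=%d\n", qpn); return 0; }
    static int cmd_mailbox(int qpn, enum qp_state c, enum qp_state n)
    {
        /* stands in for: alloc mailbox, write MTT address + context
         * (plus the RoCE entropy word when FLAG2_ROCE_V1_V2 is set),
         * then issue the command for the c->n transition */
        printf("mailbox cmd %d->%d qpn=%d\n", c, n, qpn);
        return 0;
    }

    static int modify_sketch(int qpn, enum qp_state cur, enum qp_state nxt)
    {
        if (nxt == QP_RST)             /* the mailbox-less 2RST_QP path */
            return cmd_2rst(qpn);
        return cmd_mailbox(qpn, cur, nxt);
    }

    int main(void)
    {
        modify_sketch(0x41, QP_INIT, QP_RTR);
        modify_sketch(0x41, QP_RTS, QP_RST);
        return 0;
    }
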
209 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
215 return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
220 int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
226 struct mlx4_priv *priv = mlx4_priv(dev);
248 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
256 flags &= dev->caps.alloc_res_qp_mask;
258 if (mlx4_is_mfunc(dev)) {
261 err = mlx4_cmd_imm(dev, in_param, &out_param,
271 return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
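
Lines 248-271 show a dispatch that recurs for release (line 290), ICM allocation (line 353), and ICM free (line 378): on a multi-function device the request is marshalled to the master through the command interface; otherwise the __-prefixed helper runs locally. A stand-alone sketch of the pattern, with hypothetical names:

    #include <stdio.h>

    struct fake_dev { int is_mfunc; };

    /* Direct path: what __mlx4_qp_reserve_range() stands for here. */
    static int reserve_local(struct fake_dev *dev, int cnt, int align, int *base)
    {
        (void)dev; (void)cnt; (void)align;
        *base = 0x100;                 /* placeholder allocation */
        return 0;
    }

    /* Marshalled path: stands in for mlx4_cmd_imm(..., RES_QP, RES_OP_RESERVE). */
    static int reserve_via_master(struct fake_dev *dev, int cnt, int align, int *base)
    {
        return reserve_local(dev, cnt, align, base);
    }

    static int reserve_range(struct fake_dev *dev, int cnt, int align, int *base)
    {
        if (dev->is_mfunc)
            return reserve_via_master(dev, cnt, align, base);
        return reserve_local(dev, cnt, align, base);
    }

    int main(void)
    {
        struct fake_dev dev = { .is_mfunc = 0 };
        int base;
        if (!reserve_range(&dev, 8, 8, &base))
            printf("base qpn 0x%x\n", base);
        return 0;
    }
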
275 void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
277 struct mlx4_priv *priv = mlx4_priv(dev);
280 if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
285 void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
290 if (mlx4_is_mfunc(dev)) {
293 err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
297 mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
301 __mlx4_qp_release_range(dev, base_qpn, cnt);
305 int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
307 struct mlx4_priv *priv = mlx4_priv(dev);
311 err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
315 err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
319 err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
323 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
327 err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
334 mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
337 mlx4_table_put(dev, &qp_table->altc_table, qpn);
340 mlx4_table_put(dev, &qp_table->auxc_table, qpn);
343 mlx4_table_put(dev, &qp_table->qp_table, qpn);
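
Lines 305-343 are a classic get/unwind ladder: five ICM tables are acquired in order, and each failure label releases exactly the tables acquired so far, newest first (the same reverse order governs __mlx4_qp_free_icm(), lines 362-371). A compilable sketch of the idiom with placeholder helpers:

    #include <stdio.h>

    /* Placeholder acquire/release standing in for mlx4_table_get()/put(). */
    static int  table_get(const char *name) { printf("get %s\n", name); return 0; }
    static void table_put(const char *name) { printf("put %s\n", name); }

    /* Mirrors the goto-unwind idiom of __mlx4_qp_alloc_icm(): a failure
     * releases exactly the tables acquired so far, newest first. */
    static int alloc_icm_sketch(void)
    {
        int err;

        if ((err = table_get("qp")))     goto out;
        if ((err = table_get("auxc")))   goto err_qp;
        if ((err = table_get("altc")))   goto err_auxc;
        if ((err = table_get("rdmarc"))) goto err_altc;
        if ((err = table_get("cmpt")))   goto err_rdmarc;
        return 0;

    err_rdmarc:
        table_put("rdmarc");
    err_altc:
        table_put("altc");
    err_auxc:
        table_put("auxc");
    err_qp:
        table_put("qp");
    out:
        return err;
    }

    int main(void)
    {
        return alloc_icm_sketch();
    }
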
349 static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
353 if (mlx4_is_mfunc(dev)) {
355 return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
359 return __mlx4_qp_alloc_icm(dev, qpn, gfp);
362 void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
364 struct mlx4_priv *priv = mlx4_priv(dev);
367 mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
368 mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
369 mlx4_table_put(dev, &qp_table->altc_table, qpn);
370 mlx4_table_put(dev, &qp_table->auxc_table, qpn);
371 mlx4_table_put(dev, &qp_table->qp_table, qpn);
374 static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
378 if (mlx4_is_mfunc(dev)) {
380 if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
383 mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
385 __mlx4_qp_free_icm(dev, qpn);
388 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
390 struct mlx4_priv *priv = mlx4_priv(dev);
399 err = mlx4_qp_alloc_icm(dev, qpn, gfp);
404 err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
405 (dev->caps.num_qps - 1), qp);
416 mlx4_qp_free_icm(dev, qpn);
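
Lines 404-405 key the radix tree with qpn & (dev->caps.num_qps - 1). Assuming num_qps is a power of two (which the mask requires), this folds the high bits of the QPN away so that insertion here and the lookup in __mlx4_qp_lookup() agree on one canonical key. A tiny sketch with an assumed table size:

    #include <stdio.h>

    #define NUM_QPS (1u << 17)   /* assumed; must be a power of two */

    /* The key both mlx4_qp_alloc() (insert) and the lookup path use. */
    static unsigned qp_tree_key(unsigned qpn)
    {
        return qpn & (NUM_QPS - 1);
    }

    int main(void)
    {
        printf("0x%x -> key 0x%x\n", 0x820041u, qp_tree_key(0x820041u));
        return 0;
    }
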
422 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
435 mailbox = mlx4_alloc_cmd_mailbox(dev);
447 if (!(dev->caps.flags2
449 mlx4_warn(dev,
482 err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
486 mlx4_free_cmd_mailbox(dev, mailbox);
491 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
493 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
497 radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
502 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
508 mlx4_qp_free_icm(dev, qp->qpn);
512 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
514 return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
522 static int mlx4_create_zones(struct mlx4_dev *dev,
529 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
548 err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
568 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
585 last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
712 static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
714 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
739 int mlx4_init_qp_table(struct mlx4_dev *dev)
741 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
748 u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
749 dev->caps.dmfs_high_rate_qpn_range;
752 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
753 if (mlx4_is_slave(dev))
764 fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];
772 dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);
777 int last_base = dev->caps.num_qps;
784 if (dev->caps.reserved_qps_cnt[sort[j]] >
785 dev->caps.reserved_qps_cnt[sort[j - 1]])
791 last_base -= dev->caps.reserved_qps_cnt[sort[i]];
792 dev->caps.reserved_qps_base[sort[i]] = last_base;
794 dev->caps.reserved_qps_cnt[sort[i]];
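
Lines 777-794 place the reserved QP regions: a sorting pass orders them by descending reserved count, then each region's base is carved downward from the top of the QP space (last_base starts at dev->caps.num_qps). A stand-alone sketch of the carving with hypothetical counts and region count:

    #include <stdio.h>

    #define NUM_QPS     (1 << 17)   /* assumed total QP count   */
    #define NUM_REGIONS 3           /* assumed number of regions */

    int main(void)
    {
        /* Hypothetical per-region reserved counts, already ordered
         * largest-first as the sorting pass around lines 777-785 ensures. */
        int cnt[NUM_REGIONS] = { 4096, 1024, 256 };
        int base[NUM_REGIONS];
        int last_base = NUM_QPS;

        /* Carve each region down from the top of the QP space. */
        for (int i = 0; i < NUM_REGIONS; i++) {
            last_base -= cnt[i];
            base[i] = last_base;
        }
        for (int i = 0; i < NUM_REGIONS; i++)
            printf("region %d: base 0x%x cnt %d\n", i, base[i], cnt[i]);
        return 0;
    }
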
808 reserved_from_bot = mlx4_num_reserved_sqps(dev);
809 if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
810 mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
814 err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
822 if (mlx4_is_mfunc(dev)) {
824 dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
825 dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;
829 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
830 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
831 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
832 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
834 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
835 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
840 for (k = 0; k < dev->caps.num_ports; k++) {
841 dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
842 8 * mlx4_master_func_num(dev) + k;
843 dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
844 dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
845 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
846 dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
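
Lines 824-846 encode the SR-IOV special-QP layout: the 8 real special QPs sit at base_sqpn, each function gets an 8-wide proxy slot starting 8 above that, and the tunnel QPs sit one full proxy block (8 * MLX4_MFUNC_MAX) higher still; per port k, QP1's proxy follows QP0's at an MLX4_MAX_PORTS offset. A runnable sketch of the arithmetic (the two constants are assumed to match the driver headers):

    #include <stdio.h>

    #define MLX4_MFUNC_MAX 128u
    #define MLX4_MAX_PORTS 2u

    int main(void)
    {
        unsigned base_sqpn = 0x40;           /* hypothetical base of the 8 real SQPs */
        unsigned func = 0;                   /* master (PF) function number */
        unsigned base_proxy = base_sqpn + 8; /* line 824 */

        for (unsigned k = 0; k < 2; k++) {
            unsigned qp0_proxy  = base_proxy + 8 * func + k;
            unsigned qp0_tunnel = qp0_proxy + 8 * MLX4_MFUNC_MAX;
            unsigned qp1_proxy  = base_proxy + 8 * func + MLX4_MAX_PORTS + k;
            unsigned qp1_tunnel = qp1_proxy + 8 * MLX4_MFUNC_MAX;

            printf("port %u: qp0 proxy 0x%x tunnel 0x%x; qp1 proxy 0x%x tunnel 0x%x\n",
                   k + 1, qp0_proxy, qp0_tunnel, qp1_proxy, qp1_tunnel);
        }
        return 0;
    }
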
851 err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
858 kfree(dev->caps.qp0_tunnel);
859 kfree(dev->caps.qp0_proxy);
860 kfree(dev->caps.qp1_tunnel);
861 kfree(dev->caps.qp1_proxy);
862 dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
863 dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
864 mlx4_cleanup_qp_zones(dev);
868 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
870 if (mlx4_is_slave(dev))
873 mlx4_CONF_SPECIAL_QP(dev, 0);
875 mlx4_cleanup_qp_zones(dev);
878 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
884 mailbox = mlx4_alloc_cmd_mailbox(dev);
888 err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
894 mlx4_free_cmd_mailbox(dev, mailbox);
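
Lines 878-894 are the standard mailbox round-trip: allocate a DMA-able mailbox, issue QUERY_QP, copy the context out on success, and free the mailbox on every path. A stand-alone sketch with placeholder types; the 8-byte offset and the context size are assumptions about the firmware layout:

    #include <stdlib.h>
    #include <string.h>

    struct mailbox { unsigned char buf[512]; };
    struct qp_context { unsigned char raw[248]; };   /* size assumed */

    static struct mailbox *alloc_mailbox(void) { return calloc(1, sizeof(struct mailbox)); }
    static void free_mailbox(struct mailbox *m) { free(m); }

    /* Stand-in for the QUERY_QP firmware command filling the mailbox. */
    static int cmd_query_qp(struct mailbox *m, unsigned qpn)
    {
        m->buf[8] = (unsigned char)qpn;   /* pretend firmware wrote a context */
        return 0;
    }

    /* Mirrors mlx4_qp_query(): copy the context out of the mailbox on
     * success (offset assumed), and free the mailbox on every path. */
    static int query_sketch(unsigned qpn, struct qp_context *ctx)
    {
        struct mailbox *m = alloc_mailbox();
        int err;

        if (!m)
            return -1;
        err = cmd_query_qp(m, qpn);
        if (!err)
            memcpy(ctx, m->buf + 8, sizeof(*ctx));
        free_mailbox(m);
        return err;
    }

    int main(void)
    {
        struct qp_context ctx;
        return query_sketch(0x41, &ctx);
    }
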
899 int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
917 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
920 mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
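
Lines 899-920 walk the QP through the canonical ladder (reset, init, RTR, RTS) by modifying adjacent pairs from a state array and stopping at the first failure. A compilable sketch of that loop:

    #include <stdio.h>

    enum qp_state { QP_RST, QP_INIT, QP_RTR, QP_RTS };

    /* Stand-in for mlx4_qp_modify(); always succeeds in this sketch. */
    static int modify(enum qp_state cur, enum qp_state nxt)
    {
        printf("transition %d -> %d\n", cur, nxt);
        return 0;
    }

    int main(void)
    {
        static const enum qp_state states[] = { QP_RST, QP_INIT, QP_RTR, QP_RTS };
        int err = 0;

        /* Walk adjacent pairs; stop and report at the first failure. */
        for (unsigned i = 0; i + 1 < sizeof(states) / sizeof(states[0]); i++) {
            err = modify(states[i], states[i + 1]);
            if (err) {
                fprintf(stderr, "failed to reach state %d: %d\n", states[i + 1], err);
                break;
            }
        }
        return err;
    }
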
932 u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
939 err = mlx4_qp_query(dev, &qp, &context);