Lines Matching refs:eq

10 #include <linux/mlx5/eq.h>
15 #include "lib/eq.h"
94 static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
96 struct mlx5_cq_table *table = &eq->cq_table;
114 struct mlx5_eq *eq = &eq_comp->core;
119 eqe = next_eqe_sw(eq);
133 cq = mlx5_eq_cq_get(eq, cqn);
139 dev_dbg_ratelimited(eq->dev->device,
143 ++eq->cons_index;
145 } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
148 eq_update_ci(eq, 1);
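
The fragment above (lines 114-148) is the completion-EQ interrupt handler: it consumes software-owned EQEs up to MLX5_EQ_POLLING_BUDGET, dispatches each completion to its CQ, then publishes the consumer index. A simplified, hedged reconstruction of that loop, reusing the helper names visible in the listing (next_eqe_sw, mlx5_eq_cq_get, eq_update_ci); the tasklet scheduling and CQ reference counting of the real handler are omitted, and the cq->comp() callback signature is assumed from recent kernels:

/* Hedged sketch of the completion-EQ polling loop, not the full handler. */
static int comp_eq_poll_sketch(struct mlx5_eq *eq)
{
        struct mlx5_core_cq *cq;
        struct mlx5_eqe *eqe;
        int num_eqes = 0;
        u32 cqn;

        eqe = next_eqe_sw(eq);          /* NULL once the owner bit says "hardware" */
        if (!eqe)
                goto out;

        do {
                cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
                cq = mlx5_eq_cq_get(eq, cqn);
                if (cq)
                        cq->comp(cq, eqe);      /* hand the completion to the CQ owner */

                ++eq->cons_index;               /* software consumer index */
        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) &&
                 (eqe = next_eqe_sw(eq)));

out:
        eq_update_ci(eq, 1);                    /* publish cons_index and re-arm */
        return num_eqes;
}
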
161 u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
165 disable_irq(eq->core.irqn);
166 count_eqe = eq->core.cons_index;
167 mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
168 count_eqe = eq->core.cons_index - count_eqe;
169 enable_irq(eq->core.irqn);
174 static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
176 __acquires(&eq->lock)
179 spin_lock(&eq->lock);
181 spin_lock_irqsave(&eq->lock, *flags);
184 static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
186 __releases(&eq->lock)
189 spin_unlock(&eq->lock);
191 spin_unlock_irqrestore(&eq->lock, *flags);
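
These two helpers (lines 174-191) exist because the async-EQ handler runs in two contexts: the plain spin_lock() path serves the hard-IRQ notifier, where interrupts are already off, while the irqsave path serves the command-recovery poll (ASYNC_EQ_RECOVER) that runs in process context and must not race the IRQ path. A generic sketch of that pattern, independent of the mlx5 types:

#include <linux/spinlock.h>

/* Sketch only: one lock taken either from hard-IRQ context or from a
 * polled recovery context; flags matter only in the recovery case.
 */
static void example_int_lock(spinlock_t *lock, bool recovery,
                             unsigned long *flags)
        __acquires(lock)
{
        if (!recovery)
                spin_lock(lock);                  /* hard IRQ: IRQs already disabled */
        else
                spin_lock_irqsave(lock, *flags);  /* process-context recovery poll */
}

static void example_int_unlock(spinlock_t *lock, bool recovery,
                               unsigned long *flags)
        __releases(lock)
{
        if (!recovery)
                spin_unlock(lock);
        else
                spin_unlock_irqrestore(lock, *flags);
}
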
204 struct mlx5_eq *eq = &eq_async->core;
212 dev = eq->dev;
218 eqe = next_eqe_sw(eq);
232 ++eq->cons_index;
234 } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
237 eq_update_ci(eq, 1);
245 struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
248 eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
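
Line 248 is the command-EQ recovery path: when a firmware command appears to time out, the command layer can invoke the async-EQ handler directly (with ASYNC_EQ_RECOVER) to reap any completion EQEs whose interrupt was lost. A hedged sketch of what a caller on the timeout path might look like; the function below is hypothetical, only mlx5_cmd_eq_recover() comes from the driver:

/* Hypothetical timeout handler: force-poll the command EQ before giving up. */
static int cmd_timeout_sketch(struct mlx5_core_dev *dev)
{
        if (mlx5_cmd_eq_recover(dev))
                return -EAGAIN;         /* EQEs were recovered; re-check completion */

        return -ETIMEDOUT;              /* nothing pending, the command really timed out */
}
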
253 static void init_eq_buf(struct mlx5_eq *eq)
258 for (i = 0; i < eq_get_size(eq); i++) {
259 eqe = get_eqe(eq, i);
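
init_eq_buf() (lines 253-259) walks every entry of the newly allocated queue and sets the owner bit so that all entries start out hardware-owned; that is what next_eqe_sw() later checks against cons_index to detect fresh EQEs. A hedged completion of the loop body, assuming the MLX5_EQE_OWNER_INIT_VAL constant from lib/eq.h:

static void init_eq_buf_sketch(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq_get_size(eq); i++) {
                eqe = get_eqe(eq, i);
                /* every entry starts out owned by hardware */
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}
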
265 create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
269 struct mlx5_cq_table *cq_table = &eq->cq_table;
286 eq->cons_index = 0;
289 &eq->frag_buf, dev->priv.numa_node);
293 mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
294 init_eq_buf(eq);
296 eq->irq = param->irq;
297 vecidx = mlx5_irq_get_index(eq->irq);
300 MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
309 mlx5_fill_page_frag_array(&eq->frag_buf, pas);
320 MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
324 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
330 eq->vecidx = vecidx;
331 eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
332 eq->irqn = pci_irq_vector(dev->pdev, vecidx);
333 eq->dev = dev;
334 eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
336 err = mlx5_debug_eq_add(dev, eq);
344 mlx5_cmd_destroy_eq(dev, eq->eqn);
350 mlx5_frag_buf_free(dev, &eq->frag_buf);
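
create_map_eq() (lines 265-350) follows the standard mlx5 command-interface flow: size the CREATE_EQ input to hold one physical-address entry per buffer fragment, fill the EQ context with MLX5_SET(), execute the command, and read the firmware-assigned EQ number back with MLX5_GET(). A stripped-down, hedged sketch of just that calling convention as it would sit inside eq.c; buffer allocation, the UAR page, interrupt vector and event-bitmask fields set by the real function are omitted:

static int create_eq_cmd_sketch(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {};
        void *eqc;
        u32 *in;
        int inlen;
        int err;

        /* input length = base command layout + one PAS entry per buffer page */
        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mlx5_fill_page_frag_array(&eq->frag_buf,
                                  (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas));

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (!err)
                eq->eqn = MLX5_GET(create_eq_out, out, eq_number);

        kvfree(in);
        return err;
}
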
356 * @dev: Device which owns the eq
357 * @eq: EQ to enable
364 int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
369 err = mlx5_irq_attach_nb(eq->irq, nb);
371 eq_update_ci(eq, 1);
379 * @dev: Device which owns the eq
380 * @eq: EQ to disable
385 void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
388 mlx5_irq_detach_nb(eq->irq, nb);
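
mlx5_eq_enable() (line 364) attaches the caller's notifier_block to the EQ's IRQ and arms the EQ via eq_update_ci(), while mlx5_eq_disable() (line 385) detaches the same notifier; the two are meant to bracket the EQ's active lifetime. A hedged usage sketch for a generic EQ (the handler body is a placeholder; it would normally drain the EQ as in the polling sketch further below):

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

static int my_eq_handler(struct notifier_block *nb, unsigned long type,
                         void *data)
{
        /* Typically: poll the EQ with mlx5_eq_get_eqe()/mlx5_eq_update_ci(). */
        return NOTIFY_OK;
}

static struct notifier_block my_eq_nb = {
        .notifier_call = my_eq_handler,
};

static int use_eq_sketch(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        err = mlx5_eq_enable(dev, eq, &my_eq_nb);  /* attach to the IRQ and arm */
        if (err)
                return err;

        /* ... EQ is live; events invoke my_eq_handler from IRQ context ... */

        mlx5_eq_disable(dev, eq, &my_eq_nb);       /* detach before destroying the EQ */
        return 0;
}
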
392 static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
396 mlx5_debug_eq_remove(dev, eq);
398 err = mlx5_cmd_destroy_eq(dev, eq->eqn);
400 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
401 eq->eqn);
403 mlx5_frag_buf_free(dev, &eq->frag_buf);
407 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
409 struct mlx5_cq_table *table = &eq->cq_table;
419 void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
421 struct mlx5_cq_table *table = &eq->cq_table;
429 mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
430 cq->cqn, eq->eqn);
435 mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
436 cq->cqn, eq->eqn);
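
mlx5_eq_add_cq()/mlx5_eq_del_cq() (lines 407-436) maintain the per-EQ cq_table that mlx5_eq_cq_get() consults from the interrupt handler, mapping a CQN to its mlx5_core_cq. A hedged sketch of the add side, assuming the table is a spinlock-protected radix tree keyed by CQN (the lock and tree member names are assumptions):

static int add_cq_sketch(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        int err;

        /* publish CQN -> CQ so the EQ handler can dispatch completions */
        spin_lock(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock(&table->lock);

        return err;
}
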
479 struct mlx5_eq *eq, struct mlx5_eq_param *param)
485 err = create_map_eq(dev, eq, param);
490 static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
496 err = destroy_unmap_eq(dev, eq);
507 struct mlx5_eq *eq;
513 eq = &eqt->async_eq.core;
517 mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
520 cq = mlx5_eq_cq_get(eq, cqn);
522 mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
604 setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
609 eq->irq_nb.notifier_call = mlx5_eq_async_int;
610 spin_lock_init(&eq->lock);
612 err = create_async_eq(dev, &eq->core, param);
617 err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
620 destroy_async_eq(dev, &eq->core);
626 struct mlx5_eq_async *eq, const char *name)
630 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
631 err = destroy_async_eq(dev, &eq->core);
633 mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
751 struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
755 if (!eq)
759 err = create_async_eq(dev, eq, param);
761 kvfree(eq);
762 eq = ERR_PTR(err);
765 return eq;
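
mlx5_eq_create_generic() (lines 751-765) is the entry point other mlx5 consumers (for example the RDMA driver's page-fault EQ) use to obtain a private EQ: fill an mlx5_eq_param, create the EQ, and check the result with IS_ERR(). A hedged usage sketch; the nent, mask and irq members of mlx5_eq_param are assumed from include/linux/mlx5/eq.h and from line 296 above, and obtaining the struct mlx5_irq is out of scope here:

static struct mlx5_eq *create_private_eq_sketch(struct mlx5_core_dev *dev,
                                                struct mlx5_irq *irq)
{
        struct mlx5_eq_param param = {
                .nent = 64,                     /* number of EQ entries */
                .irq  = irq,                    /* pre-allocated IRQ, see line 296 */
        };

        param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; /* example subscription */

        /* returns ERR_PTR(err) on failure, as at line 762 */
        return mlx5_eq_create_generic(dev, &param);
}
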
769 int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
773 if (IS_ERR(eq))
776 err = destroy_async_eq(dev, eq);
780 kvfree(eq);
786 struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
788 u32 ci = eq->cons_index + cc;
789 u32 nent = eq_get_size(eq);
792 eqe = get_eqe(eq, ci & (nent - 1));
804 void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
806 __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
809 eq->cons_index += cc;
810 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
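
Lines 786-810 are the two halves of the generic-EQ consumer contract: mlx5_eq_get_eqe() returns the entry at cons_index + cc when software owns it (NULL otherwise), and mlx5_eq_update_ci() adds the consumed count to cons_index and writes it, together with the EQN, to the doorbell, optionally re-arming the EQ. A hedged sketch of the usual polling loop built on this pair, in the style of the driver's generic-EQ consumers; process_eqe() and the batch size are placeholders:

static void process_eqe(struct mlx5_eqe *eqe)
{
        /* placeholder for per-event handling */
}

static void poll_generic_eq_sketch(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        u32 cc = 0;

        /* cc counts EQEs consumed in this pass but not yet acknowledged */
        while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
                process_eqe(eqe);
                cc++;

                /* acknowledge in batches so the EQ cannot overflow */
                if (cc >= 128) {
                        mlx5_eq_update_ci(eq, cc, false);  /* update CI, do not re-arm */
                        cc = 0;
                }
        }

        mlx5_eq_update_ci(eq, cc, true);        /* final update, re-arm the EQ */
}
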
973 static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
978 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
979 if (destroy_unmap_eq(dev, &eq->core))
981 eq->core.eqn);
982 tasklet_disable(&eq->tasklet_ctx.task);
983 kfree(eq);
1008 struct mlx5_eq_comp *eq;
1026 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
1027 if (!eq) {
1032 INIT_LIST_HEAD(&eq->tasklet_ctx.list);
1033 INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
1034 spin_lock_init(&eq->tasklet_ctx.lock);
1035 tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
1038 eq->irq_nb.notifier_call = mlx5_eq_comp_int;
1044 err = create_map_eq(dev, &eq->core, &param);
1047 err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
1049 destroy_unmap_eq(dev, &eq->core);
1053 mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
1054 err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
1059 return eq->core.eqn;
1062 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
1064 kfree(eq);
1073 struct mlx5_eq_comp *eq;
1077 eq = xa_load(&table->comp_eqs, vecidx);
1078 if (eq) {
1079 *eqn = eq->core.eqn;
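
The lookup at lines 1073-1079 backs the exported vector-to-EQN helper (mlx5_comp_eqn_get() in recent kernels): completion EQs are stored in an xarray keyed by vector index, and a ULP reads the EQN out of it when building a CQ context. A hedged usage sketch; the CREATE_CQ plumbing around it is omitted and the c_eqn_or_apu_element field name is taken from current mlx5_ifc, so it may differ on older kernels:

static int set_cq_eqn_sketch(struct mlx5_core_dev *dev, u16 vecidx, void *cqc)
{
        int eqn;
        int err;

        err = mlx5_comp_eqn_get(dev, vecidx, &eqn);     /* vector -> EQN */
        if (err)
                return err;

        /* point the new CQ at that completion EQ */
        MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
        return 0;
}
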
1099 struct mlx5_eq_comp *eq;
1108 eq = xa_load(&table->comp_eqs, vector);
1109 *irqn = eq->core.irqn;
1123 struct mlx5_eq_comp *eq;
1125 eq = xa_load(&table->comp_eqs, vector);
1126 if (eq)
1127 return mlx5_irq_get_affinity_mask(eq->core.irq);
1157 struct mlx5_eq_comp *eq;
1160 xa_for_each(&table->comp_eqs, index, eq)
1161 if (eq->core.eqn == eqn)
1162 return eq;
1237 struct mlx5_eq_comp *eq;
1240 xa_for_each(&table->comp_eqs, index, eq)
1241 destroy_comp_eq(dev, eq, index);