/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018-2021, Mellanox Technologies inc.  All rights reserved. */

#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>

#define MLX5_EQE_SIZE       (sizeof(struct mlx5_eqe))

/* Deferred CQ completion work, run from tasklet (softirq) context. */
struct mlx5_eq_tasklet {
	struct list_head      list;
	struct list_head      process_list;
	struct tasklet_struct task;
	spinlock_t            lock; /* lock completion tasklet list */
};

struct mlx5_cq_table {
	spinlock_t              lock;	/* protect radix tree */
	struct radix_tree_root  tree;
};

/* Core EQ state embedded in the async and completion EQ wrappers below. */
struct mlx5_eq {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct mlx5_core_dev    *dev;
	struct mlx5_cq_table    cq_table;
	__be32 __iomem	        *doorbell;
	u32                     cons_index;
	unsigned int            vecidx;
	unsigned int            irqn;
	u8                      eqn;
	struct mlx5_rsc_debug   *dbg;
	struct mlx5_irq         *irq;
};

struct mlx5_eq_async {
	struct mlx5_eq          core;
	struct notifier_block   irq_nb;
	spinlock_t              lock; /* avoid races between the EQ irq handler and resiliency flows */
};

struct mlx5_eq_comp {
	struct mlx5_eq          core;
	struct notifier_block   irq_nb;
	struct mlx5_eq_tasklet  tasklet_ctx;
	struct list_head        list;
};
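
/*
 * Illustrative sketch only: example_eq_cq_get() is a hypothetical helper,
 * not part of this header, showing how a CQN taken from a completion EQE
 * maps to its mlx5_core_cq through the per-EQ radix tree above. Taking
 * cq_table.lock around the lookup is an assumption based on its "protect
 * radix tree" comment; the driver may rely on RCU instead.
 */
static inline struct mlx5_core_cq *example_eq_cq_get(struct mlx5_eq *eq,
						     u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	spin_unlock(&table->lock);

	return cq;
}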

static inline u32 eq_get_size(struct mlx5_eq *eq)
{
	return eq->fbc.sz_m1 + 1;
}

static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_frag_buf_get_wqe(&eq->fbc, entry);
}

/*
 * Return the next software-owned EQE, or NULL if the queue is empty.
 * Ownership alternates on every pass through the queue: an EQE belongs
 * to software when its owner bit matches the pass-parity bit of the
 * consumer index (cons_index >> log_sz).
 */
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1);

	return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? NULL : eqe;
}

/*
 * Publish the 24-bit consumer index to the EQ doorbell. A write to the
 * first doorbell dword updates the CI and re-arms the EQ; a write to the
 * third dword (offset 2 in __be32 units) updates the CI without arming.
 */
static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
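
/*
 * Illustrative sketch only: example_eq_poll() is a hypothetical helper,
 * not part of this header, showing the consume-then-arm pattern the
 * inline helpers above are built for. Real handlers also dispatch on
 * eqe->type and cap the number of EQEs processed per interrupt.
 */
static inline int example_eq_poll(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int polled = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Read the EQE contents only after the ownership bit has
		 * been checked; the device writes the entry first.
		 */
		dma_rmb();

		/* ... handle eqe->type here ... */

		++eq->cons_index;
		++polled;
	}

	/* Publish the new consumer index and re-arm the EQ. */
	eq_update_ci(eq, 1);

	return polled;
}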

int mlx5_eq_table_init(struct mlx5_core_dev *dev);
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
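
/*
 * Illustrative sketch only: example_eq_tasklet_init() is a hypothetical
 * helper, not part of this header, showing minimal wiring of a
 * mlx5_eq_tasklet so that mlx5_cq_tasklet_cb() (declared above) runs the
 * deferred CQ completions from softirq context.
 */
static inline void example_eq_tasklet_init(struct mlx5_eq_tasklet *ctx)
{
	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->process_list);
	spin_lock_init(&ctx->lock);
	tasklet_setup(&ctx->task, mlx5_cq_tasklet_cb);
}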

u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
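
/*
 * Illustrative sketch only: example_comp_vector_to_irq() is a hypothetical
 * helper, not part of this header, showing how a consumer would map a
 * completion vector to its Linux IRQ number with mlx5_comp_irqn_get(),
 * e.g. before registering an affinity notifier on that IRQ.
 */
static inline int example_comp_vector_to_irq(struct mlx5_core_dev *dev,
					     int vector)
{
	unsigned int irqn;
	int err;

	err = mlx5_comp_irqn_get(dev, vector, &irqn);
	if (err)
		return err;

	return irqn;
}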

#endif /* __LIB_MLX5_EQ_H__ */