// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"

static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]--;
}

static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]++;
}
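/* Accounting sketch (illustrative only): irqs_per_cpu[cpu] counts the IRQs
 * bound to each CPU. With irqs_per_cpu = {2, 0, 1} for CPUs 0-2,
 * cpu_get(pool, 1) yields {2, 1, 1} and a later cpu_put(pool, 1) restores
 * {2, 0, 1}. These counters feed cpu_get_least_loaded() below.
 */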

/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it */
static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
				const struct cpumask *req_mask)
{
	int best_cpu = -1;
	int cpu;

	for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
		/* CPU has zero IRQs on it. No need to search any more CPUs. */
		if (!pool->irqs_per_cpu[cpu]) {
			best_cpu = cpu;
			break;
		}
		if (best_cpu < 0)
			best_cpu = cpu;
		if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
			best_cpu = cpu;
	}
	if (best_cpu == -1) {
		/* There are no online CPUs in req_mask */
		mlx5_core_err(pool->dev, "No online CPUs in req_mask (%*pbl)\n",
			      cpumask_pr_args(req_mask));
		best_cpu = cpumask_first(cpu_online_mask);
	}
	/* account the IRQ we are about to bind to best_cpu */
	cpu_get(pool, best_cpu);
	return best_cpu;
}
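/* Worked example (illustrative only): req_mask = 0x7 (CPUs 0-2), all online,
 * irqs_per_cpu = {1, 3, 2}. No CPU has a zero count, so the loop scans all
 * three and returns CPU 0, bumping its counter to 2. Had CPU 2's count been
 * zero, the loop would have stopped there immediately.
 */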

/* Creating an IRQ from irq_pool */
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
	struct irq_affinity_desc auto_desc = {};
	u32 irq_index;
	int err;

	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
	if (err)
		return ERR_PTR(err);
	if (pool->irqs_per_cpu) {
		if (cpumask_weight(&af_desc->mask) > 1)
			/* if req_mask contains more than one CPU, set the
			 * least loaded CPU of req_mask
			 */
			cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
					&auto_desc.mask);
		else
			cpu_get(pool, cpumask_first(&af_desc->mask));
	}
	return mlx5_irq_alloc(pool, irq_index,
			      cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
			      NULL);
}
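/* Affinity sketch (illustrative only): if af_desc->mask = 0xc (CPUs 2-3) and
 * CPU 3 is the least loaded, auto_desc.mask becomes 0x8 and the IRQ is
 * allocated pinned to CPU 3. A single-CPU af_desc->mask is passed through
 * unchanged, since auto_desc.mask stays empty in that case.
 */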

/* Looking for the IRQ with the smallest refcount that fits req_mask.
 * If pool is sf_comp_pool, then we are looking for an IRQ with any of the
 * requested CPUs in req_mask.
 * For example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask
 * isn't a subset of req_mask, so we will skip it. irq1_mask is a subset of
 * req_mask, so we don't skip it.
 * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will
 * fit. And since a mask is a subset of itself, we will pass the first check
 * below.
 */
static struct mlx5_irq *
irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{
	int start = pool->xa_num_irqs.min;
	int end = pool->xa_num_irqs.max;
	struct mlx5_irq *irq = NULL;
	struct mlx5_irq *iter;
	int irq_refcount = 0;
	unsigned long index;

	lockdep_assert_held(&pool->lock);
	xa_for_each_range(&pool->irqs, index, iter, start, end) {
		struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
		int iter_refcount = mlx5_irq_read_locked(iter);

		if (!cpumask_subset(iter_mask, req_mask))
			/* skip IRQs with a mask which is not a subset of req_mask */
			continue;
		if (iter_refcount < pool->min_threshold)
			/* If we found an IRQ with less than min_threshold, return it */
			return iter;
		if (!irq || iter_refcount < irq_refcount) {
			/* In case we don't find an IRQ with less than
			 * min_threshold, keep a pointer to the least used IRQ
			 */
			irq_refcount = iter_refcount;
			irq = iter;
		}
	}
	return irq;
}
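/* Search sketch (illustrative only): with min_threshold = 2 and three IRQs
 * whose masks fit req_mask holding refcounts {4, 1, 3}, the walk returns the
 * second IRQ as soon as it sees refcount 1 < 2. With refcounts {4, 2, 3},
 * none beats the threshold, so the least used IRQ (refcount 2) is returned
 * after the full walk.
 */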

/**
 * mlx5_irq_affinity_request - request an IRQ according to the given mask.
 * @pool: IRQ pool to request from.
 * @af_desc: affinity descriptor for this IRQ.
 *
 * This function returns a pointer to the requested IRQ, or ERR_PTR() in case
 * of error.
 */
struct mlx5_irq *
mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
	struct mlx5_irq *least_loaded_irq, *new_irq;

	mutex_lock(&pool->lock);
	least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
	if (least_loaded_irq &&
	    mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
		goto out;
	/* We didn't find an IRQ with less than min_threshold, try to allocate a new IRQ */
	new_irq = irq_pool_request_irq(pool, af_desc);
	if (IS_ERR(new_irq)) {
		if (!least_loaded_irq) {
			/* We failed to allocate a new IRQ and didn't find an
			 * existing one to share
			 */
			mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
				      PTR_ERR(new_irq));
			mutex_unlock(&pool->lock);
			return new_irq;
		}
		/* We failed to create a new IRQ for the requested affinity,
		 * sharing existing IRQ.
		 */
		goto out;
	}
	least_loaded_irq = new_irq;
	goto unlock;
out:
	mlx5_irq_get_locked(least_loaded_irq);
	if (mlx5_irq_read_locked(least_loaded_irq) > pool->max_threshold)
		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
			      pci_irq_vector(pool->dev->pdev,
					     mlx5_irq_get_index(least_loaded_irq)), pool->name,
			      mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
	mutex_unlock(&pool->lock);
	return least_loaded_irq;
}
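/* Usage sketch (illustrative, not part of this file): a typical caller
 * builds an irq_affinity_desc and checks the result with IS_ERR():
 *
 *	struct irq_affinity_desc af_desc = {};
 *	struct mlx5_irq *irq;
 *
 *	cpumask_copy(&af_desc.mask, cpu_online_mask);
 *	irq = mlx5_irq_affinity_request(pool, &af_desc);
 *	if (IS_ERR(irq))
 *		return PTR_ERR(irq);
 *
 * The returned IRQ may be shared with other users; it is handed back later
 * via mlx5_irq_affinity_irq_release().
 */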

void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	int cpu;

	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
	synchronize_irq(pci_irq_vector(pool->dev->pdev,
				       mlx5_irq_get_index(irq)));
	if (mlx5_irq_put(irq))
		if (pool->irqs_per_cpu)
			cpu_put(pool, cpu);
}
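/* Release sketch (illustrative only): synchronize_irq() waits for any
 * in-flight handler to finish before the reference is dropped. mlx5_irq_put()
 * returns nonzero only when this was the last reference and the IRQ was
 * freed, so the CPU's load counter is decremented via cpu_put() exactly once,
 * for the CPU the IRQ was accounted to.
 */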