// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/bits.h>
#include <linux/netlink.h>
#include <linux/refcount.h>
#include <linux/xarray.h>
#include <net/devlink.h>

#include "spectrum.h"

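/* A single layer 4 port range register (PPRR). Identical ranges are
 * deduplicated: each additional user takes a reference instead of
 * consuming another register. 'index' is the register index allocated
 * from the core's xarray.
 */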
struct mlxsw_sp_port_range_reg {
	struct mlxsw_sp_port_range range;
	refcount_t refcount;
	u32 index;
};

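/* Per-ASIC port range state: prr_xa maps register indexes to in-use
 * registers, prr_ids bounds index allocation to the hardware limit and
 * prr_count tracks occupancy for devlink resource reporting.
 */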
struct mlxsw_sp_port_range_core {
	struct xarray prr_xa;
	struct xa_limit prr_ids;
	atomic_t prr_count;
};

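/* Program a single PPRR register so that the device matches the requested
 * range against either the source or the destination layer 4 port.
 */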
static int
mlxsw_sp_port_range_reg_configure(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_port_range_reg *prr)
{
	char pprr_pl[MLXSW_REG_PPRR_LEN];

	/* We do not care whether the packet is IPv4 or IPv6, or whether it
	 * is TCP or UDP, so set all four fields.
	 */
	mlxsw_reg_pprr_pack(pprr_pl, prr->index);
	mlxsw_reg_pprr_ipv4_set(pprr_pl, true);
	mlxsw_reg_pprr_ipv6_set(pprr_pl, true);
	mlxsw_reg_pprr_src_set(pprr_pl, prr->range.source);
	mlxsw_reg_pprr_dst_set(pprr_pl, !prr->range.source);
	mlxsw_reg_pprr_tcp_set(pprr_pl, true);
	mlxsw_reg_pprr_udp_set(pprr_pl, true);
	mlxsw_reg_pprr_port_range_min_set(pprr_pl, prr->range.min);
	mlxsw_reg_pprr_port_range_max_set(pprr_pl, prr->range.max);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pprr), pprr_pl);
}

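/* Allocate a free register index, program the register in hardware and
 * account for it in the occupancy counter reported to devlink.
 */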
static struct mlxsw_sp_port_range_reg *
mlxsw_sp_port_range_reg_create(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_port_range *range,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
	struct mlxsw_sp_port_range_reg *prr;
	int err;

	prr = kzalloc(sizeof(*prr), GFP_KERNEL);
	if (!prr)
		return ERR_PTR(-ENOMEM);

	prr->range = *range;
	refcount_set(&prr->refcount, 1);

	err = xa_alloc(&pr_core->prr_xa, &prr->index, prr, pr_core->prr_ids,
		       GFP_KERNEL);
	if (err) {
		if (err == -EBUSY)
			NL_SET_ERR_MSG_MOD(extack, "Exceeded number of port range registers");
		goto err_xa_alloc;
	}

	err = mlxsw_sp_port_range_reg_configure(mlxsw_sp, prr);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to configure port range register");
		goto err_reg_configure;
	}

	atomic_inc(&pr_core->prr_count);

	return prr;

err_reg_configure:
	xa_erase(&pr_core->prr_xa, prr->index);
err_xa_alloc:
	kfree(prr);
	return ERR_PTR(err);
}

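/* Called once the last reference is dropped: return the index to the
 * xarray and update the occupancy counter.
 */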
static void mlxsw_sp_port_range_reg_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_port_range_reg *prr)
{
	struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;

	atomic_dec(&pr_core->prr_count);
	xa_erase(&pr_core->prr_xa, prr->index);
	kfree(prr);
}

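/* Look for an existing register covering the same range so that callers
 * can share it instead of allocating a new one.
 */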
static struct mlxsw_sp_port_range_reg *
mlxsw_sp_port_range_reg_find(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_port_range *range)
{
	struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
	struct mlxsw_sp_port_range_reg *prr;
	unsigned long index;

	xa_for_each(&pr_core->prr_xa, index, prr) {
		if (prr->range.min == range->min &&
		    prr->range.max == range->max &&
		    prr->range.source == range->source)
			return prr;
	}

	return NULL;
}

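/* Get a (possibly shared) port range register matching 'range' and return
 * its index via 'p_prr_index'. Each successful call must be balanced by a
 * call to mlxsw_sp_port_range_reg_put().
 *
 * A minimal usage sketch (example values, context and error handling
 * elided):
 *
 *	struct mlxsw_sp_port_range range = {
 *		.min = 1000,
 *		.max = 2000,
 *		.source = true,
 *	};
 *	u8 prr_index;
 *
 *	err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range, extack,
 *					  &prr_index);
 *	...
 *	mlxsw_sp_port_range_reg_put(mlxsw_sp, prr_index);
 */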
int mlxsw_sp_port_range_reg_get(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_port_range *range,
				struct netlink_ext_ack *extack,
				u8 *p_prr_index)
{
	struct mlxsw_sp_port_range_reg *prr;

	prr = mlxsw_sp_port_range_reg_find(mlxsw_sp, range);
	if (prr) {
		refcount_inc(&prr->refcount);
		*p_prr_index = prr->index;
		return 0;
	}

	prr = mlxsw_sp_port_range_reg_create(mlxsw_sp, range, extack);
	if (IS_ERR(prr))
		return PTR_ERR(prr);

	*p_prr_index = prr->index;

	return 0;
}

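/* Drop a reference previously taken with mlxsw_sp_port_range_reg_get()
 * and destroy the register when the last reference goes away.
 */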
void mlxsw_sp_port_range_reg_put(struct mlxsw_sp *mlxsw_sp, u8 prr_index)
{
	struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
	struct mlxsw_sp_port_range_reg *prr;

	prr = xa_load(&pr_core->prr_xa, prr_index);
	if (WARN_ON(!prr))
		return;

	if (!refcount_dec_and_test(&prr->refcount))
		return;

	mlxsw_sp_port_range_reg_destroy(mlxsw_sp, prr);
}

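/* devlink occupancy callback: report how many port range registers are
 * currently in use.
 */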
static u64 mlxsw_sp_port_range_reg_occ_get(void *priv)
{
	struct mlxsw_sp_port_range_core *pr_core = priv;

	return atomic_read(&pr_core->prr_count);
}

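/* Query the number of port range registers supported by the device, cap
 * index allocation accordingly and register the occupancy callback with
 * devlink.
 */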
int mlxsw_sp_port_range_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_range_core *pr_core;
	struct mlxsw_core *core = mlxsw_sp->core;
	u64 max;

	if (!MLXSW_CORE_RES_VALID(core, ACL_MAX_L4_PORT_RANGE))
		return -EIO;
	max = MLXSW_CORE_RES_GET(core, ACL_MAX_L4_PORT_RANGE);

	/* Each port range register is represented using a single bit in the
	 * two bytes "l4_port_range" ACL key element.
	 */
	WARN_ON(max > BITS_PER_BYTE * sizeof(u16));

	pr_core = kzalloc(sizeof(*mlxsw_sp->pr_core), GFP_KERNEL);
	if (!pr_core)
		return -ENOMEM;
	mlxsw_sp->pr_core = pr_core;

	pr_core->prr_ids.max = max - 1;
	xa_init_flags(&pr_core->prr_xa, XA_FLAGS_ALLOC);

	devl_resource_occ_get_register(priv_to_devlink(core),
				       MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
				       mlxsw_sp_port_range_reg_occ_get,
				       pr_core);

	return 0;
}

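/* Tear down the port range core. All registers are expected to have been
 * put by this point, hence the WARN_ON() on a non-empty xarray.
 */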
void mlxsw_sp_port_range_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;

	devl_resource_occ_get_unregister(priv_to_devlink(mlxsw_sp->core),
					 MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS);
	WARN_ON(!xa_empty(&pr_core->prr_xa));
	xa_destroy(&pr_core->prr_xa);
	kfree(pr_core);
}