1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4#include <net/pkt_cls.h>
5#include "htb.h"
6#include "en.h"
7#include "../qos.h"
8
/* Software node of the offloaded HTB tree. One instance per TC class. */
struct mlx5e_qos_node {
	struct hlist_node hnode;	/* Entry in htb->qos_tc2node, hashed by classid. */
	struct mlx5e_qos_node *parent;	/* Parent in the HTB tree; NULL for the root (kzalloc'ed). */
	u64 rate;	/* Configured rate, kept to recompute bw_share when an ancestor ceil changes. */
	u32 bw_share;	/* 1..100 relative share; 0 when rate exceeds the ancestor ceil (see mlx5e_htb_convert_rate). */
	u32 max_average_bw;	/* Ceil in HW units (ceil / BYTES_IN_MBIT); 0 means unlimited in HW, so min 1 is used. */
	u32 hw_id;	/* Firmware scheduling element ID for this node. */
	u32 classid; /* 16-bit, except root. */
	u16 qid;	/* Leaf QoS queue index; MLX5E_QOS_QID_INNER marks inner nodes. */
};
19
/* Software state of the HTB offload for one netdev. */
struct mlx5e_htb {
	DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));	/* classid -> mlx5e_qos_node */
	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);	/* qids currently assigned to leaves */
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	struct mlx5e_selq *selq;	/* TX queue selection state; prepared/applied on root add/del */
};
28
/* Sentinel qid marking a node as inner (no SQ attached); pairs with the
 * READ_ONCE/WRITE_ONCE accesses on node->qid in the datapath lookup.
 */
#define MLX5E_QOS_QID_INNER 0xffff
/* Root classid; deliberately outside the 16-bit range of real HTB classids. */
#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
31
32/* Software representation of the QoS tree */
33
34int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data)
35{
36	struct mlx5e_qos_node *node = NULL;
37	int bkt, err;
38
39	hash_for_each(htb->qos_tc2node, bkt, node, hnode) {
40		if (node->qid == MLX5E_QOS_QID_INNER)
41			continue;
42		err = callback(data, node->qid, node->hw_id);
43		if (err)
44			return err;
45	}
46	return 0;
47}
48
49int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb)
50{
51	int last;
52
53	last = find_last_bit(htb->qos_used_qids, mlx5e_qos_max_leaf_nodes(htb->mdev));
54	return last == mlx5e_qos_max_leaf_nodes(htb->mdev) ? 0 : last + 1;
55}
56
57static int mlx5e_htb_find_unused_qos_qid(struct mlx5e_htb *htb)
58{
59	int size = mlx5e_qos_max_leaf_nodes(htb->mdev);
60	struct mlx5e_priv *priv = htb->priv;
61	int res;
62
63	WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
64	res = find_first_zero_bit(htb->qos_used_qids, size);
65
66	return res == size ? -ENOSPC : res;
67}
68
/* Allocate a leaf node, claim @qid, publish the node in the classid hash
 * (visible to RCU readers from this point), and grow the netdev TX queue
 * count to cover the new QoS queue.
 *
 * Returns the new node or ERR_PTR(-ENOMEM).
 */
static struct mlx5e_qos_node *
mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,
			   struct mlx5e_qos_node *parent)
{
	struct mlx5e_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;

	node->qid = qid;
	__set_bit(qid, htb->qos_used_qids);

	node->classid = classid;
	/* Publication point: datapath lookups may find the node from here on. */
	hash_add_rcu(htb->qos_tc2node, &node->hnode, classid);

	mlx5e_update_tx_netdev_queues(htb->priv);

	return node;
}
91
92static struct mlx5e_qos_node *mlx5e_htb_node_create_root(struct mlx5e_htb *htb)
93{
94	struct mlx5e_qos_node *node;
95
96	node = kzalloc(sizeof(*node), GFP_KERNEL);
97	if (!node)
98		return ERR_PTR(-ENOMEM);
99
100	node->qid = MLX5E_QOS_QID_INNER;
101	node->classid = MLX5E_HTB_CLASSID_ROOT;
102	hash_add_rcu(htb->qos_tc2node, &node->hnode, node->classid);
103
104	return node;
105}
106
107static struct mlx5e_qos_node *mlx5e_htb_node_find(struct mlx5e_htb *htb, u32 classid)
108{
109	struct mlx5e_qos_node *node = NULL;
110
111	hash_for_each_possible(htb->qos_tc2node, node, hnode, classid) {
112		if (node->classid == classid)
113			break;
114	}
115
116	return node;
117}
118
119static struct mlx5e_qos_node *mlx5e_htb_node_find_rcu(struct mlx5e_htb *htb, u32 classid)
120{
121	struct mlx5e_qos_node *node = NULL;
122
123	hash_for_each_possible_rcu(htb->qos_tc2node, node, hnode, classid) {
124		if (node->classid == classid)
125			break;
126	}
127
128	return node;
129}
130
/* Unlink @node from the SW tree and free it.
 *
 * For leaf nodes the qid is released and the netdev TX queue count is
 * shrunk accordingly. The synchronize_net() below also guarantees that no
 * RCU reader (mlx5e_htb_node_find_rcu) still holds a reference when the
 * node is freed.
 */
static void mlx5e_htb_node_delete(struct mlx5e_htb *htb, struct mlx5e_qos_node *node)
{
	hash_del_rcu(&node->hnode);
	if (node->qid != MLX5E_QOS_QID_INNER) {
		__clear_bit(node->qid, htb->qos_used_qids);
		mlx5e_update_tx_netdev_queues(htb->priv);
	}
	/* Make sure this qid is no longer selected by mlx5e_select_queue, so
	 * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
	 */
	synchronize_net();
	kfree(node);
}
144
145/* TX datapath API */
146
147int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid)
148{
149	struct mlx5e_qos_node *node;
150	u16 qid;
151	int res;
152
153	rcu_read_lock();
154
155	node = mlx5e_htb_node_find_rcu(htb, classid);
156	if (!node) {
157		res = -ENOENT;
158		goto out;
159	}
160	qid = READ_ONCE(node->qid);
161	if (qid == MLX5E_QOS_QID_INNER) {
162		res = -EINVAL;
163		goto out;
164	}
165	res = mlx5e_qid_from_qos(&htb->priv->channels, qid);
166
167out:
168	rcu_read_unlock();
169	return res;
170}
171
172/* HTB TC handlers */
173
/* TC_HTB_CREATE: install the root of the offloaded HTB hierarchy.
 *
 * Prepares the TX queue selection (selq) for HTB classification, allocates
 * the QoS SQs if the netdev is up, creates the SW root node and its FW
 * counterpart, and only then applies the prepared selq state. On failure,
 * the completed steps are unwound in reverse order.
 */
static int
mlx5e_htb_root_add(struct mlx5e_htb *htb, u16 htb_maj_id, u16 htb_defcls,
		   struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = htb->priv;
	struct mlx5e_qos_node *root;
	bool opened;
	int err;

	qos_dbg(htb->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);

	mlx5e_selq_prepare_htb(htb->selq, htb_maj_id, htb_defcls);

	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (opened) {
		/* Queues only exist while the channels are open. */
		err = mlx5e_qos_alloc_queues(priv, &priv->channels);
		if (err)
			goto err_cancel_selq;
	}

	root = mlx5e_htb_node_create_root(htb);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto err_free_queues;
	}

	err = mlx5_qos_create_root_node(htb->mdev, &root->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
		goto err_sw_node_delete;
	}

	/* Commit the prepared selq parameters to the datapath. */
	mlx5e_selq_apply(htb->selq);

	return 0;

err_sw_node_delete:
	mlx5e_htb_node_delete(htb, root);

err_free_queues:
	if (opened)
		mlx5e_qos_close_all_queues(&priv->channels);
err_cancel_selq:
	mlx5e_selq_cancel(htb->selq);
	return err;
}
220
/* TC_HTB_DESTROY: tear down the offloaded HTB hierarchy.
 *
 * Switches queue selection back to the non-HTB path first, then destroys
 * the FW root node, the SW root, and all QoS SQs. A failure to destroy the
 * FW node is reported but teardown continues regardless.
 */
static int mlx5e_htb_root_del(struct mlx5e_htb *htb)
{
	struct mlx5e_priv *priv = htb->priv;
	struct mlx5e_qos_node *root;
	int err;

	qos_dbg(htb->mdev, "TC_HTB_DESTROY\n");

	/* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
	 * so that we can safely switch to its non-HTB non-PTP fastpath.
	 */
	synchronize_net();

	mlx5e_selq_prepare_htb(htb->selq, 0, 0);
	mlx5e_selq_apply(htb->selq);

	root = mlx5e_htb_node_find(htb, MLX5E_HTB_CLASSID_ROOT);
	if (!root) {
		qos_err(htb->mdev, "Failed to find the root node in the QoS tree\n");
		return -ENOENT;
	}
	err = mlx5_qos_destroy_node(htb->mdev, root->hw_id);
	if (err)
		qos_err(htb->mdev, "Failed to destroy root node %u, err = %d\n",
			root->hw_id, err);
	mlx5e_htb_node_delete(htb, root);

	mlx5e_qos_deactivate_all_queues(&priv->channels);
	mlx5e_qos_close_all_queues(&priv->channels);

	return err;
}
253
254static int mlx5e_htb_convert_rate(struct mlx5e_htb *htb, u64 rate,
255				  struct mlx5e_qos_node *parent, u32 *bw_share)
256{
257	u64 share = 0;
258
259	while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
260		parent = parent->parent;
261
262	if (parent->max_average_bw)
263		share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
264				  parent->max_average_bw);
265	else
266		share = 101;
267
268	*bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
269
270	qos_dbg(htb->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
271		rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
272
273	return 0;
274}
275
276static void mlx5e_htb_convert_ceil(struct mlx5e_htb *htb, u64 ceil, u32 *max_average_bw)
277{
278	/* Hardware treats 0 as "unlimited", set at least 1. */
279	*max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
280
281	qos_dbg(htb->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
282		ceil, *max_average_bw);
283}
284
/* TC_HTB_LEAF_ALLOC_QUEUE: create a new leaf class under @parent_classid.
 *
 * Allocates a free qid, creates the SW leaf node and its FW counterpart,
 * and (if the netdev is up) opens and activates the backing QoS SQ.
 * Failure to create the SQ is deliberately non-fatal: the class exists, the
 * queue just stays down. Returns the netdev TX queue index on success or a
 * negative error.
 */
int
mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
			   u32 parent_classid, u64 rate, u64 ceil,
			   struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *node, *parent;
	struct mlx5e_priv *priv = htb->priv;
	int qid;
	int err;

	qos_dbg(htb->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
		classid, parent_classid, rate, ceil);

	qid = mlx5e_htb_find_unused_qos_qid(htb);
	if (qid < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
		return qid;
	}

	parent = mlx5e_htb_node_find(htb, parent_classid);
	if (!parent)
		return -EINVAL;

	node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->rate = rate;
	/* Rate is cached on the node so bw_share can be recomputed later. */
	mlx5e_htb_convert_rate(htb, rate, node->parent, &node->bw_share);
	mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);

	err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->hw_id,
					node->bw_share, node->max_average_bw,
					&node->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
		qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
			classid, err);
		mlx5e_htb_node_delete(htb, node);
		return err;
	}

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
		if (err) {
			/* Non-fatal: the class remains, without an active SQ. */
			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
			qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
				 classid, err);
		} else {
			mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
		}
	}

	return mlx5e_qid_from_qos(&priv->channels, node->qid);
}
340
/* TC_HTB_LEAF_TO_INNER: convert leaf @classid into an inner node and create
 * its first child @child_classid.
 *
 * The leaf's qid is handed over to the new child, so no extra queue slot is
 * consumed. A new FW inner node replaces the old FW leaf; the old leaf's SQ
 * is closed before the child's SQ is opened on the same qid. Past the
 * "No fail point" marker, errors are logged but the operation completes.
 */
int
mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
			u64 rate, u64 ceil, struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *node, *child;
	struct mlx5e_priv *priv = htb->priv;
	int err, tmp_err;
	u32 new_hw_id;
	u16 qid;

	qos_dbg(htb->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
		classid, child_classid, rate, ceil);

	node = mlx5e_htb_node_find(htb, classid);
	if (!node)
		return -ENOENT;

	err = mlx5_qos_create_inner_node(htb->mdev, node->parent->hw_id,
					 node->bw_share, node->max_average_bw,
					 &new_hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
		qos_err(htb->mdev, "Failed to create an inner node (class %04x), err = %d\n",
			classid, err);
		return err;
	}

	/* Intentionally reuse the qid for the upcoming first child. */
	child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
	if (IS_ERR(child)) {
		err = PTR_ERR(child);
		goto err_destroy_hw_node;
	}

	child->rate = rate;
	mlx5e_htb_convert_rate(htb, rate, node, &child->bw_share);
	mlx5e_htb_convert_ceil(htb, ceil, &child->max_average_bw);

	err = mlx5_qos_create_leaf_node(htb->mdev, new_hw_id, child->bw_share,
					child->max_average_bw, &child->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
		qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
			classid, err);
		goto err_delete_sw_node;
	}

	/* No fail point. */

	qid = node->qid;
	/* Pairs with mlx5e_htb_get_txq_by_classid. */
	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* The SQ of the old leaf must be gone before the FW leaf is. */
		mlx5e_deactivate_qos_sq(priv, qid);
		mlx5e_close_qos_sq(priv, qid);
	}

	err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
	if (err) /* Not fatal. */
		qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
			 node->hw_id, classid, err);

	node->hw_id = new_hw_id;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_open_qos_sq(priv, &priv->channels, child->qid, child->hw_id);
		if (err) {
			/* Non-fatal: the child class exists without an SQ. */
			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
			qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
				 classid, err);
		} else {
			mlx5e_activate_qos_sq(priv, child->qid, child->hw_id);
		}
	}

	return 0;

err_delete_sw_node:
	/* Mark the child inner so node_delete does not release the shared qid,
	 * which still belongs to @node.
	 */
	child->qid = MLX5E_QOS_QID_INNER;
	mlx5e_htb_node_delete(htb, child);

err_destroy_hw_node:
	tmp_err = mlx5_qos_destroy_node(htb->mdev, new_hw_id);
	if (tmp_err) /* Not fatal. */
		qos_warn(htb->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
			 new_hw_id, classid, tmp_err);
	return err;
}
430
431static struct mlx5e_qos_node *mlx5e_htb_node_find_by_qid(struct mlx5e_htb *htb, u16 qid)
432{
433	struct mlx5e_qos_node *node = NULL;
434	int bkt;
435
436	hash_for_each(htb->qos_tc2node, bkt, node, hnode)
437		if (node->qid == qid)
438			break;
439
440	return node;
441}
442
443int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
444		       struct netlink_ext_ack *extack)
445{
446	struct mlx5e_priv *priv = htb->priv;
447	struct mlx5e_qos_node *node;
448	struct netdev_queue *txq;
449	u16 qid, moved_qid;
450	bool opened;
451	int err;
452
453	qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
454
455	node = mlx5e_htb_node_find(htb, *classid);
456	if (!node)
457		return -ENOENT;
458
459	/* Store qid for reuse. */
460	qid = node->qid;
461
462	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
463	if (opened) {
464		txq = netdev_get_tx_queue(htb->netdev,
465					  mlx5e_qid_from_qos(&priv->channels, qid));
466		mlx5e_deactivate_qos_sq(priv, qid);
467		mlx5e_close_qos_sq(priv, qid);
468	}
469
470	err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
471	if (err) /* Not fatal. */
472		qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
473			 node->hw_id, *classid, err);
474
475	mlx5e_htb_node_delete(htb, node);
476
477	moved_qid = mlx5e_htb_cur_leaf_nodes(htb);
478
479	if (moved_qid == 0) {
480		/* The last QoS SQ was just destroyed. */
481		if (opened)
482			mlx5e_reactivate_qos_sq(priv, qid, txq);
483		return 0;
484	}
485	moved_qid--;
486
487	if (moved_qid < qid) {
488		/* The highest QoS SQ was just destroyed. */
489		WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
490		     qid, moved_qid);
491		if (opened)
492			mlx5e_reactivate_qos_sq(priv, qid, txq);
493		return 0;
494	}
495
496	WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
497	qos_dbg(htb->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
498
499	node = mlx5e_htb_node_find_by_qid(htb, moved_qid);
500	WARN(!node, "Could not find a node with qid %u to move to queue %u",
501	     moved_qid, qid);
502
503	/* Stop traffic to the old queue. */
504	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
505	__clear_bit(moved_qid, priv->htb->qos_used_qids);
506
507	if (opened) {
508		txq = netdev_get_tx_queue(htb->netdev,
509					  mlx5e_qid_from_qos(&priv->channels, moved_qid));
510		mlx5e_deactivate_qos_sq(priv, moved_qid);
511		mlx5e_close_qos_sq(priv, moved_qid);
512	}
513
514	/* Prevent packets from the old class from getting into the new one. */
515	mlx5e_reset_qdisc(htb->netdev, moved_qid);
516
517	__set_bit(qid, htb->qos_used_qids);
518	WRITE_ONCE(node->qid, qid);
519
520	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
521		err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
522		if (err) {
523			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
524			qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
525				 node->classid, moved_qid, qid, err);
526		} else {
527			mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
528		}
529	}
530
531	mlx5e_update_tx_netdev_queues(priv);
532	if (opened)
533		mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
534
535	*classid = node->classid;
536	return 0;
537}
538
/* TC_HTB_LEAF_DEL_LAST: delete the last child of a parent, turning the
 * parent back into a leaf that inherits the child's qid.
 *
 * A new FW leaf node is created under the grandparent before anything is
 * torn down. In @force mode a FW failure at that step is remembered
 * (saved_err) and teardown proceeds anyway; the parent then stays an inner
 * node in FW, to be removed by a subsequent delete.
 */
int
mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
			struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *node, *parent;
	struct mlx5e_priv *priv = htb->priv;
	u32 old_hw_id, new_hw_id;
	int err, saved_err = 0;
	u16 qid;

	qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
		force ? "_FORCE" : "", classid);

	node = mlx5e_htb_node_find(htb, classid);
	if (!node)
		return -ENOENT;

	/* The parent becomes a leaf: its new FW node hangs off the grandparent. */
	err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->parent->hw_id,
					node->parent->bw_share,
					node->parent->max_average_bw,
					&new_hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
		qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
			classid, err);
		if (!force)
			return err;
		saved_err = err;
	}

	/* Store qid for reuse and prevent clearing the bit. */
	qid = node->qid;
	/* Pairs with mlx5e_htb_get_txq_by_classid. */
	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_deactivate_qos_sq(priv, qid);
		mlx5e_close_qos_sq(priv, qid);
	}

	/* Prevent packets from the old class from getting into the new one. */
	mlx5e_reset_qdisc(htb->netdev, qid);

	err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
	if (err) /* Not fatal. */
		qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
			 node->hw_id, classid, err);

	parent = node->parent;
	mlx5e_htb_node_delete(htb, node);

	/* The parent inherits the child's qid and becomes a leaf in SW. */
	node = parent;
	WRITE_ONCE(node->qid, qid);

	/* Early return on error in force mode. Parent will still be an inner
	 * node to be deleted by a following delete operation.
	 */
	if (saved_err)
		return saved_err;

	old_hw_id = node->hw_id;
	node->hw_id = new_hw_id;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
		if (err) {
			/* Non-fatal: the class exists without an active SQ. */
			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
			qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
				 classid, err);
		} else {
			mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
		}
	}

	/* Finally retire the parent's old FW inner node. */
	err = mlx5_qos_destroy_node(htb->mdev, old_hw_id);
	if (err) /* Not fatal. */
		qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
			 node->hw_id, classid, err);

	return 0;
}
620
/* Recompute bw_share for every direct child of @node after its ceil
 * changed, and push the new value to FW where it differs.
 *
 * All children are attempted even after a failure; the first FW error is
 * recorded and returned at the end.
 */
static int
mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
			  struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *child;
	int err = 0;
	int bkt;

	hash_for_each(htb->qos_tc2node, bkt, child, hnode) {
		u32 old_bw_share = child->bw_share;
		int err_one;

		if (child->parent != node)
			continue;

		mlx5e_htb_convert_rate(htb, child->rate, node, &child->bw_share);
		if (child->bw_share == old_bw_share)
			continue;

		err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
					       child->max_average_bw, child->hw_id);
		if (!err && err_one) {
			err = err_one;

			NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
			/* NOTE(review): this prints the parent's classid while
			 * the text says "child node" — confirm whether
			 * child->classid was intended.
			 */
			qos_err(htb->mdev, "Failed to modify a child node (class %04x), err = %d\n",
				node->classid, err);
		}
	}

	return err;
}
653
/* TC_HTB_LEAF_MODIFY: change rate/ceil of class @classid.
 *
 * The new values are converted, pushed to FW, and only committed to the SW
 * node after FW accepts them. If the ceil (max_average_bw) changed, the
 * children's bw_share values are recomputed since they are relative to the
 * nearest ancestor ceil.
 */
int
mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
		      struct netlink_ext_ack *extack)
{
	u32 bw_share, max_average_bw;
	struct mlx5e_qos_node *node;
	bool ceil_changed = false;
	int err;

	qos_dbg(htb->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
		classid, rate, ceil);

	node = mlx5e_htb_node_find(htb, classid);
	if (!node)
		return -ENOENT;

	node->rate = rate;
	mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
	mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);

	err = mlx5_qos_update_node(htb->mdev, bw_share,
				   max_average_bw, node->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
		qos_err(htb->mdev, "Failed to modify a node (class %04x), err = %d\n",
			classid, err);
		return err;
	}

	if (max_average_bw != node->max_average_bw)
		ceil_changed = true;

	/* FW accepted — commit to the SW node. */
	node->bw_share = bw_share;
	node->max_average_bw = max_average_bw;

	if (ceil_changed)
		err = mlx5e_htb_update_children(htb, node, extack);

	return err;
}
694
/* Allocate zeroed HTB state; pair with mlx5e_htb_free(). */
struct mlx5e_htb *mlx5e_htb_alloc(void)
{
	return kvzalloc(sizeof(struct mlx5e_htb), GFP_KERNEL);
}
699
/* Free HTB state allocated by mlx5e_htb_alloc(). */
void mlx5e_htb_free(struct mlx5e_htb *htb)
{
	kvfree(htb);
}
704
/* Initialize @htb and create the offloaded HTB root (TC_HTB_CREATE).
 * mlx5e_htb_cleanup() undoes this. Returns 0 or a negative error from the
 * root creation.
 */
int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
		   struct net_device *netdev, struct mlx5_core_dev *mdev,
		   struct mlx5e_selq *selq, struct mlx5e_priv *priv)
{
	htb->mdev = mdev;
	htb->netdev = netdev;
	htb->selq = selq;
	htb->priv = priv;
	hash_init(htb->qos_tc2node);
	return mlx5e_htb_root_add(htb, htb_qopt->parent_classid, htb_qopt->classid,
				  htb_qopt->extack);
}
717
/* Tear down the HTB offload created by mlx5e_htb_init(). Does not free
 * @htb itself — see mlx5e_htb_free().
 */
void mlx5e_htb_cleanup(struct mlx5e_htb *htb)
{
	mlx5e_htb_root_del(htb);
}
722
723