// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#define dev_fmt(fmt) "RateLimiting: " fmt

#include <asm/errno.h>
#include <asm/div64.h>

#include <linux/dev_printk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/units.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_rl_admin.h"
#include "adf_rl.h"
#include "adf_sysfs_rl.h"

#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET	0U
#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET	0U
#define RL_TOKEN_PCIE_SIZE			64
#define RL_TOKEN_ASYM_SIZE			1024
#define RL_CSR_SIZE				4U
#define RL_CAPABILITY_MASK			GENMASK(6, 4)
#define RL_CAPABILITY_VALUE			0x70
#define RL_VALIDATE_NON_ZERO(input)		((input) == 0)
#define ROOT_MASK				GENMASK(1, 0)
#define CLUSTER_MASK				GENMASK(3, 0)
#define LEAF_MASK				GENMASK(5, 0)

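/**
 * validate_user_input() - validate SLA parameters provided by the user
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data to be validated
 * @is_update: true if the data describes an update of an existing SLA
 *
 * If PIR is lower than CIR, PIR is raised to CIR. For a new SLA the number
 * of selected ring pairs, the service type, the node type and the parent ID
 * are additionally checked against their allowed ranges.
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- user input data is not valid
 */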
static int validate_user_input(struct adf_accel_dev *accel_dev,
			       struct adf_rl_sla_input_data *sla_in,
			       bool is_update)
{
	const unsigned long rp_mask = sla_in->rp_mask;
	size_t rp_mask_size;
	int i, cnt;

	if (sla_in->pir < sla_in->cir) {
		dev_notice(&GET_DEV(accel_dev),
			   "PIR must be >= CIR, setting PIR to CIR\n");
		sla_in->pir = sla_in->cir;
	}

	if (!is_update) {
		cnt = 0;
		rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE;
		for_each_set_bit(i, &rp_mask, rp_mask_size) {
			if (++cnt > RL_RP_CNT_PER_LEAF_MAX) {
				dev_notice(&GET_DEV(accel_dev),
					   "Too many ring pairs selected for this SLA\n");
				return -EINVAL;
			}
		}

		if (sla_in->srv >= ADF_SVC_NONE) {
			dev_notice(&GET_DEV(accel_dev),
				   "Wrong service type\n");
			return -EINVAL;
		}

		if (sla_in->type > RL_LEAF) {
			dev_notice(&GET_DEV(accel_dev),
				   "Wrong node type\n");
			return -EINVAL;
		}

		if (sla_in->parent_id < RL_PARENT_DEFAULT_ID ||
		    sla_in->parent_id >= RL_NODES_CNT_MAX) {
			dev_notice(&GET_DEV(accel_dev),
				   "Wrong parent ID\n");
			return -EINVAL;
		}
	}

	return 0;
}

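/**
 * validate_sla_id() - verify that sla_id refers to an existing leaf SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_id: ID of the SLA to validate
 *
 * Only leaf SLAs can be referenced directly by the user; root and cluster
 * IDs are reserved for internal use.
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- ID out of range, unused, or not a leaf node
 */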
static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id)
{
	struct rl_sla *sla;

	if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) {
		dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n");
		return -EINVAL;
	}

	sla = accel_dev->rate_limiting->sla[sla_id];

	if (!sla) {
		dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n");
		return -EINVAL;
	}

	if (sla->type != RL_LEAF) {
		dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * find_parent() - Find the parent for a new SLA
 * @rl_data: pointer to ratelimiting data
 * @sla_in: pointer to user input data for a new SLA
 *
 * Function returns a pointer to the parent SLA. If a parent ID is provided
 * in the user input data, that ID is validated and the corresponding parent
 * SLA is returned.
 * Otherwise, it returns the default parent SLA (root or cluster) for
 * the new object.
 *
 * Return:
 * * Pointer to the parent SLA object
 * * NULL - when parent cannot be found
 */
static struct rl_sla *find_parent(struct adf_rl *rl_data,
				  struct adf_rl_sla_input_data *sla_in)
{
	int input_parent_id = sla_in->parent_id;
	struct rl_sla *root = NULL;
	struct rl_sla *parent_sla;
	int i;

	if (sla_in->type == RL_ROOT)
		return NULL;

	if (input_parent_id > RL_PARENT_DEFAULT_ID) {
		parent_sla = rl_data->sla[input_parent_id];
		/*
		 * SLA can be a parent if it has the same service as the child
		 * and its type is higher in the hierarchy,
		 * for example the parent type of a LEAF must be a CLUSTER.
		 */
		if (parent_sla && parent_sla->srv == sla_in->srv &&
		    parent_sla->type == sla_in->type - 1)
			return parent_sla;

		return NULL;
	}

	/* If input_parent_id is not valid, get root for this service type. */
	for (i = 0; i < RL_ROOT_MAX; i++) {
		if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) {
			root = rl_data->root[i];
			break;
		}
	}

	if (!root)
		return NULL;

	/*
	 * If the type of this SLA is cluster, then return the root.
	 * Otherwise, find the default (i.e. first) cluster for this service.
	 */
	if (sla_in->type == RL_CLUSTER)
		return root;

	for (i = 0; i < RL_CLUSTER_MAX; i++) {
		if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root)
			return rl_data->cluster[i];
	}

	return NULL;
}

static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)
{
	switch (rl_srv) {
	case ADF_SVC_ASYM:
		return ASYM;
	case ADF_SVC_SYM:
		return SYM;
	case ADF_SVC_DC:
		return COMP;
	default:
		return UNUSED;
	}
}

/**
 * get_sla_arr_of_type() - Returns a pointer to SLA type specific array
 * @rl_data: pointer to ratelimiting data
 * @type: SLA type
 * @sla_arr: pointer to variable where requested pointer will be stored
 *
 * Return: Max number of elements allowed for the returned array
 */
static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
			       struct rl_sla ***sla_arr)
{
	switch (type) {
	case RL_LEAF:
		*sla_arr = rl_data->leaf;
		return RL_LEAF_MAX;
	case RL_CLUSTER:
		*sla_arr = rl_data->cluster;
		return RL_CLUSTER_MAX;
	case RL_ROOT:
		*sla_arr = rl_data->root;
		return RL_ROOT_MAX;
	default:
		*sla_arr = NULL;
		return 0;
	}
}

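/*
 * is_service_enabled() - Check if the given service is enabled on the device
 *
 * Walks the ring pairs of a single bundle and returns true if at least one
 * of them is configured for the requested service type.
 */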
static bool is_service_enabled(struct adf_accel_dev *accel_dev,
			       enum adf_base_services rl_srv)
{
	enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u8 rps_per_bundle = hw_data->num_banks_per_vf;
	int i;

	for (i = 0; i < rps_per_bundle; i++) {
		if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
			return true;
	}

	return false;
}

/**
 * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
 * @accel_dev: pointer to acceleration device structure
 * @sla: SLA object data where result will be written
 * @rp_mask: bitmask of ring pair IDs
 *
 * Function converts the provided bitmask into an array of IDs. It checks
 * that the RPs are not already in use, that they are assigned to the SLA's
 * service, and that the number of provided IDs is not too big. If
 * successful, the count is written to the field sla->ring_pairs_cnt.
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- ring pairs array cannot be created from provided mask
 */
static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
			  const unsigned long rp_mask)
{
	enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
	u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
	bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
	size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
	u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks;
	u16 cnt = 0;
	u16 rp_id;

	for_each_set_bit(rp_id, &rp_mask, rp_id_max) {
		if (cnt >= rp_cnt_max) {
			dev_notice(&GET_DEV(accel_dev),
				   "Assigned more ring pairs than supported");
			return -EINVAL;
		}

		if (rp_in_use[rp_id]) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP %u already assigned to other SLA", rp_id);
			return -EINVAL;
		}

		if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP %u does not support SLA service", rp_id);
			return -EINVAL;
		}

		sla->ring_pairs_ids[cnt++] = rp_id;
	}

	sla->ring_pairs_cnt = cnt;

	return 0;
}

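/*
 * mark_rps_usage() - Mark the ring pairs of a given SLA as used or free
 * in the rp_in_use tracking array.
 */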
static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used)
{
	u16 rp_id;
	int i;

	for (i = 0; i < sla->ring_pairs_cnt; i++) {
		rp_id = sla->ring_pairs_ids[i];
		rp_in_use[rp_id] = used;
	}
}

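/*
 * The assign_*() helpers below program the hardware mapping between the
 * levels of the rate limiting tree. Each mapping is a CSR write: ring pair
 * to leaf (r2l), leaf to cluster (l2c) and cluster to root (c2s), at an
 * offset of RL_CSR_SIZE bytes per node. Passing clear == true writes the
 * default (zero) assignment instead.
 */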
static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev,
			       struct rl_sla *sla, bool clear)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.r2l_offset;
	u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK);
	u32 offset;
	int i;

	for (i = 0; i < sla->ring_pairs_cnt; i++) {
		offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]);
		ADF_CSR_WR(pmisc_addr, offset, node_id);
	}
}

static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev,
				   struct rl_sla *sla, bool clear)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.l2c_offset;
	u32 node_id = sla->node_id & LEAF_MASK;
	u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK);
	u32 offset;

	offset = base_offset + (RL_CSR_SIZE * node_id);
	ADF_CSR_WR(pmisc_addr, offset, parent_id);
}

static void assign_cluster_to_root(struct adf_accel_dev *accel_dev,
				   struct rl_sla *sla, bool clear)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.c2s_offset;
	u32 node_id = sla->node_id & CLUSTER_MASK;
	u32 parent_id = clear ? 0U : (sla->parent->node_id & ROOT_MASK);
	u32 offset;

	offset = base_offset + (RL_CSR_SIZE * node_id);
	ADF_CSR_WR(pmisc_addr, offset, parent_id);
}

static void assign_node_to_parent(struct adf_accel_dev *accel_dev,
				  struct rl_sla *sla, bool clear_assignment)
{
	switch (sla->type) {
	case RL_LEAF:
		assign_rps_to_leaf(accel_dev, sla, clear_assignment);
		assign_leaf_to_cluster(accel_dev, sla, clear_assignment);
		break;
	case RL_CLUSTER:
		assign_cluster_to_root(accel_dev, sla, clear_assignment);
		break;
	default:
		break;
	}
}

/**
 * can_parent_afford_sla() - Verifies if the parent can accommodate an SLA
 * @sla_in: pointer to user input data for a new SLA
 * @sla_parent: pointer to parent SLA object
 * @sla_cir: current child CIR value (only for update)
 * @is_update: request is an update
 *
 * Algorithm verifies if the parent has enough remaining budget to take
 * assignment of a child with the provided parameters. In the update case
 * the current CIR value must be returned to the budget first.
 * PIR value cannot exceed the PIR assigned to parent.
 *
 * Return:
 * * true	- SLA can be created
 * * false	- SLA cannot be created
 */
static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in,
				  struct rl_sla *sla_parent, u32 sla_cir,
				  bool is_update)
{
	u32 rem_cir = sla_parent->rem_cir;

	if (is_update)
		rem_cir += sla_cir;

	if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir)
		return false;

	return true;
}

/**
 * can_node_afford_update() - Verifies if SLA can be updated with input data
 * @sla_in: pointer to user input data for a new SLA
 * @sla: pointer to SLA object selected for update
 *
 * Algorithm verifies if the new CIR value is big enough to satisfy currently
 * assigned child SLAs and if the PIR can be updated.
 *
 * Return:
 * * true	- SLA can be updated
 * * false	- SLA cannot be updated
 */
static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in,
				   struct rl_sla *sla)
{
	u32 cir_in_use = sla->cir - sla->rem_cir;

	/* new CIR cannot be smaller than currently consumed value */
	if (cir_in_use > sla_in->cir)
		return false;

	/* PIR of root/cluster cannot be reduced in node with assigned children */
	if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0)
		return false;

	return true;
}

static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla,
			     struct adf_rl_sla_input_data *sla_in,
			     bool is_update)
{
	u32 max_val = rl_data->device_data->scale_ref;
	struct rl_sla *parent = sla->parent;
	bool ret = true;

	if (sla_in->cir > max_val || sla_in->pir > max_val)
		ret = false;

	switch (sla->type) {
	case RL_LEAF:
		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
					     is_update);
		break;
	case RL_CLUSTER:
		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
					     is_update);

		if (is_update)
			ret &= can_node_afford_update(sla_in, sla);

		break;
	case RL_ROOT:
		if (is_update)
			ret &= can_node_afford_update(sla_in, sla);

		break;
	default:
		ret = false;
		break;
	}

	return ret;
}

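/*
 * update_budget() - Propagate a committed rate change through the tree
 *
 * Subtracts the new CIR from the parent's remaining budget and recomputes
 * this node's own remaining budget. On update the previous CIR value is
 * returned to the budget first. Leaf nodes have no children, so their
 * remaining budget is always zero.
 */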
static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update)
{
	switch (sla->type) {
	case RL_LEAF:
		if (is_update)
			sla->parent->rem_cir += old_cir;

		sla->parent->rem_cir -= sla->cir;
		sla->rem_cir = 0;
		break;
	case RL_CLUSTER:
		if (is_update) {
			sla->parent->rem_cir += old_cir;
			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
		} else {
			sla->rem_cir = sla->cir;
		}

		sla->parent->rem_cir -= sla->cir;
		break;
	case RL_ROOT:
		if (is_update)
			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
		else
			sla->rem_cir = sla->cir;
		break;
	default:
		break;
	}
}

/**
 * get_next_free_sla_id() - finds next free ID in the SLA array
 * @rl_data: Pointer to ratelimiting data structure
 *
 * Return:
 * * 0 : RL_NODES_CNT_MAX	- correct ID
 * * -ENOSPC			- all SLA slots are in use
 */
static int get_next_free_sla_id(struct adf_rl *rl_data)
{
	int i;

	for (i = 0; i < RL_NODES_CNT_MAX; i++) {
		if (!rl_data->sla[i])
			return i;
	}

	return -ENOSPC;
}

/**
 * get_next_free_node_id() - finds next free ID in the array of that node type
 * @rl_data: Pointer to ratelimiting data structure
 * @sla: Pointer to SLA object for which the ID is searched
 *
 * Return:
 * * 0 : RL_[NODE_TYPE]_MAX	- correct ID
 * * -ENOSPC			- all slots of that type are in use
 */
static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla)
{
	struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev);
	int max_id, i, step, rp_per_leaf;
	struct rl_sla **sla_list;

	rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf;

	/*
	 * Static nodes mapping:
	 * root0 - cluster[0,4,8,12] - leaf[0-15]
	 * root1 - cluster[1,5,9,13] - leaf[16-31]
	 * root2 - cluster[2,6,10,14] - leaf[32-47]
	 */
	switch (sla->type) {
	case RL_LEAF:
		i = sla->srv * rp_per_leaf;
		step = 1;
		max_id = i + rp_per_leaf;
		sla_list = rl_data->leaf;
		break;
	case RL_CLUSTER:
		i = sla->srv;
		step = 4;
		max_id = RL_CLUSTER_MAX;
		sla_list = rl_data->cluster;
		break;
	case RL_ROOT:
		return sla->srv;
	default:
		return -EINVAL;
	}

	while (i < max_id && sla_list[i])
		i += step;

	if (i >= max_id)
		return -ENOSPC;

	return i;
}

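/*
 * adf_rl_calculate_slice_tokens() - Convert an SLA value into slice tokens
 *
 * Tokens are the number of slice clock cycles available per scan interval,
 * scaled by the requested SLA:
 *
 *	tokens = (clock_frequency * slice_cnt / scan_interval)
 *		 * sla_val / scale_ref
 *
 * Illustrative example (made-up device parameters, not real values): with
 * a 500 MHz clock, 2 slices, a scan_interval of 1000 and scale_ref of 100,
 * an sla_val of 50 yields (500000000 * 2 / 1000) * 50 / 100 = 500000
 * tokens per interval.
 */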
u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
				  enum adf_base_services svc_type)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u64 avail_slice_cycles, allocated_tokens;

	if (!sla_val)
		return 0;

	avail_slice_cycles = hw_data->clock_frequency;

	switch (svc_type) {
	case ADF_SVC_ASYM:
		avail_slice_cycles *= device_data->slices.pke_cnt;
		break;
	case ADF_SVC_SYM:
		avail_slice_cycles *= device_data->slices.cph_cnt;
		break;
	case ADF_SVC_DC:
		avail_slice_cycles *= device_data->slices.dcpr_cnt;
		break;
	default:
		break;
	}

	do_div(avail_slice_cycles, device_data->scan_interval);
	allocated_tokens = avail_slice_cycles * sla_val;
	do_div(allocated_tokens, device_data->scale_ref);

	return allocated_tokens;
}

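/*
 * adf_rl_calculate_ae_cycles() - Convert an SLA value into AE cycles
 *
 * The budget is derived from the cycles of all acceleration engines but
 * one, per scan interval, prorated by the SLA's fraction of the maximum
 * throughput for the given service.
 */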
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
			       enum adf_base_services svc_type)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u64 allocated_ae_cycles, avail_ae_cycles;

	if (!sla_val)
		return 0;

	avail_ae_cycles = hw_data->clock_frequency;
	avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
	do_div(avail_ae_cycles, device_data->scan_interval);

	sla_val *= device_data->max_tp[svc_type];
	sla_val /= device_data->scale_ref;

	allocated_ae_cycles = (sla_val * avail_ae_cycles);
	do_div(allocated_ae_cycles, device_data->max_tp[svc_type]);

	return allocated_ae_cycles;
}

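/*
 * adf_rl_calculate_pci_bw() - Convert an SLA value into PCIe bandwidth tokens
 *
 * The SLA is first translated into bytes per scan interval (asymmetric
 * crypto uses a fixed token size, other services use megabits), then scaled
 * by the device specific pcie_scale_mul/pcie_scale_div ratio and divided
 * into RL_TOKEN_PCIE_SIZE chunks. For compression output bandwidth the
 * value is additionally multiplied by the corrected slice count.
 */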
u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
			    enum adf_base_services svc_type, bool is_bw_out)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	u64 sla_to_bytes, allocated_bw, sla_scaled;

	if (!sla_val)
		return 0;

	sla_to_bytes = sla_val;
	sla_to_bytes *= device_data->max_tp[svc_type];
	do_div(sla_to_bytes, device_data->scale_ref);

	sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :
						     BYTES_PER_MBIT;
	if (svc_type == ADF_SVC_DC && is_bw_out)
		sla_to_bytes *= device_data->slices.dcpr_cnt -
				device_data->dcpr_correction;

	sla_scaled = sla_to_bytes * device_data->pcie_scale_mul;
	do_div(sla_scaled, device_data->pcie_scale_div);
	allocated_bw = sla_scaled;
	do_div(allocated_bw, RL_TOKEN_PCIE_SIZE);
	do_div(allocated_bw, device_data->scan_interval);

	return allocated_bw;
}

/**
 * add_new_sla_entry() - creates a new SLA object and fills it with user data
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data for a new SLA
 * @sla_out: Pointer to variable that will contain the address of a new
 *	     SLA object if the operation succeeds
 *
 * Return:
 * * 0		- ok
 * * -ENOMEM	- memory allocation failed
 * * -EINVAL	- invalid user input
 * * -ENOSPC	- all available SLAs are in use
 */
static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
			     struct adf_rl_sla_input_data *sla_in,
			     struct rl_sla **sla_out)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla;
	int ret = 0;

	sla = kzalloc(sizeof(*sla), GFP_KERNEL);
	if (!sla) {
		ret = -ENOMEM;
		goto ret_err;
	}
	*sla_out = sla;

	if (!is_service_enabled(accel_dev, sla_in->srv)) {
		dev_notice(&GET_DEV(accel_dev),
			   "Provided service is not enabled\n");
		ret = -EINVAL;
		goto ret_err;
	}

	sla->srv = sla_in->srv;
	sla->type = sla_in->type;
	ret = get_next_free_node_id(rl_data, sla);
	if (ret < 0) {
		dev_notice(&GET_DEV(accel_dev),
			   "Exceeded number of available nodes for that service\n");
		goto ret_err;
	}
	sla->node_id = ret;

	ret = get_next_free_sla_id(rl_data);
	if (ret < 0) {
		dev_notice(&GET_DEV(accel_dev),
			   "Allocated maximum SLAs number\n");
		goto ret_err;
	}
	sla->sla_id = ret;

	sla->parent = find_parent(rl_data, sla_in);
	if (!sla->parent && sla->type != RL_ROOT) {
		if (sla_in->parent_id != RL_PARENT_DEFAULT_ID)
			dev_notice(&GET_DEV(accel_dev),
				   "Provided parent ID does not exist or cannot be parent for this SLA.");
		else
			dev_notice(&GET_DEV(accel_dev),
				   "Unable to find parent node for this service. Is service enabled?");
		ret = -EINVAL;
		goto ret_err;
	}

	if (sla->type == RL_LEAF) {
		ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask);
		if (!sla->ring_pairs_cnt || ret) {
			dev_notice(&GET_DEV(accel_dev),
				   "Unable to find ring pairs to assign to the leaf");
			if (!ret)
				ret = -EINVAL;

			goto ret_err;
		}
	}

	return 0;

ret_err:
	kfree(sla);
	*sla_out = NULL;

	return ret;
}

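/*
 * initialize_default_nodes() - Create the default SLA tree
 *
 * For every enabled service a root node is created with the full budget
 * (scale_ref), followed by one default cluster per root inheriting the
 * root's CIR and PIR.
 */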
static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct adf_rl_hw_data *device_data = rl_data->device_data;
	struct adf_rl_sla_input_data sla_in = { };
	int ret = 0;
	int i;

	/* Init root for each enabled service */
	sla_in.type = RL_ROOT;
	sla_in.parent_id = RL_PARENT_DEFAULT_ID;

	for (i = 0; i < ADF_SVC_NONE; i++) {
		if (!is_service_enabled(accel_dev, i))
			continue;

		sla_in.cir = device_data->scale_ref;
		sla_in.pir = sla_in.cir;
		sla_in.srv = i;

		ret = adf_rl_add_sla(accel_dev, &sla_in);
		if (ret)
			return ret;
	}

	/* Init default cluster for each root */
	sla_in.type = RL_CLUSTER;
	for (i = 0; i < ADF_SVC_NONE; i++) {
		if (!rl_data->root[i])
			continue;

		sla_in.cir = rl_data->root[i]->cir;
		sla_in.pir = sla_in.cir;
		sla_in.srv = rl_data->root[i]->srv;

		ret = adf_rl_add_sla(accel_dev, &sla_in);
		if (ret)
			return ret;
	}

	return 0;
}

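/*
 * clear_sla() - Remove a single SLA and release its resources
 *
 * Returns the SLA's rate back to the parent budget, clears the hardware
 * node assignment, notifies the firmware, releases the ring pairs and
 * frees the SLA object. Must be called with rl_lock held.
 */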
static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
{
	bool *rp_in_use = rl_data->rp_in_use;
	struct rl_sla **sla_type_arr = NULL;
	int i, sla_id, node_id;
	u32 old_cir;

	sla_id = sla->sla_id;
	node_id = sla->node_id;
	old_cir = sla->cir;
	sla->cir = 0;
	sla->pir = 0;

	for (i = 0; i < sla->ring_pairs_cnt; i++)
		rp_in_use[sla->ring_pairs_ids[i]] = false;

	update_budget(sla, old_cir, true);
	get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
	assign_node_to_parent(rl_data->accel_dev, sla, true);
	adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
	mark_rps_usage(sla, rl_data->rp_in_use, false);

	kfree(sla);
	rl_data->sla[sla_id] = NULL;
	sla_type_arr[node_id] = NULL;
}

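/*
 * free_all_sla() - Free all SLA objects without touching the hardware
 *
 * Unlike clear_sla(), no admin messages are sent and no budgets or CSRs
 * are updated; only the memory is released. Used on the shutdown path.
 */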
static void free_all_sla(struct adf_accel_dev *accel_dev)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	int sla_id;

	mutex_lock(&rl_data->rl_lock);

	for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) {
		if (!rl_data->sla[sla_id])
			continue;

		kfree(rl_data->sla[sla_id]);
		rl_data->sla[sla_id] = NULL;
	}

	mutex_unlock(&rl_data->rl_lock);
}

/**
 * add_update_sla() - handles the creation and the update of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data for a new/updated SLA
 * @is_update: flag to indicate if this is an update or an add operation
 *
 * Return:
 * * 0		- ok
 * * -ENOMEM	- memory allocation failed
 * * -EINVAL	- user input data cannot be used to create SLA
 * * -ENOSPC	- all available SLAs are in use
 */
static int add_update_sla(struct adf_accel_dev *accel_dev,
			  struct adf_rl_sla_input_data *sla_in, bool is_update)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla **sla_type_arr = NULL;
	struct rl_sla *sla = NULL;
	u32 old_cir = 0;
	int ret;

	if (!sla_in) {
		dev_warn(&GET_DEV(accel_dev),
			 "SLA input data pointer is missing\n");
		return -EFAULT;
	}

	mutex_lock(&rl_data->rl_lock);

	/* Input validation */
	ret = validate_user_input(accel_dev, sla_in, is_update);
	if (ret)
		goto ret_err;

	if (is_update) {
		ret = validate_sla_id(accel_dev, sla_in->sla_id);
		if (ret)
			goto ret_err;

		sla = rl_data->sla[sla_in->sla_id];
		old_cir = sla->cir;
	} else {
		ret = add_new_sla_entry(accel_dev, sla_in, &sla);
		if (ret)
			goto ret_err;
	}

	if (!is_enough_budget(rl_data, sla, sla_in, is_update)) {
		dev_notice(&GET_DEV(accel_dev),
			   "Input value exceeds the remaining budget%s\n",
			   is_update ? " or more budget is already in use" : "");
		ret = -EINVAL;
		goto ret_err;
	}
	sla->cir = sla_in->cir;
	sla->pir = sla_in->pir;

	/* Apply SLA */
	assign_node_to_parent(accel_dev, sla, false);
	ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update);
	if (ret) {
		dev_notice(&GET_DEV(accel_dev),
			   "Failed to apply an SLA\n");
		goto ret_err;
	}
	update_budget(sla, old_cir, is_update);

	if (!is_update) {
		mark_rps_usage(sla, rl_data->rp_in_use, true);
		get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
		sla_type_arr[sla->node_id] = sla;
		rl_data->sla[sla->sla_id] = sla;
	}

	sla_in->sla_id = sla->sla_id;
	goto ret_ok;

ret_err:
	if (!is_update) {
		sla_in->sla_id = -1;
		kfree(sla);
	}
ret_ok:
	mutex_unlock(&rl_data->rl_lock);
	return ret;
}

/**
 * adf_rl_add_sla() - handles the creation of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data required to add an SLA
 *
 * Return:
 * * 0		- ok
 * * -ENOMEM	- memory allocation failed
 * * -EINVAL	- invalid user input
 * * -ENOSPC	- all available SLAs are in use
 */
int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in)
{
	return add_update_sla(accel_dev, sla_in, false);
}

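/*
 * Example usage (illustrative): requesting a leaf SLA for the symmetric
 * crypto service on ring pair 0. The numeric rates below are placeholders,
 * not validated values; real rates are bounded by the device's scale_ref
 * and the parent's remaining budget.
 *
 *	struct adf_rl_sla_input_data sla_in = {
 *		.rp_mask = BIT(0),
 *		.type = RL_LEAF,
 *		.srv = ADF_SVC_SYM,
 *		.parent_id = RL_PARENT_DEFAULT_ID,
 *		.cir = 500,
 *		.pir = 1000,
 *	};
 *	int ret = adf_rl_add_sla(accel_dev, &sla_in);
 *	if (!ret)
 *		pr_debug("new SLA id: %d\n", sla_in.sla_id);
 */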
/**
 * adf_rl_update_sla() - handles the update of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data required to update an SLA
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- user input data cannot be used to update SLA
 */
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
		      struct adf_rl_sla_input_data *sla_in)
{
	return add_update_sla(accel_dev, sla_in, true);
}

/**
 * adf_rl_get_sla() - returns an existing SLA data
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user data where SLA info will be stored
 *
 * The sla_id for which data are requested should be set in the sla_in
 * structure.
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- provided sla_id does not exist
 */
int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in)
{
	struct rl_sla *sla;
	int ret, i;

	ret = validate_sla_id(accel_dev, sla_in->sla_id);
	if (ret)
		return ret;

	sla = accel_dev->rate_limiting->sla[sla_in->sla_id];
	sla_in->type = sla->type;
	sla_in->srv = sla->srv;
	sla_in->cir = sla->cir;
	sla_in->pir = sla->pir;
	sla_in->rp_mask = 0U;
	if (sla->parent)
		sla_in->parent_id = sla->parent->sla_id;
	else
		sla_in->parent_id = RL_PARENT_DEFAULT_ID;

	for (i = 0; i < sla->ring_pairs_cnt; i++)
		sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]);

	return 0;
}

/**
 * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR)
 *				       for selected service or provided sla_id
 * @accel_dev: pointer to acceleration device structure
 * @srv: service ID for which capability is requested
 * @sla_id: ID of the cluster or root to which we want to assign a new SLA
 *
 * Check if the provided SLA id is valid. If it is, and the service matches
 * the requested service and the type is cluster or root, return the remaining
 * capability.
 * If the provided ID does not match the service or type, return the remaining
 * capacity of the default cluster for that service.
 *
 * Return:
 * * Positive value	- correct remaining value
 * * -EINVAL		- algorithm cannot find a remaining value for provided data
 */
int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
				    enum adf_base_services srv, int sla_id)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla = NULL;
	int i;

	if (srv >= ADF_SVC_NONE)
		return -EINVAL;

	if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
		sla = rl_data->sla[sla_id];

		if (sla->srv == srv && sla->type <= RL_CLUSTER)
			goto ret_ok;
	}

	for (i = 0; i < RL_CLUSTER_MAX; i++) {
		if (!rl_data->cluster[i])
			continue;

		if (rl_data->cluster[i]->srv == srv) {
			sla = rl_data->cluster[i];
			goto ret_ok;
		}
	}

	return -EINVAL;
ret_ok:
	return sla->rem_cir;
}

/**
 * adf_rl_remove_sla() - removes provided sla_id
 * @accel_dev: pointer to acceleration device structure
 * @sla_id: ID of the SLA to be removed
 *
 * Return:
 * * 0		- ok
 * * -EINVAL	- wrong sla_id or the SLA still has assigned children
 */
int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla;
	int ret = 0;

	mutex_lock(&rl_data->rl_lock);
	ret = validate_sla_id(accel_dev, sla_id);
	if (ret)
		goto err_ret;

	sla = rl_data->sla[sla_id];

	if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) {
		dev_notice(&GET_DEV(accel_dev),
			   "To remove parent SLA all its children must be removed first");
		ret = -EINVAL;
		goto err_ret;
	}

	clear_sla(rl_data, sla);

err_ret:
	mutex_unlock(&rl_data->rl_lock);
	return ret;
}

/**
 * adf_rl_remove_sla_all() - removes all SLAs from device
 * @accel_dev: pointer to acceleration device structure
 * @incl_default: set to true if default SLAs also should be removed
 */
void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	int end_type = incl_default ? RL_ROOT : RL_LEAF;
	struct rl_sla **sla_type_arr = NULL;
	u32 max_id;
	int i, j;

	mutex_lock(&rl_data->rl_lock);

	/* Unregister and remove all SLAs */
	for (j = RL_LEAF; j >= end_type; j--) {
		max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr);

		for (i = 0; i < max_id; i++) {
			if (!sla_type_arr[i])
				continue;

			clear_sla(rl_data, sla_type_arr[i]);
		}
	}

	mutex_unlock(&rl_data->rl_lock);
}

int adf_rl_init(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data;
	struct adf_rl *rl;
	int ret = 0;

	/* Validate device parameters */
	if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) {
		ret = -EOPNOTSUPP;
		goto err_ret;
	}

	rl = kzalloc(sizeof(*rl), GFP_KERNEL);
	if (!rl) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&rl->rl_lock);
	rl->device_data = &accel_dev->hw_device->rl_data;
	rl->accel_dev = accel_dev;
	accel_dev->rate_limiting = rl;

err_ret:
	return ret;
}

int adf_rl_start(struct adf_accel_dev *accel_dev)
{
	struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data;
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;
	int ret;

	if (!accel_dev->rate_limiting) {
		ret = -EOPNOTSUPP;
		goto ret_err;
	}

	if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
		dev_info(&GET_DEV(accel_dev), "not supported\n");
		ret = -EOPNOTSUPP;
		goto ret_free;
	}

	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset,
		   RL_TOKEN_GRANULARITY_PCIEIN_BUCKET);
	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset,
		   RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET);

	ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "initialization failed\n");
		goto ret_free;
	}

	ret = initialize_default_nodes(accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"failed to initialize default SLAs\n");
		goto ret_sla_rm;
	}

	ret = adf_sysfs_rl_add(accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n");
		goto ret_sla_rm;
	}

	return 0;

ret_sla_rm:
	adf_rl_remove_sla_all(accel_dev, true);
ret_free:
	kfree(accel_dev->rate_limiting);
	accel_dev->rate_limiting = NULL;
ret_err:
	return ret;
}

void adf_rl_stop(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->rate_limiting)
		return;

	adf_sysfs_rl_rm(accel_dev);
	free_all_sla(accel_dev);
}

void adf_rl_exit(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->rate_limiting)
		return;

	kfree(accel_dev->rate_limiting);
	accel_dev->rate_limiting = NULL;
}