// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_compression.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_compression;

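/*
 * Drop a reference to a compression instance and to the accelerator device
 * that backs it. Pairs with qat_compression_get_instance_node().
 */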
void qat_compression_put_instance(struct qat_compression_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

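/*
 * Tear down all compression instances of a device: release any outstanding
 * references, remove the transport rings and free the instance memory.
 */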
static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_compression_instance *inst;
	struct list_head *list_ptr, *tmp;
	int refs;
	int i;

	list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
		inst = list_entry(list_ptr,
				  struct qat_compression_instance, list);

		/*
		 * Read the reference count once; each put decrements it, so
		 * re-evaluating it in the loop condition would release only
		 * part of the outstanding references.
		 */
		refs = atomic_read(&inst->refctr);
		for (i = 0; i < refs; i++)
			qat_compression_put_instance(inst);

		if (inst->dc_tx)
			adf_remove_ring(inst->dc_tx);

		if (inst->dc_rx)
			adf_remove_ring(inst->dc_rx);

		list_del(list_ptr);
		kfree(inst);
	}
	return 0;
}

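/*
 * Return the least loaded compression instance, preferring devices local to
 * @node. If no NUMA-local device is available, fall back to any started
 * device that exposes compression instances. On success, both the instance
 * and the device reference counts are incremented; the caller must release
 * them with qat_compression_put_instance().
 */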
struct qat_compression_instance *qat_compression_get_instance_node(int node)
{
	struct qat_compression_instance *inst = NULL;
	struct adf_accel_dev *accel_dev = NULL;
	unsigned long best = ~0;
	struct list_head *itr;

	list_for_each(itr, adf_devmgr_get_head()) {
		struct adf_accel_dev *tmp_dev;
		unsigned long ctr;
		int tmp_dev_node;

		tmp_dev = list_entry(itr, struct adf_accel_dev, list);
		tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));

		if ((node == tmp_dev_node || tmp_dev_node < 0) &&
		    adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each(itr, adf_devmgr_get_head()) {
			struct adf_accel_dev *tmp_dev;

			tmp_dev = list_entry(itr, struct adf_accel_dev, list);
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->compression_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each(itr, &accel_dev->compression_list) {
		struct qat_compression_instance *tmp_inst;
		unsigned long ctr;

		tmp_inst = list_entry(itr, struct qat_compression_instance, list);
		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

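/*
 * Create the compression instances described in the device configuration.
 * For each instance, read the ring bank and ring size, create the request
 * (TX) and response (RX) rings, and add the instance to the device list.
 */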
static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_compression_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long num_inst, num_msg_dc;
	unsigned long bank;
	int msg_size;
	int ret;
	int i;

	INIT_LIST_HEAD(&accel_dev->compression_list);
	strscpy(key, ADF_NUM_DC, sizeof(key));
	ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
	if (ret)
		return ret;

	ret = kstrtoul(val, 10, &num_inst);
	if (ret)
		return ret;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst) {
			ret = -ENOMEM;
			goto err;
		}

		list_add_tail(&inst->list, &accel_dev->compression_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;

		/*
		 * The instance is already on the compression list; on failure
		 * jump to the error path so instances and rings created so
		 * far are cleaned up.
		 */
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &bank);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &num_msg_dc);
		if (ret)
			goto err;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
		ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
				      msg_size, key, NULL, 0, &inst->dc_tx);
		if (ret)
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
		ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
				      msg_size, key, qat_comp_alg_callback, 0,
				      &inst->dc_rx);
		if (ret)
			goto err;

		inst->dc_data = accel_dev->dc_data;
		INIT_LIST_HEAD(&inst->backlog.list);
		spin_lock_init(&inst->backlog.lock);
	}
	return 0;
err:
	qat_compression_free_instances(accel_dev);
	return ret;
}

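/*
 * Allocate and DMA map the per-device overflow buffer that is shared by all
 * compression instances of the device.
 */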
static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t obuff_p = DMA_MAPPING_ERROR;
	size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
	struct adf_dc_data *dc_data = NULL;
	u8 *obuff = NULL;

	dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
	if (!dc_data)
		goto err;

	obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
	if (!obuff)
		goto err;

	obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, obuff_p)))
		goto err;

	dc_data->ovf_buff = obuff;
	dc_data->ovf_buff_p = obuff_p;
	dc_data->ovf_buff_sz = ovf_buff_sz;

	accel_dev->dc_data = dc_data;

	return 0;

err:
	accel_dev->dc_data = NULL;
	kfree(obuff);
	devm_kfree(dev, dc_data);
	return -ENOMEM;
}

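/* Unmap and free the per-device overflow buffer, if one was allocated */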
static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
{
	struct adf_dc_data *dc_data = accel_dev->dc_data;
	struct device *dev = &GET_DEV(accel_dev);

	if (!dc_data)
		return;

	dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
			 DMA_FROM_DEVICE);
	kfree_sensitive(dc_data->ovf_buff);
	devm_kfree(dev, dc_data);
	accel_dev->dc_data = NULL;
}

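/* ADF_EVENT_INIT handler: allocate the overflow buffer and create instances */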
static int qat_compression_init(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = qat_compression_alloc_dc_data(accel_dev);
	if (ret)
		return ret;

	ret = qat_compression_create_instances(accel_dev);
	if (ret)
		qat_free_dc_data(accel_dev);

	return ret;
}

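/* ADF_EVENT_SHUTDOWN handler: free the overflow buffer and all instances */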
static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
{
	qat_free_dc_data(accel_dev);
	return qat_compression_free_instances(accel_dev);
}

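/* Dispatch device lifecycle events to the compression service */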
static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
					 enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_compression_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_compression_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

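/* Register the compression service handler with the ADF service framework */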
int qat_compression_register(void)
{
	memset(&qat_compression, 0, sizeof(qat_compression));
	qat_compression.event_hld = qat_compression_event_handler;
	qat_compression.name = "qat_compression";
	return adf_service_register(&qat_compression);
}

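/* Remove the compression service handler from the ADF service framework */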
int qat_compression_unregister(void)
{
	return adf_service_unregister(&qat_compression);
}