1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <crypto/internal/acompress.h>
4#include <linux/bitfield.h>
5#include <linux/bitmap.h>
6#include <linux/dma-mapping.h>
7#include <linux/scatterlist.h>
8#include "zip.h"
9
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M			GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M		GENMASK(23, 0)
#define HZIP_SQE_TYPE_M				GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M		GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M				GENMASK(7, 0)
#define HZIP_ALG_TYPE_DEFLATE			0x01
#define HZIP_BUF_TYPE_M				GENMASK(11, 8)
#define HZIP_SGL				0x1	/* buf_type value: hw scatter-gather list */

/* cra_priority advertised to the crypto core for the acomp algorithm */
#define HZIP_ALG_PRIORITY			300
/* default number of sge per hw sgl; overridable via sgl_sge_nr module param */
#define HZIP_SGL_SGE_NR				10

/* capability bits tested with hisi_zip_alg_support() before registration */
#define HZIP_ALG_DEFLATE			GENMASK(5, 4)
27
/* Serializes algorithm (un)registration across multiple zip devices. */
static DEFINE_MUTEX(zip_algs_lock);
/* Count of live zip devices; algs are registered only by the first one. */
static unsigned int zip_available_devs;

/* alg_type written into the qp: 0 = compress, 1 = decompress (hw encoding) */
enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};

/* Indices into hisi_zip_ctx::qp_ctx — one qp each for comp and decomp. */
enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};

/* Map a crypto alg name to the hw request type placed in sqe dw9. */
#define COMP_NAME_TO_TYPE(alg_name)					\
	(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0)
44
/* Per-request bookkeeping kept in the qp's request cache. */
struct hisi_zip_req {
	struct acomp_req *req;		/* the crypto-layer request we serve */
	struct hisi_acc_hw_sgl *hw_src;	/* hw sgl mapped from req->src */
	struct hisi_acc_hw_sgl *hw_dst;	/* hw sgl mapped from req->dst */
	dma_addr_t dma_src;		/* DMA address of hw_src */
	dma_addr_t dma_dst;		/* DMA address of hw_dst */
	u16 req_id;			/* slot index; echoed back via sqe dw26 */
};

/* Fixed-size cache of in-flight requests for one qp. */
struct hisi_zip_req_q {
	struct hisi_zip_req *q;		/* array of 'size' request slots */
	unsigned long *req_bitmap;	/* slot allocation bitmap */
	rwlock_t req_lock;		/* protects req_bitmap */
	u16 size;			/* number of slots (== sq depth) */
};

/* Everything tied to one hw queue pair (comp or decomp direction). */
struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;
	struct hisi_zip_req_q req_q;
	struct hisi_acc_sgl_pool *sgl_pool;
	struct hisi_zip *zip_dev;	/* owning device, for dfx counters */
	struct hisi_zip_ctx *ctx;	/* back-pointer to the tfm context */
};

/* Vtable abstracting the sqe layout (fill on submit, get on completion). */
struct hisi_zip_sqe_ops {
	u8 sqe_type;
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	u32 (*get_tag)(struct hisi_zip_sqe *sqe);
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};

/* Per-tfm context: one qp_ctx per direction plus the sqe ops in use. */
struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
};
86
87static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
88{
89	int ret;
90	u16 n;
91
92	if (!val)
93		return -EINVAL;
94
95	ret = kstrtou16(val, 10, &n);
96	if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
97		return -EINVAL;
98
99	return param_set_ushort(val, kp);
100}
101
/* Param ops: validate on set (range check), plain ushort read on get. */
static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};

/* Number of sge per hw sgl; read-only via sysfs (0444), set at load time. */
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
110
111static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
112						struct acomp_req *req)
113{
114	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
115	struct hisi_zip_req *q = req_q->q;
116	struct hisi_zip_req *req_cache;
117	int req_id;
118
119	write_lock(&req_q->req_lock);
120
121	req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
122	if (req_id >= req_q->size) {
123		write_unlock(&req_q->req_lock);
124		dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
125		return ERR_PTR(-EAGAIN);
126	}
127	set_bit(req_id, req_q->req_bitmap);
128
129	write_unlock(&req_q->req_lock);
130
131	req_cache = q + req_id;
132	req_cache->req_id = req_id;
133	req_cache->req = req;
134
135	return req_cache;
136}
137
/* Return a request slot to the cache by clearing its bitmap bit. */
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
				struct hisi_zip_req *req)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;

	write_lock(&req_q->req_lock);
	clear_bit(req->req_id, req_q->req_bitmap);
	write_unlock(&req_q->req_lock);
}
147
148static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
149{
150	sqe->source_addr_l = lower_32_bits(req->dma_src);
151	sqe->source_addr_h = upper_32_bits(req->dma_src);
152	sqe->dest_addr_l = lower_32_bits(req->dma_dst);
153	sqe->dest_addr_h = upper_32_bits(req->dma_dst);
154}
155
156static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
157{
158	struct acomp_req *a_req = req->req;
159
160	sqe->input_data_length = a_req->slen;
161	sqe->dest_avail_out = a_req->dlen;
162}
163
164static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
165{
166	u32 val;
167
168	val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
169	val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
170	sqe->dw9 = val;
171}
172
173static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
174{
175	u32 val;
176
177	val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
178	val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
179	sqe->dw9 = val;
180}
181
/* Stash the request slot id in dw26 so the completion cb can find the req. */
static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->dw26 = req->req_id;
}
186
187static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
188{
189	u32 val;
190
191	val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
192	val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
193	sqe->dw7 = val;
194}
195
196static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
197			      u8 req_type, struct hisi_zip_req *req)
198{
199	const struct hisi_zip_sqe_ops *ops = ctx->ops;
200
201	memset(sqe, 0, sizeof(struct hisi_zip_sqe));
202
203	ops->fill_addr(sqe, req);
204	ops->fill_buf_size(sqe, req);
205	ops->fill_buf_type(sqe, HZIP_SGL);
206	ops->fill_req_type(sqe, req_type);
207	ops->fill_tag(sqe, req);
208	ops->fill_sqe_type(sqe, ops->sqe_type);
209}
210
211static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
212			    struct hisi_zip_req *req)
213{
214	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
215	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
216	struct acomp_req *a_req = req->req;
217	struct hisi_qp *qp = qp_ctx->qp;
218	struct device *dev = &qp->qm->pdev->dev;
219	struct hisi_zip_sqe zip_sqe;
220	int ret;
221
222	if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
223		return -EINVAL;
224
225	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
226						    req->req_id << 1, &req->dma_src);
227	if (IS_ERR(req->hw_src)) {
228		dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
229			PTR_ERR(req->hw_src));
230		return PTR_ERR(req->hw_src);
231	}
232
233	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
234						    (req->req_id << 1) + 1,
235						    &req->dma_dst);
236	if (IS_ERR(req->hw_dst)) {
237		ret = PTR_ERR(req->hw_dst);
238		dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
239			ret);
240		goto err_unmap_input;
241	}
242
243	hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
244
245	/* send command to start a task */
246	atomic64_inc(&dfx->send_cnt);
247	ret = hisi_qp_send(qp, &zip_sqe);
248	if (unlikely(ret < 0)) {
249		atomic64_inc(&dfx->send_busy_cnt);
250		ret = -EAGAIN;
251		dev_dbg_ratelimited(dev, "failed to send request!\n");
252		goto err_unmap_output;
253	}
254
255	return -EINPROGRESS;
256
257err_unmap_output:
258	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
259err_unmap_input:
260	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
261	return ret;
262}
263
/* Recover the request slot id the submit path stored in dw26. */
static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe)
{
	return sqe->dw26;
}
268
269static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
270{
271	return sqe->dw3 & HZIP_BD_STATUS_M;
272}
273
/* Number of output bytes the hw produced for this request. */
static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
{
	return sqe->produced;
}
278
279static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
280{
281	struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
282	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
283	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
284	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
285	struct device *dev = &qp->qm->pdev->dev;
286	struct hisi_zip_sqe *sqe = data;
287	u32 tag = ops->get_tag(sqe);
288	struct hisi_zip_req *req = req_q->q + tag;
289	struct acomp_req *acomp_req = req->req;
290	int err = 0;
291	u32 status;
292
293	atomic64_inc(&dfx->recv_cnt);
294	status = ops->get_status(sqe);
295	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
296		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
297			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
298			sqe->produced);
299		atomic64_inc(&dfx->err_bd_cnt);
300		err = -EIO;
301	}
302
303	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
304	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
305
306	acomp_req->dlen = ops->get_dstlen(sqe);
307
308	if (acomp_req->base.complete)
309		acomp_request_complete(acomp_req, err);
310
311	hisi_zip_remove_req(qp_ctx, req);
312}
313
314static int hisi_zip_acompress(struct acomp_req *acomp_req)
315{
316	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
317	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
318	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
319	struct hisi_zip_req *req;
320	int ret;
321
322	req = hisi_zip_create_req(qp_ctx, acomp_req);
323	if (IS_ERR(req))
324		return PTR_ERR(req);
325
326	ret = hisi_zip_do_work(qp_ctx, req);
327	if (unlikely(ret != -EINPROGRESS)) {
328		dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
329		hisi_zip_remove_req(qp_ctx, req);
330	}
331
332	return ret;
333}
334
335static int hisi_zip_adecompress(struct acomp_req *acomp_req)
336{
337	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
338	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
339	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
340	struct hisi_zip_req *req;
341	int ret;
342
343	req = hisi_zip_create_req(qp_ctx, acomp_req);
344	if (IS_ERR(req))
345		return PTR_ERR(req);
346
347	ret = hisi_zip_do_work(qp_ctx, req);
348	if (unlikely(ret != -EINPROGRESS)) {
349		dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
350				     ret);
351		hisi_zip_remove_req(qp_ctx, req);
352	}
353
354	return ret;
355}
356
357static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
358			     int alg_type, int req_type)
359{
360	struct device *dev = &qp->qm->pdev->dev;
361	int ret;
362
363	qp->req_type = req_type;
364	qp->alg_type = alg_type;
365	qp->qp_ctx = qp_ctx;
366
367	ret = hisi_qm_start_qp(qp, 0);
368	if (ret < 0) {
369		dev_err(dev, "failed to start qp (%d)!\n", ret);
370		return ret;
371	}
372
373	qp_ctx->qp = qp;
374
375	return 0;
376}
377
/* Stop a running qp and return it to the qm's free pool. */
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	hisi_qm_free_qps(&qp_ctx->qp, 1);
}
383
/* sqe layout ops for this hw generation (sqe_type 0x3). */
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
	.sqe_type		= 0x3,
	.fill_addr		= hisi_zip_fill_addr,
	.fill_buf_size		= hisi_zip_fill_buf_size,
	.fill_buf_type		= hisi_zip_fill_buf_type,
	.fill_req_type		= hisi_zip_fill_req_type,
	.fill_tag		= hisi_zip_fill_tag,
	.fill_sqe_type		= hisi_zip_fill_sqe_type,
	.get_tag		= hisi_zip_get_tag,
	.get_status		= hisi_zip_get_status,
	.get_dstlen		= hisi_zip_get_dstlen,
};
396
397static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
398{
399	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
400	struct hisi_zip_qp_ctx *qp_ctx;
401	struct hisi_zip *hisi_zip;
402	int ret, i, j;
403
404	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
405	if (ret) {
406		pr_err("failed to create zip qps (%d)!\n", ret);
407		return -ENODEV;
408	}
409
410	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
411
412	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
413		/* alg_type = 0 for compress, 1 for decompress in hw sqe */
414		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
415		qp_ctx->ctx = hisi_zip_ctx;
416		ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
417		if (ret) {
418			for (j = i - 1; j >= 0; j--)
419				hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
420
421			hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
422			return ret;
423		}
424
425		qp_ctx->zip_dev = hisi_zip;
426	}
427
428	hisi_zip_ctx->ops = &hisi_zip_ops;
429
430	return 0;
431}
432
/* Stop and free every qp owned by this tfm context. */
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
440
441static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
442{
443	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
444	struct hisi_zip_req_q *req_q;
445	int i, ret;
446
447	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
448		req_q = &ctx->qp_ctx[i].req_q;
449		req_q->size = q_depth;
450
451		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
452		if (!req_q->req_bitmap) {
453			ret = -ENOMEM;
454			if (i == 0)
455				return ret;
456
457			goto err_free_comp_q;
458		}
459		rwlock_init(&req_q->req_lock);
460
461		req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
462				   GFP_KERNEL);
463		if (!req_q->q) {
464			ret = -ENOMEM;
465			if (i == 0)
466				goto err_free_comp_bitmap;
467			else
468				goto err_free_decomp_bitmap;
469		}
470	}
471
472	return 0;
473
474err_free_decomp_bitmap:
475	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
476err_free_comp_q:
477	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
478err_free_comp_bitmap:
479	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
480	return ret;
481}
482
/* Free the request cache (slot array and bitmap) of every qp. */
static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		kfree(ctx->qp_ctx[i].req_q.q);
		bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
	}
}
492
493static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
494{
495	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
496	struct hisi_zip_qp_ctx *tmp;
497	struct device *dev;
498	int i;
499
500	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
501		tmp = &ctx->qp_ctx[i];
502		dev = &tmp->qp->qm->pdev->dev;
503		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
504							 sgl_sge_nr);
505		if (IS_ERR(tmp->sgl_pool)) {
506			if (i == 1)
507				goto err_free_sgl_pool0;
508			return -ENOMEM;
509		}
510	}
511
512	return 0;
513
514err_free_sgl_pool0:
515	hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
516			       ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
517	return -ENOMEM;
518}
519
/* Free the hw sgl pool of every qp. */
static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
				       ctx->qp_ctx[i].sgl_pool);
}
528
/* Install (or clear, when fn is NULL) the completion callback on all qps. */
static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
				  void (*fn)(struct hisi_qp *, void *))
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		ctx->qp_ctx[i].qp->req_cb = fn;
}
537
538static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
539{
540	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
541	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
542	struct device *dev;
543	int ret;
544
545	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
546	if (ret) {
547		pr_err("failed to init ctx (%d)!\n", ret);
548		return ret;
549	}
550
551	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;
552
553	ret = hisi_zip_create_req_q(ctx);
554	if (ret) {
555		dev_err(dev, "failed to create request queue (%d)!\n", ret);
556		goto err_ctx_exit;
557	}
558
559	ret = hisi_zip_create_sgl_pool(ctx);
560	if (ret) {
561		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
562		goto err_release_req_q;
563	}
564
565	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);
566
567	return 0;
568
569err_release_req_q:
570	hisi_zip_release_req_q(ctx);
571err_ctx_exit:
572	hisi_zip_ctx_exit(ctx);
573	return ret;
574}
575
/*
 * acomp .exit: tear down in reverse order of init — disable completions
 * first so no callback touches the resources being freed.
 */
static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	hisi_zip_set_acomp_cb(ctx, NULL);
	hisi_zip_release_sgl_pool(ctx);
	hisi_zip_release_req_q(ctx);
	hisi_zip_ctx_exit(ctx);
}
585
/* The "deflate" acomp algorithm backed by this driver. */
static struct acomp_alg hisi_zip_acomp_deflate = {
	.init			= hisi_zip_acomp_init,
	.exit			= hisi_zip_acomp_exit,
	.compress		= hisi_zip_acompress,
	.decompress		= hisi_zip_adecompress,
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "hisi-deflate-acomp",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_module		= THIS_MODULE,
		.cra_priority		= HZIP_ALG_PRIORITY,
		.cra_ctxsize		= sizeof(struct hisi_zip_ctx),
	}
};
600
601static int hisi_zip_register_deflate(struct hisi_qm *qm)
602{
603	int ret;
604
605	if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
606		return 0;
607
608	ret = crypto_register_acomp(&hisi_zip_acomp_deflate);
609	if (ret)
610		dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret);
611
612	return ret;
613}
614
615static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
616{
617	if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
618		return;
619
620	crypto_unregister_acomp(&hisi_zip_acomp_deflate);
621}
622
623int hisi_zip_register_to_crypto(struct hisi_qm *qm)
624{
625	int ret = 0;
626
627	mutex_lock(&zip_algs_lock);
628	if (zip_available_devs++)
629		goto unlock;
630
631	ret = hisi_zip_register_deflate(qm);
632	if (ret)
633		zip_available_devs--;
634
635unlock:
636	mutex_unlock(&zip_algs_lock);
637	return ret;
638}
639
640void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
641{
642	mutex_lock(&zip_algs_lock);
643	if (--zip_available_devs)
644		goto unlock;
645
646	hisi_zip_unregister_deflate(qm);
647
648unlock:
649	mutex_unlock(&zip_algs_lock);
650}
651