/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>

#define CRYPTO_ACOMP_ALLOC_OUTPUT	0x00000001
#define CRYPTO_ACOMP_DST_MAX		131072

/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base:	Common attributes for asynchronous crypto requests
 * @src:	Source Data
 * @dst:	Destination data
 * @slen:	Size of the input buffer
 * @dlen:	Size of the output buffer and number of bytes produced
 * @flags:	Internal flags
 * @__ctx:	Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	struct scatterlist *src;
	struct scatterlist *dst;
	unsigned int slen;
	unsigned int dlen;
	u32 flags;
	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress:		Function performs a compress operation
 * @decompress:		Function performs a decompress operation
 * @dst_free:		Frees destination buffer if allocated inside the
 *			algorithm
 * @reqsize:		Context size for (de)compression requests
 * @base:		Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	void (*dst_free)(struct scatterlist *dst);
	unsigned int reqsize;
	struct crypto_tfm base;
};

/*
 * struct crypto_istat_compress - statistics for compress algorithm
 * @compress_cnt:	number of compress requests
 * @compress_tlen:	total data size handled by compress requests
 * @decompress_cnt:	number of decompress requests
 * @decompress_tlen:	total data size handled by decompress requests
 * @err_cnt:		number of errors for compress and decompress requests
 */
struct crypto_istat_compress {
	atomic64_t compress_cnt;
	atomic64_t compress_tlen;
	atomic64_t decompress_cnt;
	atomic64_t decompress_tlen;
	atomic64_t err_cnt;
};

#ifdef CONFIG_CRYPTO_STATS
#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
#else
#define COMP_ALG_COMMON_STATS
#endif

#define COMP_ALG_COMMON {			\
	COMP_ALG_COMMON_STATS			\
						\
	struct crypto_alg base;			\
}
struct comp_alg_common COMP_ALG_COMMON;

/**
 * DOC: Asynchronous Compression API
 *
 * The Asynchronous Compression API is used with the algorithms of type
 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
 */

/**
 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm, e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * Allocate a handle for a compression algorithm. The returned struct
 * crypto_acomp is the handle that is required for any subsequent
 * API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask);
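
/*
 * Example (illustrative sketch, not a prescribed pattern): allocating a
 * deflate transform and checking the result.  The algorithm name and the
 * zeroed type/mask values are only examples; error handling is reduced to
 * the minimum.
 *
 *	struct crypto_acomp *acomp;
 *
 *	acomp = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(acomp))
 *		return PTR_ERR(acomp);
 *	...
 *	crypto_free_acomp(acomp);
 */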
/**
 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm, e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 * @node:	specifies the NUMA node the ZIP hardware belongs to
 *
 * Allocate a handle for a compression algorithm. Drivers should try to use
 * (de)compressors on the specified NUMA node.
 * The returned struct crypto_acomp is the handle that is required for any
 * subsequent API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node);
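
/*
 * Example (illustrative sketch): requesting an instance close to the
 * caller's memory.  The node argument is only an example; passing
 * NUMA_NO_NODE instead behaves like crypto_alloc_acomp().
 *
 *	struct crypto_acomp *acomp;
 *
 *	acomp = crypto_alloc_acomp_node("deflate", 0, 0, numa_node_id());
 *	if (IS_ERR(acomp))
 *		return PTR_ERR(acomp);
 */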

static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}

static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}

static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}

static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}

static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}

static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	req->base.tfm = crypto_acomp_tfm(tfm);
}

static inline bool acomp_is_async(struct crypto_acomp *tfm)
{
	return crypto_comp_alg_common(tfm)->base.cra_flags &
	       CRYPTO_ALG_ASYNC;
}

static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}

/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}

/**
 * crypto_has_acomp() -- check whether an ACOMPRESS algorithm is available
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm, e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * Return:	1 if the compression algorithm is available, 0 otherwise
 */
static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
	mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;

	return crypto_has_alg(alg_name, type, mask);
}
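
/*
 * Example (illustrative sketch): probing for an algorithm before committing
 * to it; the algorithm name is only an example.
 *
 *	if (!crypto_has_acomp("lz4", 0, 0))
 *		return -ENOENT;
 */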

/**
 * acomp_request_alloc() -- allocates asynchronous (de)compression request
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);

/**
 * acomp_request_free() -- zeroize and free asynchronous (de)compression
 *			   request as well as the output buffer if allocated
 *			   inside the algorithm
 *
 * @req:	request to free
 */
void acomp_request_free(struct acomp_req *req);
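
/*
 * Example (illustrative sketch): a request is allocated against a previously
 * allocated transform and released once the operation has completed.
 *
 *	struct acomp_req *req;
 *
 *	req = acomp_request_alloc(acomp);
 *	if (!req)
 *		return -ENOMEM;
 *	...
 *	acomp_request_free(req);
 */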

/**
 * acomp_request_set_callback() -- Sets an asynchronous callback
 *
 * Callback will be called when an asynchronous operation on a given
 * request is finished.
 *
 * @req:	request that the callback will be set for
 * @flgs:	flags that control the operation, e.g. whether it may be
 *		backlogged
 * @cmpl:	callback which will be called
 * @data:	private data used by the caller
 */
static inline void acomp_request_set_callback(struct acomp_req *req,
					      u32 flgs,
					      crypto_completion_t cmpl,
					      void *data)
{
	req->base.complete = cmpl;
	req->base.data = data;
	req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
	req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
}
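
/*
 * Example (illustrative sketch): a caller that wants synchronous semantics
 * can use the generic crypto_req_done()/crypto_wait_req() helpers from
 * <linux/crypto.h> as the completion callback and allow backlogging.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 */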

/**
 * acomp_request_set_params() -- Sets request parameters
 *
 * Sets parameters required by an acomp operation
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @dst:	pointer to output buffer scatterlist. If this is NULL, the
 *		acomp layer will allocate the output memory
 * @slen:	size of the input buffer
 * @dlen:	size of the output buffer. If dst is NULL, this can be used by
 *		the user to specify the maximum amount of memory to allocate
 */
static inline void acomp_request_set_params(struct acomp_req *req,
					    struct scatterlist *src,
					    struct scatterlist *dst,
					    unsigned int slen,
					    unsigned int dlen)
{
	req->src = src;
	req->dst = dst;
	req->slen = slen;
	req->dlen = dlen;

	req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
	if (!req->dst)
		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
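
/*
 * Example (illustrative sketch): mapping contiguous input and output buffers
 * with single-entry scatterlists from <linux/scatterlist.h>.  Buffer names
 * and sizes are examples only; passing a NULL dst instead asks the acomp
 * layer to allocate the output memory, with dlen as the upper bound.
 *
 *	struct scatterlist sg_src, sg_dst;
 *
 *	sg_init_one(&sg_src, src_buf, src_len);
 *	sg_init_one(&sg_dst, dst_buf, dst_len);
 *	acomp_request_set_params(req, &sg_src, &sg_dst, src_len, dst_len);
 */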

static inline struct crypto_istat_compress *comp_get_stat(
	struct comp_alg_common *alg)
{
#ifdef CONFIG_CRYPTO_STATS
	return &alg->stat;
#else
	return NULL;
#endif
}

static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
{
	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&comp_get_stat(alg)->err_cnt);

	return err;
}

/**
 * crypto_acomp_compress() -- Invoke asynchronous compress operation
 *
 * Function invokes the asynchronous compress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
static inline int crypto_acomp_compress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct comp_alg_common *alg;

	alg = crypto_comp_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_compress *istat = comp_get_stat(alg);

		atomic64_inc(&istat->compress_cnt);
		atomic64_add(req->slen, &istat->compress_tlen);
	}

	return crypto_comp_errstat(alg, tfm->compress(req));
}
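
/*
 * Example (illustrative sketch): combining the pieces above with
 * crypto_wait_req() for a synchronous compress call.  On success, req->dlen
 * holds the number of bytes written to the destination.
 *
 *	int ret;
 *
 *	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *	if (ret)
 *		return ret;
 *	compressed_len = req->dlen;
 */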

/**
 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
 *
 * Function invokes the asynchronous decompress operation
 *
 * @req:	asynchronous decompress request
 *
 * Return:	zero on success; error code in case of error
 */
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct comp_alg_common *alg;

	alg = crypto_comp_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_compress *istat = comp_get_stat(alg);

		atomic64_inc(&istat->decompress_cnt);
		atomic64_add(req->slen, &istat->decompress_tlen);
	}

	return crypto_comp_errstat(alg, tfm->decompress(req));
}
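
/*
 * Example (illustrative sketch): decompression mirrors the compress path;
 * only the entry point changes, and req->dlen again reports the number of
 * bytes produced.
 *
 *	ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
 */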

#endif