// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (C) 2021, Linaro Limited. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/gcm.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include "aead.h"

#define CCM_NONCE_ADATA_SHIFT		6
#define CCM_NONCE_AUTHSIZE_SHIFT	3
#define MAX_CCM_ADATA_HEADER_LEN	6
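/*
 * The shifts above follow the CCM B0/flags byte layout from RFC 3610: bit 6
 * is the Adata flag and bits 5..3 encode (M - 2) / 2, where M is the tag
 * length. MAX_CCM_ADATA_HEADER_LEN is the longest AAD length header this
 * driver emits (0xff 0xfe plus a 32-bit big-endian length).
 */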

static LIST_HEAD(aead_algs);

static void qce_aead_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int error;
	u32 status;
	unsigned int totallen;
	unsigned char tag[SHA256_DIGEST_SIZE] = {0};

	diff_dst = req->src != req->dst;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "aead dma termination error (%d)\n", error);
	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);

	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	if (IS_CCM(rctx->flags)) {
		if (req->assoclen) {
			sg_free_table(&rctx->src_tbl);
			if (diff_dst)
				sg_free_table(&rctx->dst_tbl);
		} else {
			if (!(IS_DECRYPT(rctx->flags) && !diff_dst))
				sg_free_table(&rctx->dst_tbl);
		}
	} else {
		sg_free_table(&rctx->dst_tbl);
	}

	error = qce_check_status(qce, &status);
	if (error < 0 && (error != -EBADMSG))
		dev_err(qce->dev, "aead operation error (%x)\n", status);

	if (IS_ENCRYPT(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen;
		if (IS_CCM(rctx->flags))
			scatterwalk_map_and_copy(rctx->ccmresult_buf, req->dst,
						 totallen, ctx->authsize, 1);
		else
			scatterwalk_map_and_copy(result_buf->auth_iv, req->dst,
						 totallen, ctx->authsize, 1);

	} else if (!IS_CCM(rctx->flags)) {
		totallen = req->cryptlen + req->assoclen - ctx->authsize;
		scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
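		/* Compare the engine-computed MAC with the tag at the end of req->src */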
		if (memcmp(result_buf->auth_iv, tag, ctx->authsize)) {
			dev_err(qce->dev, "aead: authentication tag mismatch\n");
			error = -EBADMSG;
		}
	}

	qce->async_req_done(qce, error);
}

static struct scatterlist *
qce_aead_prepare_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ);
}

static struct scatterlist *
qce_aead_prepare_ccm_result_buf(struct sg_table *tbl, struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);

	sg_init_one(&rctx->result_sg, rctx->ccmresult_buf, QCE_BAM_BURST_SIZE);
	return qce_sgtable_add(tbl, &rctx->result_sg, QCE_BAM_BURST_SIZE);
}

static struct scatterlist *
qce_aead_prepare_dst_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg, *msg_sg, __sg[2];
	gfp_t gfp;
	unsigned int assoclen = req->assoclen;
	unsigned int totallen;
	int ret;

	totallen = rctx->cryptlen + assoclen;
	rctx->dst_nents = sg_nents_for_len(req->dst, totallen);
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG entries\n");
		return ERR_PTR(-EINVAL);
	}
	if (IS_CCM(rctx->flags))
		rctx->dst_nents += 2;
	else
		rctx->dst_nents += 1;
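	/* The extra entries hold the engine result buffer and, for CCM, the formatted adata */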

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ERR_PTR(ret);

	if (IS_CCM(rctx->flags) && assoclen) {
		/* Get the dst buffer */
		msg_sg = scatterwalk_ffwd(__sg, req->dst, assoclen);

		sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
				     rctx->assoclen);
		if (IS_ERR(sg))
			goto dst_tbl_free;
		/* dst buffer */
		sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
		if (IS_ERR(sg))
			goto dst_tbl_free;
		totallen = rctx->cryptlen + rctx->assoclen;
	} else {
		if (totallen) {
			sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, totallen);
			if (IS_ERR(sg))
				goto dst_tbl_free;
		}
	}
	if (IS_CCM(rctx->flags))
		sg = qce_aead_prepare_ccm_result_buf(&rctx->dst_tbl, req);
	else
		sg = qce_aead_prepare_result_buf(&rctx->dst_tbl, req);

	if (IS_ERR(sg))
		goto dst_tbl_free;

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;
	rctx->dst_nents = sg_nents_for_len(rctx->dst_sg, totallen) + 1;

	return sg;

dst_tbl_free:
	sg_free_table(&rctx->dst_tbl);
	return sg;
}

static int
qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
{
	struct scatterlist *sg, *msg_sg, __sg[2];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int assoclen = rctx->assoclen;
	unsigned int adata_header_len, cryptlen, totallen;
	gfp_t gfp;
	bool diff_dst;
	int ret;

	if (IS_DECRYPT(rctx->flags))
		cryptlen = rctx->cryptlen + ctx->authsize;
	else
		cryptlen = rctx->cryptlen;
	totallen = cryptlen + req->assoclen;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;

	/* Get the msg */
	msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);

	rctx->adata = kzalloc(ALIGN(assoclen, 16) + MAX_CCM_ADATA_HEADER_LEN, gfp);
	if (!rctx->adata)
		return -ENOMEM;

	/*
	 * Format associated data (RFC 3610 and NIST SP 800-38C)
	 * Even though the specification allows AAD of up to 2^64 - 1 bytes,
	 * the assoclen field in struct aead_request is an unsigned int, which
	 * limits the AAD to at most 2^32 - 1 bytes. So only two of the length
	 * header encodings need to be handled here.
	 */
	if (assoclen < 0xff00) {
		adata_header_len = 2;
		*(__be16 *)rctx->adata = cpu_to_be16(assoclen);
	} else {
		adata_header_len = 6;
		*(__be16 *)rctx->adata = cpu_to_be16(0xfffe);
		*(__be32 *)(rctx->adata + 2) = cpu_to_be32(assoclen);
	}
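	/*
	 * E.g. assoclen = 0x0010 encodes as the 2-byte header 00 10, while
	 * assoclen = 0x10000 encodes as ff fe 00 01 00 00.
	 */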

	/* Copy the associated data */
	if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, assoclen),
			      rctx->adata + adata_header_len,
			      assoclen) != assoclen) {
		ret = -EINVAL;
		goto err_free_adata;
	}

	/* Pad associated data to the block size */
	rctx->assoclen = ALIGN(assoclen + adata_header_len, 16);

	diff_dst = req->src != req->dst;

	if (diff_dst)
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 1;
	else
		rctx->src_nents = sg_nents_for_len(req->src, totallen) + 2;

	ret = sg_alloc_table(&rctx->src_tbl, rctx->src_nents, gfp);
	if (ret)
		goto err_free_adata;

	/* Associated Data */
	sg_init_one(&rctx->adata_sg, rctx->adata, rctx->assoclen);
	sg = qce_sgtable_add(&rctx->src_tbl, &rctx->adata_sg,
			     rctx->assoclen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	/* src msg */
	sg = qce_sgtable_add(&rctx->src_tbl, msg_sg, cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto err_free;
	}
	if (!diff_dst) {
		/*
		 * For in-place decryption the buffer already holds the
		 * incoming MAC, and zero padding is written out in its place,
		 * so no separate result buffer needs to be added here.
		 */
		if (!IS_DECRYPT(rctx->flags)) {
			sg = qce_aead_prepare_ccm_result_buf(&rctx->src_tbl, req);
			if (IS_ERR(sg)) {
				ret = PTR_ERR(sg);
				goto err_free;
			}
		}
	}
	sg_mark_end(sg);
	rctx->src_sg = rctx->src_tbl.sgl;
	totallen = cryptlen + rctx->assoclen;
	rctx->src_nents = sg_nents_for_len(rctx->src_sg, totallen);

	if (diff_dst) {
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto err_free;
		}
	} else {
		if (IS_ENCRYPT(rctx->flags))
			rctx->dst_nents = rctx->src_nents + 1;
		else
			rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
err_free:
	sg_free_table(&rctx->src_tbl);
err_free_adata:
	kfree(rctx->adata);
	rctx->adata = NULL;
	return ret;
}

static int qce_aead_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg;
	bool diff_dst = req->src != req->dst;
	unsigned int totallen;

	totallen = rctx->cryptlen + rctx->assoclen;

	sg = qce_aead_prepare_dst_buf(req);
	if (IS_ERR(sg))
		return PTR_ERR(sg);
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, totallen);
		if (rctx->src_nents < 0) {
			dev_err(qce->dev, "Invalid number of src SG entries\n");
			return -EINVAL;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_nents = rctx->dst_nents - 1;
		rctx->src_sg = rctx->dst_sg;
	}
	return 0;
}

static int qce_aead_ccm_prepare_buf(struct aead_request *req)
{
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct scatterlist *sg;
	bool diff_dst = req->src != req->dst;
	unsigned int cryptlen;

	if (rctx->assoclen)
		return qce_aead_ccm_prepare_buf_assoclen(req);

	if (IS_ENCRYPT(rctx->flags))
		return qce_aead_prepare_buf(req);

	cryptlen = rctx->cryptlen + ctx->authsize;
	if (diff_dst) {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		sg = qce_aead_prepare_dst_buf(req);
		if (IS_ERR(sg))
			return PTR_ERR(sg);
	} else {
		rctx->src_nents = sg_nents_for_len(req->src, cryptlen);
		rctx->src_sg = req->src;
		rctx->dst_nents = rctx->src_nents;
		rctx->dst_sg = rctx->src_sg;
	}

	return 0;
}

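/*
 * Example: for a "ccm(aes)" request with iv[0] = 3 (i.e. L' = 3, a 4-byte
 * length field), an 8-byte tag and a non-zero assoclen, the flags byte
 * built below is 0x40 | ((8 - 2) / 2) << 3 | 3 = 0x5b, followed by the
 * nonce bytes and the big-endian message length in the last four bytes.
 */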
static int qce_aead_create_ccm_nonce(struct qce_aead_reqctx *rctx, struct qce_aead_ctx *ctx)
{
	unsigned int msglen_size, ivsize;
	u8 msg_len[4];
	int i;

	if (!rctx || !rctx->iv)
		return -EINVAL;

	msglen_size = rctx->iv[0] + 1;

	/* Verify that msg len size is valid */
	if (msglen_size < 2 || msglen_size > 8)
		return -EINVAL;

	ivsize = rctx->ivsize;

	/*
	 * Clear the msglen bytes in the IV; otherwise the hardware engine
	 * would fold any stray values left there into the nonce.
	 */
	if (!IS_CCM_RFC4309(rctx->flags)) {
		for (i = 0; i < msglen_size; i++)
			rctx->iv[ivsize - i - 1] = 0;
	}

	/*
	 * The crypto framework encodes cryptlen as an unsigned int. Thus, even
	 * though the spec allows up to 8 bytes to encode msg_len, only 4 bytes
	 * are ever needed.
	 */
	if (msglen_size > 4)
		msglen_size = 4;

	memcpy(&msg_len[0], &rctx->cryptlen, 4);

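	/*
	 * Build the CCM B0/flags byte (RFC 3610): bit 6 is the Adata flag,
	 * bits 5..3 encode (M - 2) / 2 for tag length M, and bits 2..0 hold
	 * L' = L - 1, which is already present in iv[0].
	 */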
	memcpy(&rctx->ccm_nonce[0], rctx->iv, rctx->ivsize);
	if (rctx->assoclen)
		rctx->ccm_nonce[0] |= 1 << CCM_NONCE_ADATA_SHIFT;
	rctx->ccm_nonce[0] |= ((ctx->authsize - 2) / 2) <<
				CCM_NONCE_AUTHSIZE_SHIFT;
	for (i = 0; i < msglen_size; i++)
		rctx->ccm_nonce[QCE_MAX_NONCE - i - 1] = msg_len[i];

	return 0;
}

static int
qce_aead_async_req_handle(struct crypto_async_request *async_req)
{
	struct aead_request *req = aead_request_cast(async_req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	bool diff_dst;
	int dst_nents, src_nents, ret;

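	/*
	 * For RFC 4309 the 11-byte CCM nonce is the 3-byte salt captured at
	 * setkey time followed by the 8-byte per-request IV, with iv[0] = 3
	 * selecting a 4-byte message length field (L' = 3).
	 */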
	if (IS_CCM_RFC4309(rctx->flags)) {
		memset(rctx->ccm_rfc4309_iv, 0, QCE_MAX_IV_SIZE);
		rctx->ccm_rfc4309_iv[0] = 3;
		memcpy(&rctx->ccm_rfc4309_iv[1], ctx->ccm4309_salt, QCE_CCM4309_SALT_SIZE);
		memcpy(&rctx->ccm_rfc4309_iv[4], req->iv, 8);
		rctx->iv = rctx->ccm_rfc4309_iv;
		rctx->ivsize = AES_BLOCK_SIZE;
		rctx->assoclen = req->assoclen - 8;
	} else {
		rctx->iv = req->iv;
		rctx->ivsize = crypto_aead_ivsize(tfm);
		rctx->assoclen = req->assoclen;
	}

	diff_dst = req->src != req->dst;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	if (IS_CCM(rctx->flags)) {
		ret = qce_aead_create_ccm_nonce(rctx, ctx);
		if (ret)
			return ret;
		ret = qce_aead_ccm_prepare_buf(req);
	} else {
		ret = qce_aead_prepare_buf(req);
	}

	if (ret)
		return ret;
	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
	} else {
		if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
			src_nents = dst_nents;
		else
			src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents,
			       qce_aead_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	if (IS_CCM(rctx->flags) && rctx->assoclen) {
		sg_free_table(&rctx->src_tbl);
		if (diff_dst)
			sg_free_table(&rctx->dst_tbl);
	} else {
		sg_free_table(&rctx->dst_tbl);
	}
	return ret;
}

static int qce_aead_crypt(struct aead_request *req, int encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qce_alg_template *tmpl = to_aead_tmpl(tfm);
	unsigned int blocksize = crypto_aead_blocksize(tfm);

	rctx->flags  = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - ctx->authsize;

	/* CE does not handle 0 length messages */
	if (!rctx->cryptlen) {
		if (!(IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags)))
			ctx->need_fallback = true;
	}

	/* If fallback is needed, schedule and exit */
	if (ctx->need_fallback) {
		/* Reset need_fallback in case the same ctx is used for another transaction */
		ctx->need_fallback = false;

		aead_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		aead_request_set_callback(&rctx->fallback_req, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(&rctx->fallback_req, req->src,
				       req->dst, req->cryptlen, req->iv);
		aead_request_set_ad(&rctx->fallback_req, req->assoclen);

		return encrypt ? crypto_aead_encrypt(&rctx->fallback_req) :
				 crypto_aead_decrypt(&rctx->fallback_req);
	}

	/*
	 * CBC algorithms require message lengths to be
	 * multiples of block size.
	 */
	if (IS_CBC(rctx->flags) && !IS_ALIGNED(rctx->cryptlen, blocksize))
		return -EINVAL;

	/* RFC 4309 requires the AAD (ESP header plus IV) to be 16 or 20 bytes */
	if (IS_CCM_RFC4309(rctx->flags))
		if (crypto_ipsec_check_assoclen(req->assoclen))
			return -EINVAL;

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_aead_encrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 1);
}

static int qce_aead_decrypt(struct aead_request *req)
{
	return qce_aead_crypt(req, 0);
}

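/*
 * For "rfc4309(ccm(aes))" the supplied key is the AES key with a 3-byte
 * nonce salt appended; the salt is split off here and kept for building
 * the per-request IV.
 */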
static int qce_aead_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

	if (IS_CCM_RFC4309(flags)) {
		if (keylen < QCE_CCM4309_SALT_SIZE)
			return -EINVAL;
		keylen -= QCE_CCM4309_SALT_SIZE;
		memcpy(ctx->ccm4309_salt, key + keylen, QCE_CCM4309_SALT_SIZE);
	}

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192)
		return -EINVAL;

	ctx->enc_keylen = keylen;
	ctx->auth_keylen = keylen;

	memcpy(ctx->enc_key, key, keylen);
	memcpy(ctx->auth_key, key, keylen);

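	/* The crypto engine does not support AES-192; defer to the fallback */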
	if (keylen == AES_KEYSIZE_192)
		ctx->need_fallback = true;

	return IS_CCM_RFC4309(flags) ?
		crypto_aead_setkey(ctx->fallback, key, keylen + QCE_CCM4309_SALT_SIZE) :
		crypto_aead_setkey(ctx->fallback, key, keylen);
}

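/*
 * authenc() keys arrive as an rtattr-encoded blob carrying the cipher key
 * length, the HMAC key and then the cipher key;
 * crypto_authenc_extractkeys() unpacks it into its two parts.
 */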
static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys authenc_keys;
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;
	u32 _key[6];
	int err;

	err = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
	if (err)
		return err;

	if (authenc_keys.enckeylen > QCE_MAX_KEY_SIZE ||
	    authenc_keys.authkeylen > QCE_MAX_KEY_SIZE)
		return -EINVAL;

	if (IS_DES(flags)) {
		err = verify_aead_des_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
	} else if (IS_3DES(flags)) {
		err = verify_aead_des3_key(tfm, authenc_keys.enckey, authenc_keys.enckeylen);
		if (err)
			return err;
		/*
		 * The crypto engine does not support 3DES keys in which any
		 * two of the three keys are equal. verify_aead_des3_key()
		 * does not check for all of the conditions below, so schedule
		 * the fallback in this case.
		 */
		memcpy(_key, authenc_keys.enckey, DES3_EDE_KEY_SIZE);
		if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
		    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
		    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
			ctx->need_fallback = true;
	} else if (IS_AES(flags)) {
		/* Only standard AES key sizes are supported */
		if (authenc_keys.enckeylen != AES_KEYSIZE_128 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_192 &&
		    authenc_keys.enckeylen != AES_KEYSIZE_256)
			return -EINVAL;
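		/* AES-192 is not supported by the engine; use the fallback */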
		if (authenc_keys.enckeylen == AES_KEYSIZE_192)
			ctx->need_fallback = true;
	}

	ctx->enc_keylen = authenc_keys.enckeylen;
	ctx->auth_keylen = authenc_keys.authkeylen;

	memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);

	memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
	memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);

	return crypto_aead_setkey(ctx->fallback, key, keylen);
}

static int qce_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned long flags = to_aead_tmpl(tfm)->alg_flags;

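	/*
	 * CCM (RFC 3610) allows tag lengths of 4, 6, 8, 10, 12, 14 or 16
	 * bytes; RFC 4309 narrows this to 8, 12 or 16 bytes.
	 */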
	if (IS_CCM(flags)) {
		if (authsize < 4 || authsize > 16 || authsize % 2)
			return -EINVAL;
		if (IS_CCM_RFC4309(flags) && (authsize < 8 || authsize % 4))
			return -EINVAL;
	}
	ctx->authsize = authsize;

	return crypto_aead_setauthsize(ctx->fallback, authsize);
}

static int qce_aead_init(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->need_fallback = false;
	ctx->fallback = crypto_alloc_aead(crypto_tfm_alg_name(&tfm->base),
					  0, CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct qce_aead_reqctx) +
					 crypto_aead_reqsize(ctx->fallback));
	return 0;
}

static void qce_aead_exit(struct crypto_aead *tfm)
{
	struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->fallback);
}

struct qce_aead_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int maxauthsize;
};

static const struct qce_aead_def aead_def[] = {
	{
		.flags          = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name           = "authenc(hmac(sha1),cbc(des))",
		.drv_name       = "authenc-hmac-sha1-cbc-des-qce",
		.blocksize      = DES_BLOCK_SIZE,
		.ivsize         = DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags          = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA1_HMAC,
		.name           = "authenc(hmac(sha1),cbc(des3_ede))",
		.drv_name       = "authenc-hmac-sha1-cbc-3des-qce",
		.blocksize      = DES3_EDE_BLOCK_SIZE,
		.ivsize         = DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	{
		.flags          = QCE_ALG_DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name           = "authenc(hmac(sha256),cbc(des))",
		.drv_name       = "authenc-hmac-sha256-cbc-des-qce",
		.blocksize      = DES_BLOCK_SIZE,
		.ivsize         = DES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags          = QCE_ALG_3DES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name           = "authenc(hmac(sha256),cbc(des3_ede))",
		.drv_name       = "authenc-hmac-sha256-cbc-3des-qce",
		.blocksize      = DES3_EDE_BLOCK_SIZE,
		.ivsize         = DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags          = QCE_ALG_AES | QCE_MODE_CBC | QCE_HASH_SHA256_HMAC,
		.name           = "authenc(hmac(sha256),cbc(aes))",
		.drv_name       = "authenc-hmac-sha256-cbc-aes-qce",
		.blocksize      = AES_BLOCK_SIZE,
		.ivsize         = AES_BLOCK_SIZE,
		.maxauthsize	= SHA256_DIGEST_SIZE,
	},
	{
		.flags          = QCE_ALG_AES | QCE_MODE_CCM,
		.name           = "ccm(aes)",
		.drv_name       = "ccm-aes-qce",
		.blocksize	= 1,
		.ivsize         = AES_BLOCK_SIZE,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
	{
		.flags          = QCE_ALG_AES | QCE_MODE_CCM | QCE_MODE_CCM_RFC4309,
		.name           = "rfc4309(ccm(aes))",
		.drv_name       = "rfc4309-ccm-aes-qce",
		.blocksize	= 1,
		.ivsize         = 8,
		.maxauthsize	= AES_BLOCK_SIZE,
	},
};

static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct aead_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize		= def->blocksize;
	alg->chunksize			= def->chunksize;
	alg->ivsize			= def->ivsize;
	alg->maxauthsize		= def->maxauthsize;
	if (IS_CCM(def->flags))
		alg->setkey		= qce_aead_ccm_setkey;
	else
		alg->setkey		= qce_aead_setkey;
	alg->setauthsize		= qce_aead_setauthsize;
	alg->encrypt			= qce_aead_encrypt;
	alg->decrypt			= qce_aead_decrypt;
	alg->init			= qce_aead_init;
	alg->exit			= qce_aead_exit;

	alg->base.cra_priority		= 300;
	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_NEED_FALLBACK;
	alg->base.cra_ctxsize		= sizeof(struct qce_aead_ctx);
	alg->base.cra_alignmask		= 0;
	alg->base.cra_module		= THIS_MODULE;

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AEAD;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_aead(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &aead_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_aead_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &aead_algs, entry) {
		crypto_unregister_aead(&tmpl->alg.aead);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_aead_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(aead_def); i++) {
		ret = qce_aead_register_one(&aead_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_aead_unregister(qce);
	return ret;
}

const struct qce_algo_ops aead_ops = {
	.type = CRYPTO_ALG_TYPE_AEAD,
	.register_algs = qce_aead_register,
	.unregister_algs = qce_aead_unregister,
	.async_req_handle = qce_aead_async_req_handle,
};