// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 */

#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"

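/* Spread requests across the available flows in round-robin order. */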
static int get_engine_number(struct meson_dev *mc)
{
	return atomic_inc_return(&mc->flow) % MAXFLOW;
}

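/*
 * The engine only handles lengths that are a multiple of 16 bytes in
 * 32-bit-aligned SG entries, and the descriptor ring must also hold the
 * three KEY/IV descriptors; anything else goes to the software fallback.
 */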
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;

	if (areq->cryptlen == 0)
		return true;

	if (sg_nents(src_sg) != sg_nents(dst_sg))
		return true;

	/* The key and IV occupy the first three descriptors */
	if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
		return true;

	while (src_sg && dst_sg) {
		if ((src_sg->length % 16) != 0)
			return true;
		if ((dst_sg->length % 16) != 0)
			return true;
		if (src_sg->length != dst_sg->length)
			return true;
		if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
			return true;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return false;
}

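/* Run the request on the software fallback tfm allocated at init time. */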
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);

	if (rctx->op_dir == MESON_DECRYPT)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

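/*
 * Build and run one descriptor chain on the selected flow: up to three
 * descriptors carry the key and IV, then one descriptor per payload SG
 * entry.
 */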
static int meson_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct meson_dev *mc = op->mc;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
	int flow = rctx->flow;
	unsigned int todo, eat, len;
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;
	struct meson_desc *desc;
	int nr_sgs, nr_sgd;
	int i, err = 0;
	unsigned int keyivlen, ivsize, offset, tloffset;
	dma_addr_t phykeyiv;
	void *backup_iv = NULL, *bkeyiv;
	u32 v;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);

	dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, crypto_skcipher_ivsize(tfm),
		op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt->stat_req++;
	mc->chanlist[flow].stat_req++;
#endif

	/*
	 * The hardware expects a list of meson_desc structures.
	 * The first two carry the key and the third carries the IV,
	 * each moving up to 16 bytes, hence the 48-byte bounce buffer.
	 */
	bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
	if (!bkeyiv)
		return -ENOMEM;

	memcpy(bkeyiv, op->key, op->keylen);
	keyivlen = op->keylen;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (ivsize > areq->cryptlen) {
			dev_err(mc->dev, "invalid ivsize=%u vs len=%u\n", ivsize, areq->cryptlen);
			err = -EINVAL;
			goto theend;
		}
		memcpy(bkeyiv + 32, areq->iv, ivsize);
		keyivlen = 48;
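		/*
		 * For decryption, save the last ciphertext block before it
		 * can be overwritten by in-place processing: it becomes the
		 * chaining IV returned in areq->iv.
		 */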
		if (rctx->op_dir == MESON_DECRYPT) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				goto theend;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
	}
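	/*
	 * Round an AES-192 key up to 32 bytes so the two 16-byte KEY
	 * descriptors never read past the DMA mapping; bkeyiv is zeroed,
	 * so the extra bytes are zero padding.
	 */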
	if (keyivlen == 24)
		keyivlen = 32;

	phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
				  DMA_TO_DEVICE);
	err = dma_mapping_error(mc->dev, phykeyiv);
	if (err) {
		dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
		goto theend;
	}

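	/*
	 * Build one KEY/IV descriptor per 16-byte chunk; keyivlen is always
	 * a multiple of 16 here, so each descriptor moves exactly 16 bytes.
	 * For MODE_KEY descriptors, t_dst is the offset into the engine's
	 * internal key/IV storage rather than a DMA address.
	 */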
	tloffset = 0;
	eat = 0;
	i = 0;
	while (keyivlen > eat) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));
		todo = min(keyivlen - eat, 16u);
		desc->t_src = cpu_to_le32(phykeyiv + i * 16);
		desc->t_dst = cpu_to_le32(i * 16);
		v = (MODE_KEY << 20) | DESC_OWN | 16;
		desc->t_status = cpu_to_le32(v);

		eat += todo;
		i++;
		tloffset++;
	}

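	/*
	 * Map the payload: in-place requests get a single bidirectional
	 * mapping, otherwise src and dst are mapped separately.
	 */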
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (!nr_sgs) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_key;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (!nr_sgs || nr_sgs > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_key;
		}
		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (!nr_sgd || nr_sgd > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
			err = -EINVAL;
			dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src),
				     DMA_TO_DEVICE);
			goto theend_key;
		}
	}

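	/*
	 * One payload descriptor per SG entry: the status word packs the
	 * byte count in its low bits, the key mode at bit 20 and the block
	 * mode at bit 26; DESC_LAST marks the end of the chain.
	 */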
	src_sg = areq->src;
	dst_sg = areq->dst;
	len = areq->cryptlen;
	while (src_sg) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));

		desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
		desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
		todo = min(len, sg_dma_len(src_sg));
		v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
		if (rctx->op_dir == MESON_ENCRYPT)
			v |= DESC_ENCRYPTION;
		len -= todo;

		if (!sg_next(src_sg))
			v |= DESC_LAST;
		desc->t_status = cpu_to_le32(v);
		tloffset++;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

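	/*
	 * Start the transfer: writing the descriptor list's physical address
	 * with bit 1 set kicks the flow, and the interrupt handler sets
	 * chanlist[flow].status before completing.
	 */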
	reinit_completion(&mc->chanlist[flow].complete);
	mc->chanlist[flow].status = 0;
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
	wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
						  msecs_to_jiffies(500));
	if (mc->chanlist[flow].status == 0) {
		dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
		err = -EINVAL;
	}

	if (areq->src == areq->dst) {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
		dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir == MESON_DECRYPT) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}
theend_key:
	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
theend:
	kfree_sensitive(bkeyiv);
	kfree_sensitive(backup_iv);

	return err;
}

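/*
 * Runs in crypto_engine's kthread. Finalize with BHs disabled, as request
 * callbacks are expected to run in a BH-disabled (softirq-like) context.
 */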
int meson_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = meson_cipher(breq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

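/*
 * Entry points from the skcipher API: queue the request on a round-robin
 * selected flow, or run it synchronously on the software fallback when
 * the hardware cannot handle it.
 */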
int meson_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_DECRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_ENCRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

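/*
 * Allocate the software fallback up front so the data path never has to;
 * the CRYPTO_ALG_NEED_FALLBACK mask excludes implementations that would
 * themselves need a fallback.
 */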
int meson_cipher_init(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct meson_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

	memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

	algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
	op->mc = algt->mc;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(sktfm, sizeof(struct meson_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	return 0;
}

void meson_cipher_exit(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
}

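/*
 * Keep a DMA-capable copy of the key for building the KEY descriptors and
 * mirror it into the fallback tfm, so both paths use the same key.
 */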
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_dev *mc = op->mc;

	switch (keylen) {
	case 128 / 8:
		op->keymode = MODE_AES_128;
		break;
	case 192 / 8:
		op->keymode = MODE_AES_192;
		break;
	case 256 / 8:
		op->keymode = MODE_AES_256;
		break;
	default:
		dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}