// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for MD5 and SHA1.
 *
 * You can find the datasheet in Documentation/arch/arm/sunxi.rst
 */
#include "sun4i-ss.h"
#include <asm/unaligned.h>
#include <linux/scatterlist.h>

/* This is a totally arbitrary value */
#define SS_TIMEOUT 100

int sun4i_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun4i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0)
		return err;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun4i_req_ctx));
	return 0;
}

void sun4i_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	pm_runtime_put(op->ss->dev);
}

/* sun4i_hash_init: initialize request context */
int sun4i_hash_init(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_req_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->mode = algt->mode;

	return 0;
}
int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct md5_state *octx = out;
	int i;

	octx->byte_count = op->byte_count + op->len;

	memcpy(octx->block, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 4; i++)
			octx->hash[i] = op->hash[i];
	} else {
		/* nothing hashed yet: export the standard MD5 initial state */
		octx->hash[0] = MD5_H0;
		octx->hash[1] = MD5_H1;
		octx->hash[2] = MD5_H2;
		octx->hash[3] = MD5_H3;
	}

	return 0;
}

int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct md5_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

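	/*
	 * Split the imported byte count into what the engine has already
	 * consumed (a whole number of 64-byte blocks) and the tail that must
	 * be buffered again. For example (illustrative values only), a
	 * byte_count of 100 gives op->byte_count = 64 and op->len = 36.
	 */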
	op->byte_count = ictx->byte_count & ~0x3F;
	op->len = ictx->byte_count & 0x3F;

	memcpy(op->buf, ictx->block, op->len);

	for (i = 0; i < 4; i++)
		op->hash[i] = ictx->hash[i];

	return 0;
}

int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct sha1_state *octx = out;
	int i;

	octx->count = op->byte_count + op->len;

	memcpy(octx->buffer, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 5; i++)
			octx->state[i] = op->hash[i];
	} else {
		octx->state[0] = SHA1_H0;
		octx->state[1] = SHA1_H1;
		octx->state[2] = SHA1_H2;
		octx->state[3] = SHA1_H3;
		octx->state[4] = SHA1_H4;
	}

	return 0;
}

int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct sha1_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

	op->byte_count = ictx->count & ~0x3F;
	op->len = ictx->count & 0x3F;

	memcpy(op->buf, ictx->buffer, op->len);

	for (i = 0; i < 5; i++)
		op->hash[i] = ictx->state[i];

	return 0;
}

#define SS_HASH_UPDATE 1
#define SS_HASH_FINAL 2

/*
 * sun4i_hash: feed request data to the hash engine
 *
 * Used for both SHA1 and MD5.
 * Data is written to the SS 32 bits at a time.
 *
 * Since we cannot leave partial data and hash state in the engine,
 * we need to get the hash state back at the end of this function.
 * The hash state can only be retrieved after each full 64-byte block.
 *
 * So the first task is to compute the number of bytes to write to the SS,
 * so that op->len plus that number is a multiple of 64.
 * The extra bytes go to the temporary buffer op->buf, which holds op->len
 * bytes.
 *
 * So at the beginning of update():
 * if op->len + areq->nbytes < 64
 * => all data is copied to the wait buffer (op->buf) and end=0
 * otherwise, write all data from op->buf to the device and set end so that
 * the amount written completes to a multiple of 64 bytes
 *
 * example 1:
 * update1 with 60 bytes => op->len=60
 * update2 with 60 bytes => one more word is needed to reach 64 bytes
 * end=4
 * so write all data from op->buf plus one word from the SGs,
 * then store the remaining data in op->buf
 * final state: op->len=56
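 *
 * example 2 (an illustrative walk-through, not from the datasheet), for a
 * request carrying the final flag:
 * op->len=0, areq->nbytes=10
 * end is rounded down to a multiple of 4 => end=8
 * so 8 bytes are written to the device, the remaining 2 bytes are stored
 * in op->buf and are consumed by the padding code in hash_final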
 */
static int sun4i_hash(struct ahash_request *areq)
{
	/*
	 * i is the total number of bytes read from the SGs, to be compared to
	 * areq->nbytes
	 * i is important because we cannot rely on the SG lengths: the sum of
	 * the SG->length values can be greater than areq->nbytes
	 *
	 * end is the position where we need to stop writing to the device,
	 * to be compared to i
	 *
	 * in_i: offset into the current SG
	 */
	unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
	unsigned int in_i = 0;
	u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun4i_ss_ctx *ss = tfmctx->ss;
	struct sun4i_ss_alg_template *algt;
	struct scatterlist *in_sg = areq->src;
	struct sg_mapping_iter mi;
	int in_r, err = 0;
	size_t copied = 0;
	u32 wb = 0;

	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
		__func__, crypto_tfm_alg_name(areq->base.tfm),
		op->byte_count, areq->nbytes, op->mode,
		op->len, op->hash[0]);

	if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL))
		return 0;

	/* protect against overflow */
	if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
		dev_err(ss->dev, "Cannot process too large request\n");
		return -EINVAL;
	}

	if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) {
		/* linearize data to op->buf */
		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					    op->buf + op->len, areq->nbytes, 0);
		op->len += copied;
		return 0;
	}

	spin_lock_bh(&ss->slock);

	/*
	 * if some data has already been processed,
	 * restore the partial hash state
	 */
	if (op->byte_count) {
		ivmode = SS_IV_ARBITRARY;
		for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
	}
	/* Enable the device */
	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);

	if (!(op->flags & SS_HASH_UPDATE))
		goto hash_final;

	/* start of handling data */
	if (!(op->flags & SS_HASH_FINAL)) {
		end = ((areq->nbytes + op->len) / 64) * 64 - op->len;

		if (end > areq->nbytes || areq->nbytes - end > 63) {
			dev_err(ss->dev, "ERROR: Bound error %u %u\n",
				end, areq->nbytes);
			err = -EINVAL;
			goto release_ss;
		}
	} else {
		/*
		 * Since the final flag is set, end only needs to be a
		 * multiple of 4: the tail is handled by the padding code
		 */
		if (areq->nbytes < 4)
			end = 0;
		else
			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
	}

	/* TODO if SGlen % 4 and !op->len then DMA */
	i = 1;
	while (in_sg && i == 1) {
		if (in_sg->length % 4)
			i = 0;
		in_sg = sg_next(in_sg);
	}
	if (i == 1 && !op->len && areq->nbytes)
		dev_dbg(ss->dev, "We can DMA\n");

	i = 0;
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	in_i = 0;

	do {
		/*
		 * we need to linearize in two cases:
		 * - the buffer already holds data
		 * - the SG has fewer than 4 bytes remaining
		 */
		if (op->len || (mi.length - in_i) < 4) {
			/*
			 * if we enter here, there are two reasons to stop:
			 * - the buffer is full
			 * - we reached the end
			 */
			while (op->len < 64 && i < end) {
				/* how many bytes we can read from current SG */
				in_r = min(end - i, 64 - op->len);
				in_r = min_t(size_t, mi.length - in_i, in_r);
				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
				op->len += in_r;
				i += in_r;
				in_i += in_r;
				if (in_i == mi.length) {
					sg_miter_next(&mi);
					in_i = 0;
				}
			}
			if (op->len > 3 && !(op->len % 4)) {
				/* write buf to the device */
				writesl(ss->base + SS_RXFIFO, op->buf,
					op->len / 4);
				op->byte_count += op->len;
				op->len = 0;
			}
		}
		if (mi.length - in_i > 3 && i < end) {
			/* how many bytes we can read from current SG */
			in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
			in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
			/* how many 32-bit words we can write to the device */
			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
			op->byte_count += todo * 4;
			i += todo * 4;
			in_i += todo * 4;
			rx_cnt -= todo;
			if (!rx_cnt) {
				spaces = readl(ss->base + SS_FCSR);
				rx_cnt = SS_RXFIFO_SPACES(spaces);
			}
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	} while (i < end);

	/*
	 * Now we have written to the device all that we can,
	 * store the remaining bytes in op->buf
	 */
	if ((areq->nbytes - i) < 64) {
		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
			/* how many bytes we can read from current SG */
			in_r = min(areq->nbytes - i, 64 - op->len);
			in_r = min_t(size_t, mi.length - in_i, in_r);
			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
			op->len += in_r;
			i += in_r;
			in_i += in_r;
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	}

	sg_miter_stop(&mi);

	/*
	 * End of data processing.
	 * If the final flag is set, go on to the finalization part.
	 * Otherwise, store the partial hash state.
	 */
	if (op->flags & SS_HASH_FINAL)
		goto hash_final;

	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);

	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);

	goto release_ss;

/*
 * hash_final: finalize the hashing operation
 *
 * If some bytes remain, write them,
 * then ask the SS to finalize the hashing operation.
 *
 * The RX FIFO size is not checked here since the FIFO holds 32 words
 * after each enable and this function never writes more than 32 words.
 * If we come from the update part, at most 3 bytes remain to be
 * written and the SS is fast enough not to care about it.
 */

hash_final:
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
		algt->stat_req++;
	}

	/* write the remaining words of the wait buffer */
	if (op->len) {
		nwait = op->len / 4;
		if (nwait) {
			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
			op->byte_count += 4 * nwait;
		}

		nbw = op->len - 4 * nwait;
		if (nbw) {
			wb = le32_to_cpup((__le32 *)(op->buf + nwait * 4));
			/* keep only the nbw valid trailing bytes */
			wb &= GENMASK((nbw * 8) - 1, 0);

			op->byte_count += nbw;
		}
	}

	/* append the padding bit (0x80) right after the remaining bytes */
	wb |= ((1 << 7) << (nbw * 8));
	((__le32 *)bf)[j++] = cpu_to_le32(wb);

	/*
	 * Amount of zero padding needed to fill the last 64-byte block,
	 * minus 8 bytes for the size and minus 4 bytes for the word with
	 * the final '1' bit (only when no partial word was appended above).
	 * The operations are taken from other MD5/SHA1 implementations.
	 */

	/* free space in the last block */
	fill = 64 - (op->byte_count % 64);
	min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));

	/* if the padding and length do not fit, use the next 64-byte block */
	if (fill < min_fill)
		fill += 64;

	j += (fill - min_fill) / sizeof(u32);
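	/*
	 * Illustrative example (values chosen for exposition): with
	 * op->byte_count % 64 == 3 (so nbw == 3), fill = 61 and min_fill = 8,
	 * giving 13 zero words after the wb word; together with the 8-byte
	 * length below, bf holds 16 words and the final block is exactly
	 * 64 bytes.
	 */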

	/* write the total length of the hashed data, in bits */
	if (op->mode == SS_OP_SHA1) {
		__be64 *bits = (__be64 *)&bf[j];
		*bits = cpu_to_be64(op->byte_count << 3);
		j += 2;
	} else {
		__le64 *bits = (__le64 *)&bf[j];
		*bits = cpu_to_le64(op->byte_count << 3);
		j += 2;
	}
	writesl(ss->base + SS_RXFIFO, bf, j);

	/* Tell the SS to stop the hashing */
	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);

	/*
	 * Wait for the SS to finish the hash.
	 * A timeout can only happen in case of bad overclocking
	 * or a driver bug.
	 */
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);

	/* Get the hash from the device */
	if (op->mode == SS_OP_SHA1) {
		for (i = 0; i < 5; i++) {
			v = readl(ss->base + SS_MD0 + i * 4);
			if (ss->variant->sha1_in_be)
				put_unaligned_le32(v, areq->result + i * 4);
			else
				put_unaligned_be32(v, areq->result + i * 4);
		}
	} else {
		for (i = 0; i < 4; i++) {
			v = readl(ss->base + SS_MD0 + i * 4);
			put_unaligned_le32(v, areq->result + i * 4);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}

int sun4i_hash_final(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_FINAL;
	return sun4i_hash(areq);
}

int sun4i_hash_update(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE;
	return sun4i_hash(areq);
}

/* sun4i_hash_finup: finalize hashing operation after an update */
int sun4i_hash_finup(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}

/* combo of init/update/final functions */
int sun4i_hash_digest(struct ahash_request *areq)
{
	int err;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	err = sun4i_hash_init(areq);
	if (err)
		return err;

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}