// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors:  Will Thomas, James Hartley
 *
 *	Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5	0
#define CR_CONTROL_ALGO_SHA1	1
#define CR_CONTROL_ALGO_SHA224	2
#define CR_CONTROL_ALGO_SHA256	3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE		0x1c
#define CR_RSD0				0x40
#define CR_CORE_REV			0x50
#define CR_CORE_DES1		0x60
#define CR_CORE_DES2		0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_BURST		4
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	dma_addr_t		dma_addr;
	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	struct ahash_request	fallback_req;

	/* Zero length buffer must remain last member of struct */
	u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
}

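/*
 * Program the byte order and the hash algorithm selected for the current
 * request into the control register and start the hash operation.
 */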
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;
	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

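/*
 * Feed data to the hash block by CPU (PIO): write the buffer to the write
 * port one 32-bit word at a time, rounding the length up to whole words.
 */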
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

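/*
 * DMA completion callback: flush any bytes carried over for word alignment
 * via PIO, then reschedule the DMA tasklet to send the next chunk of the
 * scatterlist.
 */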
static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

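/*
 * Map a scatterlist and submit it to the DMA engine as a memory-to-device
 * slave transfer targeting the hash write port.
 */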
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

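/*
 * Handle a whole request by CPU: copy the source scatterlist into the
 * request's bounce buffer and write it out through the PIO path.
 */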
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

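/*
 * Read the digest back from the hardware result queue, filling the digest
 * buffer from its last word down to its first.
 */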
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	__be32 *hash = (__be32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

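/*
 * Complete the current request: copy out the digest on success, clear the
 * driver state flags and notify the crypto API caller.
 */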
static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		ahash_request_complete(req, err);
}

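/*
 * Handle a request by DMA: program the hardware and kick the DMA tasklet,
 * which walks the scatterlist and issues the transfers.
 */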
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err;

	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
	if (IS_ERR(hdev->dma_lch)) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return PTR_ERR(hdev->dma_lch);
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

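/*
 * DMA tasklet: walk the source scatterlist and send it to the hash block,
 * rounding each transfer down to a multiple of 4 bytes and carrying the
 * remainder over to the next transfer (see the comment below on the missing
 * data valid mask).
 */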
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx;
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!hdev->req)
		return;

	ctx = ahash_request_ctx(hdev->req);
	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}
	return err;
}

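/*
 * Reset the hash block, enable the "new results" interrupt and program the
 * total message length in bits for the current request.
 */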
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

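/*
 * Enqueue a request (if any) and, when the hardware is idle, dequeue the
 * next one and start processing it. Requests that fail before reaching the
 * hardware are completed here rather than in the done tasklet.
 */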
static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

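/*
 * img_hash_update() and the other incremental operations below simply
 * forward to the software fallback; only one-shot digests
 * (img_hash_digest) are handled by the hardware.
 */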
static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

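/*
 * One-shot digest: pick a hash device, select the algorithm from the digest
 * size, set up the scatterlist walk state and hand the request to the queue
 * handler.
 */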
static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}

	spin_unlock(&img_hash.lock);
	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

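/*
 * Common transform init: allocate the software fallback and size the request
 * context to hold the fallback request plus a bounce buffer of up to
 * IMG_HASH_DMA_THRESHOLD bytes.
 */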
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		return PTR_ERR(ctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 crypto_ahash_reqsize(ctx->fallback) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

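/*
 * Interrupt handler: acknowledge the interrupt and, when new results are
 * available for an active request, mark the output ready and schedule the
 * done tasklet. Other interrupt sources are only reported.
 */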
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct md5_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_md5_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct sha1_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha1_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha224_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.export = img_hash_export,
		.import = img_hash_import,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct sha256_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_sha256_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

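/*
 * Done tasklet: finish the active request once its output is ready, or
 * restart the queue when the hardware has gone idle.
 */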
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] __maybe_unused = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

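/*
 * Platform probe: map the register bank and write port, request the
 * interrupt, clocks and DMA channel, then register the supported hash
 * algorithms.
 */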
static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int	irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static void img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static int img_hash_resume(struct device *dev)
{
	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove_new	= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.pm	= &img_hash_pm_ops,
		.of_match_table	= img_hash_match,
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");