// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

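/*
 * Iterator state used when building a TDMA descriptor chain for a hash
 * request: 'base' walks the overall operation while 'src' walks the
 * source scatterlist.
 */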
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

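/*
 * Helpers managing the DMA-pool backed buffers fed to the engine: 'cache'
 * holds left-over data from a previous update, 'padding' holds the
 * manually generated MD5/SHA trailer.
 */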
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

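/*
 * Compute how many padding bytes (including the 0x80 marker) are needed
 * to reach an offset of 56 mod 64, leaving room for the 8-byte
 * total-length trailer at the end of the final 64-byte block.
 */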
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

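/*
 * Write the standard MD5/SHA trailer into @buf: a 0x80 marker byte, zero
 * padding up to 56 mod 64, then the total message length in bits as a
 * 64-bit little-endian (MD5) or big-endian (SHA) value. Returns the
 * number of bytes written.
 */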
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

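/*
 * Standard (non-DMA) mode: the CPU copies the operation descriptor, any
 * cached data and up to one SRAM payload worth of source data into the
 * engine SRAM, fixes up the fragment mode and total/fragment lengths,
 * then kicks the accelerator. Called once per SRAM-sized chunk until the
 * request has been fully processed.
 */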
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	if (engine->pool)
		memcpy(engine->sram_pool, &creq->op_tmpl,
		       sizeof(creq->op_tmpl));
	else
		memcpy_toio(engine->sram, &creq->op_tmpl,
			    sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr) {
		if (engine->pool)
			memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
			       creq->cache, creq->cache_ptr);
		else
			memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				    creq->cache, creq->cache_ptr);
	}

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += mv_cesa_sg_copy_to_sram(
			engine, req->src, creq->src_nents,
			CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
			len - creq->cache_ptr, sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				if (engine->pool)
					memcpy(creq->cache,
					       engine->sram_pool +
					       CESA_SA_DATA_SRAM_OFFSET + len,
					       new_cache_ptr);
				else
					memcpy_fromio(creq->cache,
						      engine->sram +
						      CESA_SA_DATA_SRAM_OFFSET +
						      len,
						      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				len += i;
				if (engine->pool)
					memcpy(engine->sram_pool + len +
					       CESA_SA_DATA_SRAM_OFFSET,
					       creq->cache, i);
				else
					memcpy_toio(engine->sram + len +
						    CESA_SA_DATA_SRAM_OFFSET,
						    creq->cache, i);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	if (engine->pool)
		memcpy(engine->sram_pool, op, sizeof(*op));
	else
		memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

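/*
 * A standard-mode request stays in progress until everything except the
 * bytes held back in the cache has been fed through the engine.
 */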
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

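/*
 * Launch the prepared TDMA chain. If the chain was flagged with
 * CESA_TDMA_SET_STATE, the partial digest must first be loaded into the
 * IVDIG registers by the CPU.
 */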
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = le32_to_cpu(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * The hardware's MD5 digest is little endian, but its
			 * SHA digests are big endian.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

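/*
 * Buffer the request data in creq->cache when it is too small to fill a
 * full hash block and this is not the final request. Returns true when
 * the data was cached and no engine processing is needed yet.
 */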
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}

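/*
 * Add an operation descriptor for @frag_len bytes to the TDMA chain,
 * followed by a dummy descriptor that launches it. The template is
 * downgraded from "first" to "mid" fragment so that subsequent fragments
 * inherit the right mode.
 */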
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

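/*
 * Terminate a DMA-mode hash: either let the engine generate the padding
 * itself (when the total length fits in the descriptor and data is
 * outstanding), or append the software-generated trailer, splitting it
 * across two SRAM payloads if it does not fit after the last fragment.
 */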
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

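/*
 * Build the complete TDMA descriptor chain for a hash request: map the
 * source scatterlist, replay the cached left-over data, add one
 * operation per SRAM block worth of new data, and finish with either a
 * last-fragment operation or a plain fragment, depending on whether this
 * is the final request.
 */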
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG\n");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

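/*
 * Generic export/import helpers: the partial digest, the total length
 * processed so far, and the cached partial block are enough to resume a
 * hash later, possibly on another engine.
 */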
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

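/*
 * Hash one HMAC pad block with the underlying (non-HMAC) ahash transform
 * and export the resulting partial state, which is later used as the
 * initial IV of the hardware operation.
 */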
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	DECLARE_CRYPTO_WAIT(result);
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	ret = crypto_wait_req(ret, &result);

	if (ret)
		return ret;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

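/*
 * Derive the HMAC ipad/opad blocks as described in RFC 2104: hash keys
 * longer than a block down to digest size, zero-pad to the block size,
 * then XOR with the ipad/opad constants.
 */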
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	DECLARE_CRYPTO_WAIT(result);
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);

		ret = crypto_ahash_digest(req);
		ret = crypto_wait_req(ret, &result);

		/* Set the memory region to 0 to avoid any leak. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

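/*
 * Compute the HMAC inner/outer initial states for @hash_alg_name using a
 * temporary ahash transform, so that hardware HMAC operations can start
 * directly from the precomputed pads.
 */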
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = cpu_to_be32(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};