// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
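/*
 * A minimal sketch (compiled out, never called; the real job descriptors
 * are built in the ahash_* functions below) of how a job descriptor with
 * the layout above is put together with the desc_constr.h helpers. All
 * DMA addresses are assumed to be already mapped by the caller.
 */
#if 0
static void example_build_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				   int sh_desc_len, dma_addr_t dst_dma,
				   u32 dst_len, dma_addr_t src_dma,
				   u32 src_len)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}
#endif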

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
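/*
 * e.g. for SHA-256 the running context is the 32-byte running digest plus
 * the 8-byte message-length counter, i.e. 40 bytes in total (see the
 * runninglen[] table in caam_hash_cra_init() below).
 */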

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};
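/*
 * A minimal allocation sketch: the variable-length link table lives right
 * after the fixed part of the edesc, so a single allocation covers both
 * (this is exactly what ahash_edesc_alloc() below does):
 *
 *	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
 */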

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc)
		return NULL;

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

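	/*
	 * Note: on -EINPROGRESS (enqueued directly to CAAM) or -EBUSY
	 * (backlogged via crypto-engine) ownership of the edesc passes to
	 * the completion callback; for any other return value the mappings
	 * and the edesc are released right here.
	 */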
	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}
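	/*
	 * Worked example of the adjustment above (CMAC, blocksize 16):
	 * buflen 0 and req->nbytes 32 give in_len 32, *next_buflen 0 and
	 * to_hash 32; the branch then hashes only 16 bytes now and keeps
	 * the final 16-byte block buffered, so ahash_final() always has
	 * at least one block left to process.
	 */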

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep the last block in the internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
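/*
 * The update/finup/final pointers in caam_hash_state form a small state
 * machine: ahash_init() starts every request at ahash_update_first(),
 * which switches to the _ctx variants once a running context lives in
 * CAAM, or to the _no_ctx variants while data is still only buffered.
 */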

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
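/*
 * Sketch of the intended usage from the generic crypto API (hypothetical
 * caller, error handling omitted): a partially hashed request is saved
 * with crypto_ahash_export() and resumed, possibly on another request,
 * with crypto_ahash_import():
 *
 *	struct caam_export_state state;
 *
 *	crypto_ahash_update(req1);
 *	crypto_ahash_export(req1, &state);
 *	crypto_ahash_import(req2, &state);
 *	crypto_ahash_final(req2);
 */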
1573
1574struct caam_hash_template {
1575	char name[CRYPTO_MAX_ALG_NAME];
1576	char driver_name[CRYPTO_MAX_ALG_NAME];
1577	char hmac_name[CRYPTO_MAX_ALG_NAME];
1578	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1579	unsigned int blocksize;
1580	struct ahash_alg template_ahash;
1581	u32 alg_type;
1582};
1583
1584/* ahash descriptors */
1585static struct caam_hash_template driver_hash[] = {
1586	{
1587		.name = "sha1",
1588		.driver_name = "sha1-caam",
1589		.hmac_name = "hmac(sha1)",
1590		.hmac_driver_name = "hmac-sha1-caam",
1591		.blocksize = SHA1_BLOCK_SIZE,
1592		.template_ahash = {
1593			.init = ahash_init,
1594			.update = ahash_update,
1595			.final = ahash_final,
1596			.finup = ahash_finup,
1597			.digest = ahash_digest,
1598			.export = ahash_export,
1599			.import = ahash_import,
1600			.setkey = ahash_setkey,
1601			.halg = {
1602				.digestsize = SHA1_DIGEST_SIZE,
1603				.statesize = sizeof(struct caam_export_state),
1604			},
1605		},
1606		.alg_type = OP_ALG_ALGSEL_SHA1,
1607	}, {
1608		.name = "sha224",
1609		.driver_name = "sha224-caam",
1610		.hmac_name = "hmac(sha224)",
1611		.hmac_driver_name = "hmac-sha224-caam",
1612		.blocksize = SHA224_BLOCK_SIZE,
1613		.template_ahash = {
1614			.init = ahash_init,
1615			.update = ahash_update,
1616			.final = ahash_final,
1617			.finup = ahash_finup,
1618			.digest = ahash_digest,
1619			.export = ahash_export,
1620			.import = ahash_import,
1621			.setkey = ahash_setkey,
1622			.halg = {
1623				.digestsize = SHA224_DIGEST_SIZE,
1624				.statesize = sizeof(struct caam_export_state),
1625			},
1626		},
1627		.alg_type = OP_ALG_ALGSEL_SHA224,
1628	}, {
1629		.name = "sha256",
1630		.driver_name = "sha256-caam",
1631		.hmac_name = "hmac(sha256)",
1632		.hmac_driver_name = "hmac-sha256-caam",
1633		.blocksize = SHA256_BLOCK_SIZE,
1634		.template_ahash = {
1635			.init = ahash_init,
1636			.update = ahash_update,
1637			.final = ahash_final,
1638			.finup = ahash_finup,
1639			.digest = ahash_digest,
1640			.export = ahash_export,
1641			.import = ahash_import,
1642			.setkey = ahash_setkey,
1643			.halg = {
1644				.digestsize = SHA256_DIGEST_SIZE,
1645				.statesize = sizeof(struct caam_export_state),
1646			},
1647		},
1648		.alg_type = OP_ALG_ALGSEL_SHA256,
1649	}, {
1650		.name = "sha384",
1651		.driver_name = "sha384-caam",
1652		.hmac_name = "hmac(sha384)",
1653		.hmac_driver_name = "hmac-sha384-caam",
1654		.blocksize = SHA384_BLOCK_SIZE,
1655		.template_ahash = {
1656			.init = ahash_init,
1657			.update = ahash_update,
1658			.final = ahash_final,
1659			.finup = ahash_finup,
1660			.digest = ahash_digest,
1661			.export = ahash_export,
1662			.import = ahash_import,
1663			.setkey = ahash_setkey,
1664			.halg = {
1665				.digestsize = SHA384_DIGEST_SIZE,
1666				.statesize = sizeof(struct caam_export_state),
1667			},
1668		},
1669		.alg_type = OP_ALG_ALGSEL_SHA384,
1670	}, {
1671		.name = "sha512",
1672		.driver_name = "sha512-caam",
1673		.hmac_name = "hmac(sha512)",
1674		.hmac_driver_name = "hmac-sha512-caam",
1675		.blocksize = SHA512_BLOCK_SIZE,
1676		.template_ahash = {
1677			.init = ahash_init,
1678			.update = ahash_update,
1679			.final = ahash_final,
1680			.finup = ahash_finup,
1681			.digest = ahash_digest,
1682			.export = ahash_export,
1683			.import = ahash_import,
1684			.setkey = ahash_setkey,
1685			.halg = {
1686				.digestsize = SHA512_DIGEST_SIZE,
1687				.statesize = sizeof(struct caam_export_state),
1688			},
1689		},
1690		.alg_type = OP_ALG_ALGSEL_SHA512,
1691	}, {
1692		.name = "md5",
1693		.driver_name = "md5-caam",
1694		.hmac_name = "hmac(md5)",
1695		.hmac_driver_name = "hmac-md5-caam",
1696		.blocksize = MD5_BLOCK_WORDS * 4,
1697		.template_ahash = {
1698			.init = ahash_init,
1699			.update = ahash_update,
1700			.final = ahash_final,
1701			.finup = ahash_finup,
1702			.digest = ahash_digest,
1703			.export = ahash_export,
1704			.import = ahash_import,
1705			.setkey = ahash_setkey,
1706			.halg = {
1707				.digestsize = MD5_DIGEST_SIZE,
1708				.statesize = sizeof(struct caam_export_state),
1709			},
1710		},
1711		.alg_type = OP_ALG_ALGSEL_MD5,
1712	}, {
1713		.hmac_name = "xcbc(aes)",
1714		.hmac_driver_name = "xcbc-aes-caam",
1715		.blocksize = AES_BLOCK_SIZE,
1716		.template_ahash = {
1717			.init = ahash_init,
1718			.update = ahash_update,
1719			.final = ahash_final,
1720			.finup = ahash_finup,
1721			.digest = ahash_digest,
1722			.export = ahash_export,
1723			.import = ahash_import,
1724			.setkey = axcbc_setkey,
1725			.halg = {
1726				.digestsize = AES_BLOCK_SIZE,
1727				.statesize = sizeof(struct caam_export_state),
1728			},
1729		 },
1730		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1731	}, {
1732		.hmac_name = "cmac(aes)",
1733		.hmac_driver_name = "cmac-aes-caam",
1734		.blocksize = AES_BLOCK_SIZE,
1735		.template_ahash = {
1736			.init = ahash_init,
1737			.update = ahash_update,
1738			.final = ahash_final,
1739			.finup = ahash_finup,
1740			.digest = ahash_digest,
1741			.export = ahash_export,
1742			.import = ahash_import,
1743			.setkey = acmac_setkey,
1744			.halg = {
1745				.digestsize = AES_BLOCK_SIZE,
1746				.statesize = sizeof(struct caam_export_state),
1747			},
1748		 },
1749		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1750	},
1751};
1752
1753struct caam_hash_alg {
1754	struct list_head entry;
1755	int alg_type;
1756	bool is_hmac;
1757	struct ahash_engine_alg ahash_alg;
1758};
1759
1760static int caam_hash_cra_init(struct crypto_tfm *tfm)
1761{
1762	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1763	struct crypto_alg *base = tfm->__crt_alg;
1764	struct hash_alg_common *halg =
1765		 container_of(base, struct hash_alg_common, base);
1766	struct ahash_alg *alg =
1767		 container_of(halg, struct ahash_alg, halg);
1768	struct caam_hash_alg *caam_hash =
1769		 container_of(alg, struct caam_hash_alg, ahash_alg.base);
1770	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1771	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1772	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1773					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1774					 HASH_MSG_LEN + 32,
1775					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1776					 HASH_MSG_LEN + 64,
1777					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1778	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
1779						      sh_desc_update);
1780	dma_addr_t dma_addr;
1781	struct caam_drv_private *priv;
1782
1783	/*
1784	 * Get a Job ring from Job Ring driver to ensure in-order
1785	 * crypto request processing per tfm
1786	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

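	/*
	 * The AES-based MACs (XCBC, CMAC) run on the class 1 (AES) CHA,
	 * while MD5/SHA-* run on the class 2 (MDHA) CHA, hence the
	 * OP_TYPE_CLASS1_ALG vs. OP_TYPE_CLASS2_ALG split below.
	 */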
	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		/*
		 * The XCBC shared descriptors store the derived subkeys
		 * back into ctx->key, so the key buffer is mapped
		 * bidirectionally.
		 */
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
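		/*
		 * OP_ALG_ALGSEL_SUBMASK extracts the per-algorithm index
		 * (MD5 = 0, SHA-1 = 1, ..., SHA-512 = 5) used to look up
		 * the running-digest size in runninglen[] above.
		 */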
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

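	/*
	 * Map all four shared descriptors with a single DMA mapping; they
	 * are laid out contiguously in struct caam_hash_ctx, from
	 * sh_desc_update up to (but not including) key.
	 */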
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

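	/* Derive the other descriptors' bus addresses from the one mapping */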
	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					sh_desc_update_offset;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms, the shared descriptors are created
	 * later, in the setkey() callback; unkeyed hashes can build
	 * theirs right away.
	 */
	return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

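	/* Undo the mappings set up in caam_hash_cra_init() */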
	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

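	/* hash_list is initialized only if caam_algapi_hash_init() ran */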
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_engine_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg.base = template->template_ahash;
	halg = &t_alg->ahash_alg.base;
	alg = &halg->halg.base;

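	/*
	 * Each template is registered twice: as a keyed (hmac/xcbc/cmac)
	 * variant and, except for the AES MACs, as an unkeyed plain-hash
	 * variant with .setkey removed.
	 */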
	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
		t_alg->is_hmac = true;
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		halg->setkey = NULL;
		t_alg->is_hmac = false;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	/* requests complete asynchronously; processing may allocate memory */
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;

	return t_alg;
}

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* LP256 (low-power) MDHA supports digests only up to SHA-256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* AES MACs (xcbc, cmac) have no unkeyed variant to register */
		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}
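
/*
 * Illustrative only (not part of the driver): once registration succeeds,
 * a kernel user reaches these algorithms through the regular ahash API.
 * A minimal sketch, with error handling elided and done_cb/done_ctx being
 * assumed names:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, done_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_ahash_digest(req);
 *
 * CAAM_CRA_PRIORITY (3000) is intended to make the CAAM implementation
 * win over lower-priority software implementations of the same algorithm.
 */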