// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 * Copyright 2021 NXP
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
#include "fsl_hash.h"
#include <hw_sha.h>
#include <asm/cache.h>
#include <linux/errno.h>

#define CRYPTO_MAX_ALG_NAME	80
#define SHA1_DIGEST_SIZE        20
#define SHA256_DIGEST_SIZE      32

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	unsigned int digestsize;
	u32 alg_type;
};

enum caam_hash_algos {
	SHA1 = 0,
	SHA256
};

static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.digestsize = SHA1_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha256",
		.digestsize = SHA256_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
};

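/*
 * Map a generic hash_algo to this driver's algorithm enum
 *
 * Any name other than "sha1" falls through to SHA256, the only other
 * algorithm registered in driver_hash.
 *
 * @algo: Pointer to the generic hash algorithm descriptor
 * Return: SHA1 or SHA256
 */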
static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
{
	if (!strcmp(algo->name, driver_hash[SHA1].name))
		return SHA1;

	return SHA256;
}

/*
 * Create the context for progressive hashing using h/w acceleration
 *
 * @ctxp: Pointer to the pointer of the context for hashing
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -ENOMEM on error
 */
static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
{
	*ctxp = calloc(1, sizeof(struct sha_ctx));
	if (*ctxp == NULL) {
		debug("Cannot allocate memory for context\n");
		return -ENOMEM;
	}
	return 0;
}

/*
 * Update sg table for progressive hashing using h/w acceleration
 *
 * The context is freed by this function if an error occurs.
 * We support at most 32 Scatter/Gather Entries.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @buf: Pointer to the buffer being hashed
 * @size: Size of the buffer being hashed
 * @is_last: 1 if this is the last update; 0 otherwise
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -EINVAL on error
 */
static int caam_hash_update(void *hash_ctx, const void *buf,
			    unsigned int size, int is_last,
			    enum caam_hash_algos caam_algo)
{
	uint32_t final;
	caam_dma_addr_t addr = virt_to_phys((void *)buf);
	struct sha_ctx *ctx = hash_ctx;

	if (ctx->sg_num >= MAX_SG_32) {
		free(ctx);
		return -EINVAL;
	}

	/* Record the physical address of this chunk in the next SG entry */
#ifdef CONFIG_CAAM_64BIT
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
#else
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
#endif
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (caam_dma_addr_t)addr);

	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
		  (size & SG_ENTRY_LENGTH_MASK));

	ctx->sg_num++;

	/* Mark the final entry so the engine knows where the message ends */
	if (is_last) {
		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
			SG_ENTRY_FINAL_BIT;
		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
	}

	return 0;
}
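
/*
 * Illustration (not live code): after caam_hash_update(ctx, a, 64, 0, ...)
 * followed by caam_hash_update(ctx, b, 32, 1, ...) the table holds:
 *
 *	sg_tbl[0]: addr = virt_to_phys(a), len_flag = 64
 *	sg_tbl[1]: addr = virt_to_phys(b), len_flag = 32 | SG_ENTRY_FINAL_BIT
 *
 * Only the entry recorded by the final update carries SG_ENTRY_FINAL_BIT.
 */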

/*
 * Perform progressive hashing on the given buffer and copy the hash to
 * the destination buffer
 *
 * The context is freed after successful completion of the hash
 * operation; in case of failure, the context is not freed.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @dest_buf: Pointer to the destination buffer where the hash is copied
 * @size: Size of the destination buffer
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -EINVAL if the destination buffer is too small, or
 *	   an error code from the job ring
 */
static int caam_hash_finish(void *hash_ctx, void *dest_buf,
			    int size, enum caam_hash_algos caam_algo)
{
	uint32_t len = 0, sg_entry_len;
	struct sha_ctx *ctx = hash_ctx;
	int i = 0, ret = 0;
	caam_dma_addr_t addr;

	if (size < driver_hash[caam_algo].digestsize)
		return -EINVAL;

	/* Flush the SG table so the engine sees all recorded entries */
	flush_dcache_range((ulong)ctx->sg_tbl,
			   (ulong)(ctx->sg_tbl) + (ctx->sg_num * sizeof(struct sg_entry)));

	/* Sum the total data length and flush each data buffer for DMA */
	for (i = 0; i < ctx->sg_num; i++) {
		sg_entry_len = (sec_in32(&ctx->sg_tbl[i].len_flag) &
				SG_ENTRY_LENGTH_MASK);
		len += sg_entry_len;
#ifdef CONFIG_CAAM_64BIT
		addr = sec_in32(&ctx->sg_tbl[i].addr_hi);
		addr = (addr << 32) | sec_in32(&ctx->sg_tbl[i].addr_lo);
#else
		addr = sec_in32(&ctx->sg_tbl[i].addr_lo);
#endif
		flush_dcache_range(addr, addr + sg_entry_len);
	}

	/* Build a hash job descriptor over the scatter/gather table */
	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
				  ctx->hash,
				  driver_hash[caam_algo].alg_type,
				  driver_hash[caam_algo].digestsize,
				  1);

	flush_dcache_range((ulong)ctx->sha_desc,
			   (ulong)(ctx->sha_desc) + (sizeof(uint32_t) * MAX_CAAM_DESCSIZE));
	flush_dcache_range((ulong)ctx->hash,
			   (ulong)(ctx->hash) + driver_hash[caam_algo].digestsize);

	ret = run_descriptor_jr(ctx->sha_desc);

	if (ret) {
		debug("Error %x\n", ret);
		return ret;
	}

	invalidate_dcache_range((ulong)ctx->hash,
				(ulong)(ctx->hash) + driver_hash[caam_algo].digestsize);
	/* dest_buf is only guaranteed to hold digestsize bytes */
	memcpy(dest_buf, ctx->hash, driver_hash[caam_algo].digestsize);
	free(ctx);
	return 0;
}
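
/*
 * Example (illustrative sketch, not called from this file): progressive
 * hashing with the helpers above. It assumes the SEC job ring has
 * already been initialized (e.g. by sec_init()) and skips error
 * checking on the updates for brevity.
 *
 *	void *ctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (!caam_hash_init(&ctx, SHA256)) {
 *		caam_hash_update(ctx, buf1, len1, 0, SHA256);
 *		caam_hash_update(ctx, buf2, len2, 1, SHA256);
 *		caam_hash_finish(ctx, digest, sizeof(digest), SHA256);
 *	}
 */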
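/*
 * Perform a one-shot hash of a buffer using h/w acceleration
 *
 * pbuf and pout are flushed/invalidated in ARCH_DMA_MINALIGN-sized
 * units, so callers should use cache-line-aligned buffers.
 *
 * @pbuf: Pointer to the buffer being hashed
 * @buf_len: Size of the buffer being hashed
 * @pout: Pointer to the digest buffer, at least digestsize bytes long
 * @algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -ENOMEM if descriptor allocation fails, or an error
 *	   code from the job ring
 */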
int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
	      unsigned char *pout, enum caam_hash_algos algo)
{
	int ret = 0;
	uint32_t *desc;
	unsigned int size;

	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

	/* Flush the input buffer so the engine reads current data */
	size = ALIGN(buf_len, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)pbuf, (unsigned long)pbuf + size);

	inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
				  driver_hash[algo].alg_type,
				  driver_hash[algo].digestsize,
				  0);

	/*
	 * Flush the descriptor for DMA and drop any dirty lines over pout
	 * so a later eviction cannot corrupt the engine-written digest.
	 */
	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc, (unsigned long)desc + size);
	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout, (unsigned long)pout + size);

	ret = run_descriptor_jr(desc);

	/* Invalidate again so the CPU reads the digest written by the engine */
	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	free(desc);
	return ret;
}
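
/*
 * Example (illustrative sketch): one-shot SHA-256 into a cache-aligned
 * stack buffer, using the standard ALLOC_CACHE_ALIGN_BUFFER macro from
 * memalign.h:
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, digest, SHA256_DIGEST_SIZE);
 *
 *	if (caam_hash(data, len, digest, SHA256))
 *		printf("hashing failed\n");
 */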

void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
	       unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA256))
		printf("CAAM was not set up properly or it is faulty\n");
}

void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
	     unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA1))
		printf("CAAM was not set up properly or it is faulty\n");
}

int hw_sha_init(struct hash_algo *algo, void **ctxp)
{
	return caam_hash_init(ctxp, get_hash_type(algo));
}

int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
		  unsigned int size, int is_last)
{
	return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
}

int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
		  int size)
{
	return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
}
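
/*
 * Example (illustrative sketch): driving the progressive interface
 * through struct hash_algo, as the generic hash layer does.
 * hash_lookup_algo() is the generic U-Boot lookup helper; error
 * handling is omitted for brevity.
 *
 *	struct hash_algo *algo;
 *	void *ctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (!hash_lookup_algo("sha256", &algo)) {
 *		hw_sha_init(algo, &ctx);
 *		hw_sha_update(algo, ctx, data, len, 1);
 *		hw_sha_finish(algo, ctx, digest, sizeof(digest));
 *	}
 */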