// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/module.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

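/*
 * Hash state kept in the shash descriptor context.  The digest words are
 * stored big-endian so they can be copied directly to and from the NX
 * coprocessor block's message_digest field; count[0] tracks the total
 * number of bytes hashed (count[1] is unused by this driver).
 */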
struct sha512_state_be {
	__be64 state[SHA512_DIGEST_SIZE / 8];
	u64 count[2];
	u8 buf[SHA512_BLOCK_SIZE];
};

static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	int err;

	err = nx_crypto_ctx_sha_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);

	return 0;
}

static int nx_sha512_init(struct shash_desc *desc)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof(*sctx));

	sctx->state[0] = __cpu_to_be64(SHA512_H0);
	sctx->state[1] = __cpu_to_be64(SHA512_H1);
	sctx->state[2] = __cpu_to_be64(SHA512_H2);
	sctx->state[3] = __cpu_to_be64(SHA512_H3);
	sctx->state[4] = __cpu_to_be64(SHA512_H4);
	sctx->state[5] = __cpu_to_be64(SHA512_H5);
	sctx->state[6] = __cpu_to_be64(SHA512_H6);
	sctx->state[7] = __cpu_to_be64(SHA512_H7);
	sctx->count[0] = 0;

	return 0;
}

static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *out_sg;
	u64 to_process, leftover = 0, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* Two cases for the total data length:
	 *  1: total < SHA512_BLOCK_SIZE: buffer the data and return
	 *  2: total >= SHA512_BLOCK_SIZE: process whole blocks, then buffer
	 *     any leftover
	 */
	total = buf_len + len;
	if (total < SHA512_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count[0] += len;
		goto out;
	}

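	/*
	 * At least one full block is available.  Seed the coprocessor
	 * block (CPB) with the current hash state and flag this request
	 * as an intermediate, continued piece of a larger message.
	 */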
	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state,
	       SHA512_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = SHA512_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, max_sg_len);
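	/*
	 * The pointer arithmetic below is intentionally reversed: a
	 * negative length in struct vio_pfo_op tells the hypervisor that
	 * the corresponding address points to a scatter/gather list
	 * rather than a flat buffer (see asm/vio.h).
	 */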
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (data_len != SHA512_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	do {
		int used_sgs = 0;
		struct nx_sg *in_sg = nx_ctx->in_sg;

		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(in_sg,
						 (u8 *) sctx->buf,
						 &data_len, max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
			used_sgs = in_sg - nx_ctx->in_sg;
		}

		/* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
		 * processed in this iteration.  This value is restricted
		 * by the sg list limits and by the number of sgs we
		 * already used for leftover data (see above).  In the
		 * ideal case we could allow NX_PAGE_SIZE * max_sg_len,
		 * but because the data may not be page-aligned, we need
		 * to account for that too.
		 */
		to_process = min_t(u64, total,
			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
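		/* round down to a whole number of blocks; the mask is
		 * exact because SHA512_BLOCK_SIZE (128) is a power of
		 * two */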
		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		if (data_len != (to_process - buf_len)) {
			rc = -EINVAL;
			goto out;
		}

		to_process = data_len + buf_len;
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha512.input_partial_digest,
		       csbcpb->cpb.sha512.message_digest,
		       SHA512_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha512_ops));

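		/*
		 * Advance past what was just processed.  The buffered
		 * bytes are consumed on the first pass only; later
		 * iterations read straight from 'data'.
		 */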
		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA512_BLOCK_SIZE);

	/* copy the leftover back into the descriptor's buffer */
	if (leftover)
		memcpy(sctx->buf, data, leftover);
	sctx->count[0] += len;
	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest,
	       SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u32 max_sg_len;
	u64 count0;
	unsigned long irq_flags;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating
	 * that this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
		       SHA512_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	count0 = sctx->count[0] * 8;

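	/*
	 * Only the low-order 64 bits of the message bit length are
	 * tracked (sctx->count[1] is never updated), so programming
	 * message_bit_length_lo is sufficient here.
	 */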
	csbcpb->cpb.sha512.message_bit_length_lo = count0;

	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
				 max_sg_len);

	if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA512_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  max_sg_len);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

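/*
 * export/import copy the raw sha512_state_be, which is self-contained:
 * the partial digest, the running byte count, and any buffered partial
 * block.  No device state needs to be serialized.
 */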
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

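/*
 * This transform is reached through the generic crypto API rather than
 * being called directly.  As an illustrative (hypothetical) caller:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha512", 0, 0);
 *
 * would select this implementation over the generic C one by virtue of
 * its higher cra_priority (300).
 */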
struct shash_alg nx_shash_sha512_alg = {
	.digestsize = SHA512_DIGEST_SIZE,
	.init       = nx_sha512_init,
	.update     = nx_sha512_update,
	.final      = nx_sha512_final,
	.export     = nx_sha512_export,
	.import     = nx_sha512_import,
	.descsize   = sizeof(struct sha512_state_be),
	.statesize  = sizeof(struct sha512_state_be),
	.base       = {
		.cra_name        = "sha512",
		.cra_driver_name = "sha512-nx",
		.cra_priority    = 300,
		.cra_blocksize   = SHA512_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha512_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};