/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>

#include <arm_neon.h>

#include "sha512.h"
#include "sha512c_impl.h"

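/*
 * SHA-512 block transform using the Arm SHA-512 NEON intrinsics
 * (SHA512H, SHA512H2, SHA512SU0 and SHA512SU1).  Each call consumes one
 * SHA512_BLOCK_LENGTH (128) byte message block and updates the eight
 * 64-bit words of the hash state.  The caller is expected to have
 * checked that the CPU implements the SHA-512 extension before
 * selecting this implementation.
 */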
void __hidden
SHA512_Transform_arm64_impl(uint64_t * state,
    const unsigned char block[SHA512_BLOCK_LENGTH], const uint64_t K[80])
{
	uint64x2_t W[8];
	uint64x2_t S[4];
	uint64x2_t S_start[4];
	uint64x2_t K_tmp, S_tmp;
	int i;

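	/*
	 * A64_LOAD_W loads 16 bytes of the input block into a 128-bit
	 * vector and byte-swaps each 64-bit lane, converting the
	 * big-endian message words to the host byte order.
	 */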
#define	A64_LOAD_W(x)							\
    W[x] = vld1q_u64((const uint64_t *)(&block[(x) * 16]));		\
    W[x] = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(W[x])))

	/* 1. Prepare the first part of the message schedule W. */
	A64_LOAD_W(0);
	A64_LOAD_W(1);
	A64_LOAD_W(2);
	A64_LOAD_W(3);
	A64_LOAD_W(4);
	A64_LOAD_W(5);
	A64_LOAD_W(6);
	A64_LOAD_W(7);

	/* 2. Initialize working variables. */
	S[0] = vld1q_u64(&state[0]);
	S[1] = vld1q_u64(&state[2]);
	S[2] = vld1q_u64(&state[4]);
	S[3] = vld1q_u64(&state[6]);

	S_start[0] = S[0];
	S_start[1] = S[1];
	S_start[2] = S[2];
	S_start[3] = S[3];

	/* 3. Mix. */
	for (i = 0; i < 80; i += 16) {
		/*
		 * The working state is kept in 4 vectors, each holding two
		 * of the eight 64-bit working variables:
		 *  ab = S[( 8 - i) % 4]
		 *  cd = S[( 9 - i) % 4]
		 *  ef = S[(10 - i) % 4]
		 *  gh = S[(11 - i) % 4]
		 *
		 * The following macro:
		 *  - Loads the round constants
		 *  - Adds them to the schedule words
		 *  - Rotates the result to switch the order of the two halves
		 *    so they are in the correct order for gh
		 *  - Fixes the alignment:
		 *    - Extracts fg from ef and gh
		 *    - Extracts de from cd and ef
		 *  - Passes these into the first part of the sha512 calculation
		 *    to calculate the Sigma 1 and Ch steps
		 *  - Calculates the Sigma 0 and Maj steps and stores to gh
		 *  - Adds the first part to the cd vector
		 */
#define	A64_RNDr(S, W, i, ii)						\
    K_tmp = vld1q_u64(K + (i * 2) + ii);				\
    K_tmp = vaddq_u64(W[i], K_tmp);					\
    K_tmp = vextq_u64(K_tmp, K_tmp, 1);					\
    K_tmp = vaddq_u64(K_tmp, S[(11 - i) % 4]);				\
    S_tmp = vsha512hq_u64(K_tmp,					\
      vextq_u64(S[(10 - i) % 4], S[(11 - i) % 4], 1),			\
      vextq_u64(S[(9 - i) % 4], S[(10 - i) % 4], 1));			\
    S[(11 - i) % 4] = vsha512h2q_u64(S_tmp, S[(9 - i) % 4], S[(8 - i) % 4]); \
    S[(9 - i) % 4] = vaddq_u64(S[(9 - i) % 4], S_tmp)

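		/*
		 * Each A64_RNDr invocation performs two SHA-512 rounds,
		 * so the eight calls below cover the 16 rounds of this
		 * iteration, consuming the round constants K[i] through
		 * K[i + 15].
		 */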
		A64_RNDr(S, W, 0, i);
		A64_RNDr(S, W, 1, i);
		A64_RNDr(S, W, 2, i);
		A64_RNDr(S, W, 3, i);
		A64_RNDr(S, W, 4, i);
		A64_RNDr(S, W, 5, i);
		A64_RNDr(S, W, 6, i);
		A64_RNDr(S, W, 7, i);

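		/*
		 * The final 16 rounds (i == 64) consume the schedule words
		 * computed in the previous iteration, so no further message
		 * schedule expansion is needed.
		 */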
		if (i == 64)
			break;

		/*
		 * Perform the message schedule computation:
		 * - vsha512su0q_u64 performs the sigma 0 half and adds it to
		 *   the old value
		 * - vextq_u64 fixes the alignment of the vectors
		 * - vsha512su1q_u64 performs the sigma 1 half and adds it
		 *   to the sum of the above
		 */
#define A64_MSCH(x)							\
    W[x] = vsha512su1q_u64(						\
      vsha512su0q_u64(W[x], W[(x + 1) % 8]),				\
      W[(x + 7) % 8],							\
      vextq_u64(W[(x + 4) % 8], W[(x + 5) % 8], 1))

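		/*
		 * In scalar terms this is the FIPS 180-4 recurrence
		 *   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
		 * computed two words at a time: vsha512su0q_u64 covers
		 * sigma0(W[t-15]) + W[t-16], and vsha512su1q_u64 adds in
		 * sigma1(W[t-2]) and the extracted W[t-7] pair.
		 */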
		A64_MSCH(0);
		A64_MSCH(1);
		A64_MSCH(2);
		A64_MSCH(3);
		A64_MSCH(4);
		A64_MSCH(5);
		A64_MSCH(6);
		A64_MSCH(7);
	}

	/* 4. Mix local working variables into global state. */
	S[0] = vaddq_u64(S[0], S_start[0]);
	S[1] = vaddq_u64(S[1], S_start[1]);
	S[2] = vaddq_u64(S[2], S_start[2]);
	S[3] = vaddq_u64(S[3], S_start[3]);

	vst1q_u64(&state[0], S[0]);
	vst1q_u64(&state[2], S[1]);
	vst1q_u64(&state[4], S[2]);
	vst1q_u64(&state[6], S[3]);
}