///////////////////////////////////////////////////////////////////////////////
//
/// \file       sha256.c
/// \brief      SHA-256
///
/// \todo       Crypto++ has x86 ASM optimizations. They use SSE so if they
///             are imported to liblzma, SSE instructions need to be used
///             conditionally to keep the code working on older boxes.
///             We could also support using some external library for SHA-256.
//
//  This code is based on the code found in 7-Zip, which has a modified
//  version of the SHA-256 found in Crypto++ <http://www.cryptopp.com/>.
//  The code was modified a little to fit into liblzma.
//
//  Authors:    Kevin Springle
//              Wei Dai
//              Igor Pavlov
//              Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

// Avoid bogus warnings in transform().
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) || __GNUC__ > 4
#	pragma GCC diagnostic ignored "-Wuninitialized"
#endif

#include "check.h"

// At least on x86, GCC is able to optimize this to a rotate instruction.
#define rotr_32(num, amount) ((num) >> (amount) | (num) << (32 - (amount)))

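// blk0() loads an input word into the message schedule W[]. blk2() computes
// the next schedule word in place, keeping only a rolling window of the
// sixteen most recent words instead of the full 64-word array.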
#define blk0(i) (W[i] = data[i])
#define blk2(i) (W[i & 15] += s1(W[(i - 2) & 15]) + W[(i - 7) & 15] \
		+ s0(W[(i - 15) & 15]))

#define Ch(x, y, z) (z ^ (x & (y ^ z)))
#define Maj(x, y, z) ((x & y) | (z & (x | y)))

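// The eight working variables a-h live in T[]. Indexing with (x - i) & 7
// rotates the names around the array as the round number i grows, so no
// data needs to be moved between rounds.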
#define a(i) T[(0 - i) & 7]
#define b(i) T[(1 - i) & 7]
#define c(i) T[(2 - i) & 7]
#define d(i) T[(3 - i) & 7]
#define e(i) T[(4 - i) & 7]
#define f(i) T[(5 - i) & 7]
#define g(i) T[(6 - i) & 7]
#define h(i) T[(7 - i) & 7]

#define R(i) \
	h(i) += S1(e(i)) + Ch(e(i), f(i), g(i)) + SHA256_K[i + j] \
		+ (j ? blk2(i) : blk0(i)); \
	d(i) += h(i); \
	h(i) += S0(a(i)) + Maj(a(i), b(i), c(i))

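// The Sigma (S0, S1) and sigma (s0, s1) functions from the SHA-256
// specification (FIPS 180-2).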
#define S0(x) (rotr_32(x, 2) ^ rotr_32(x, 13) ^ rotr_32(x, 22))
#define S1(x) (rotr_32(x, 6) ^ rotr_32(x, 11) ^ rotr_32(x, 25))
#define s0(x) (rotr_32(x, 7) ^ rotr_32(x, 18) ^ (x >> 3))
#define s1(x) (rotr_32(x, 17) ^ rotr_32(x, 19) ^ (x >> 10))


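// Round constants: the first 32 bits of the fractional parts of the cube
// roots of the first 64 prime numbers (see FIPS 180-2).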
static const uint32_t SHA256_K[64] = {
	0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
	0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
	0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
	0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
	0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
	0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
	0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
	0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
	0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
	0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
	0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
	0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
	0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
	0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
	0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
	0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
};


static void
transform(uint32_t state[static 8], const uint32_t data[static 16])
{
	uint32_t W[16];
	uint32_t T[8];

	// Copy state[] to working vars.
	memcpy(T, state, sizeof(T));

	// The 64 rounds are done as four iterations of 16 unrolled rounds.
	for (unsigned int j = 0; j < 64; j += 16) {
		R( 0); R( 1); R( 2); R( 3);
		R( 4); R( 5); R( 6); R( 7);
		R( 8); R( 9); R(10); R(11);
		R(12); R(13); R(14); R(15);
	}

	// Add the working vars back into state[].
	state[0] += a(0);
	state[1] += b(0);
	state[2] += c(0);
	state[3] += d(0);
	state[4] += e(0);
	state[5] += f(0);
	state[6] += g(0);
	state[7] += h(0);
}


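// SHA-256 interprets the input as big-endian 32-bit words. The buffer is
// filled with raw bytes, so on little-endian systems each word is byte
// swapped before calling transform().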
static void
process(lzma_check_state *check)
{
#ifdef WORDS_BIGENDIAN
	transform(check->state.sha256.state, check->buffer.u32);

#else
	uint32_t data[16];

	for (size_t i = 0; i < 16; ++i)
		data[i] = bswap32(check->buffer.u32[i]);

	transform(check->state.sha256.state, data);
#endif

	return;
}


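// The initial hash values are the first 32 bits of the fractional parts
// of the square roots of the first eight prime numbers (see FIPS 180-2).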
extern void
lzma_sha256_init(lzma_check_state *check)
{
	static const uint32_t s[8] = {
		0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
		0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
	};

	memcpy(check->state.sha256.state, s, sizeof(s));
	check->state.sha256.size = 0;

	return;
}


extern void
lzma_sha256_update(const uint8_t *buf, size_t size, lzma_check_state *check)
{
	// Copy the input data into a properly aligned temporary buffer.
	// This way we can be called with arbitrarily sized buffers
	// (no need to be a multiple of 64 bytes), and the code also works
	// on architectures that don't allow unaligned memory access.
	while (size > 0) {
		const size_t copy_start = check->state.sha256.size & 0x3F;
		size_t copy_size = 64 - copy_start;
		if (copy_size > size)
			copy_size = size;

		memcpy(check->buffer.u8 + copy_start, buf, copy_size);

		buf += copy_size;
		size -= copy_size;
		check->state.sha256.size += copy_size;

		if ((check->state.sha256.size & 0x3F) == 0)
			process(check);
	}

	return;
}


extern void
lzma_sha256_finish(lzma_check_state *check)
{
	// Add padding as described in RFC 3174 (it describes SHA-1 but
	// the same padding style is used for SHA-256 too).
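	//
	// The padded message has the form
	//
	//     data | 0x80 | zero bytes | 64-bit big-endian bit count
	//
	// with just enough zero bytes to make the total length a multiple
	// of 64 bytes.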
	size_t pos = check->state.sha256.size & 0x3F;
	check->buffer.u8[pos++] = 0x80;

	while (pos != 64 - 8) {
		if (pos == 64) {
			process(check);
			pos = 0;
		}

		check->buffer.u8[pos++] = 0x00;
	}

	// Convert the message size from bytes to bits.
	check->state.sha256.size *= 8;

	check->buffer.u64[(64 - 8) / 8] = conv64be(check->state.sha256.size);

	process(check);

	for (size_t i = 0; i < 8; ++i)
		check->buffer.u32[i] = conv32be(check->state.sha256.state[i]);

	return;
}
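
// A minimal usage sketch for these internal functions. The variables
// "data" and "data_size" stand for the caller's input and are only
// illustrative; lzma_check_state comes from check.h:
//
//     lzma_check_state check;
//     lzma_sha256_init(&check);
//     lzma_sha256_update(data, data_size, &check);
//     lzma_sha256_finish(&check);
//     // The 32-byte digest is now in check.buffer.u8[0 ... 31].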