/*-
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2010-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <crypto/aesni/aesni.h>

MALLOC_DECLARE(M_AESNI);

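/*
 * CBC encryption: each plaintext block is XORed with the previous
 * ciphertext block (the IV for the first block) before encryption,
 * i.e. C_i = E_k(P_i ^ C_{i-1}).  The chaining happens inside
 * aesni_enc() through its ivp argument, which always points at the
 * most recently produced ciphertext block.
 */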
void
aesni_encrypt_cbc(int rounds, const void *key_schedule, size_t len,
    const uint8_t *from, uint8_t *to, const uint8_t iv[AES_BLOCK_LEN])
{
	const uint8_t *ivp;
	size_t i;

	len /= AES_BLOCK_LEN;
	ivp = iv;
	for (i = 0; i < len; i++) {
		aesni_enc(rounds - 1, key_schedule, from, to, ivp);
		ivp = to;
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}

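/*
 * ECB encryption: every 16-byte block is encrypted independently,
 * with no chaining between blocks (ivp is NULL).
 */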
void
aesni_encrypt_ecb(int rounds, const void *key_schedule, size_t len,
    const uint8_t from[AES_BLOCK_LEN], uint8_t to[AES_BLOCK_LEN])
{
	size_t i;

	len /= AES_BLOCK_LEN;
	for (i = 0; i < len; i++) {
		aesni_enc(rounds - 1, key_schedule, from, to, NULL);
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}

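/*
 * ECB decryption: the inverse of aesni_encrypt_ecb(), again with no
 * chaining between blocks.
 */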
void
aesni_decrypt_ecb(int rounds, const void *key_schedule, size_t len,
    const uint8_t from[AES_BLOCK_LEN], uint8_t to[AES_BLOCK_LEN])
{
	size_t i;

	len /= AES_BLOCK_LEN;
	for (i = 0; i < len; i++) {
		aesni_dec(rounds - 1, key_schedule, from, to, NULL);
		from += AES_BLOCK_LEN;
		to += AES_BLOCK_LEN;
	}
}

#define	AES_XTS_BLOCKSIZE	16
#define	AES_XTS_IVSIZE		8
#define	AES_XTS_ALPHA		0x87	/* GF(2^128) reduction polynomial, low byte */

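/*
 * Process one block using the XEX construction underlying XTS:
 * C = E_k1(P ^ T) ^ T, where T is the current tweak.  Afterwards the
 * tweak is multiplied by x (alpha) in GF(2^128) for the next block.
 */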
static void
aesni_crypt_xts_block(int rounds, const void *key_schedule, uint64_t *tweak,
    const uint64_t *from, uint64_t *to, uint64_t *block, int do_encrypt)
{
	int carry;

	block[0] = from[0] ^ tweak[0];
	block[1] = from[1] ^ tweak[1];

	if (do_encrypt)
		aesni_enc(rounds - 1, key_schedule, (uint8_t *)block,
		    (uint8_t *)to, NULL);
	else
		aesni_dec(rounds - 1, key_schedule, (uint8_t *)block,
		    (uint8_t *)to, NULL);

	to[0] ^= tweak[0];
	to[1] ^= tweak[1];

	/* Exponentiate tweak. */
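	/*
	 * Multiply the tweak by x: shift the 128-bit little-endian value
	 * left by one bit, carrying from the low into the high word, and
	 * if bit 127 was shifted out, reduce modulo x^128 + x^7 + x^2 +
	 * x + 1 by XORing 0x87 into the least significant byte.
	 */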
	carry = ((tweak[0] & 0x8000000000000000ULL) > 0);
	tweak[0] <<= 1;
	if (tweak[1] & 0x8000000000000000ULL) {
		uint8_t *twk = (uint8_t *)tweak;

		twk[0] ^= AES_XTS_ALPHA;
	}
	tweak[1] <<= 1;
	if (carry)
		tweak[1] |= 1;
}

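/*
 * Encrypt or decrypt a buffer in XTS mode: the initial tweak is derived
 * by encrypting the 64-bit sector number (the IV) under the tweak key,
 * then every 16-byte block is run through aesni_crypt_xts_block().
 */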
static void
aesni_crypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN], int do_encrypt)
{
	uint64_t block[AES_XTS_BLOCKSIZE / 8];
	uint8_t tweak[AES_XTS_BLOCKSIZE];
	size_t i;

	/*
	 * Prepare the tweak as E_k2(IV).  The IV is the little-endian
	 * representation of a 64-bit block (sector) number, which callers
	 * may pass in directly.
	 */
#if BYTE_ORDER == LITTLE_ENDIAN
	bcopy(iv, tweak, AES_XTS_IVSIZE);
	/* Last 64 bits of IV are always zero. */
	bzero(tweak + AES_XTS_IVSIZE, AES_XTS_IVSIZE);
#else
#error Only LITTLE_ENDIAN architectures are supported.
#endif
	aesni_enc(rounds - 1, tweak_schedule, tweak, tweak, NULL);

	len /= AES_XTS_BLOCKSIZE;
	for (i = 0; i < len; i++) {
		aesni_crypt_xts_block(rounds, data_schedule, (uint64_t *)tweak,
		    (const uint64_t *)from, (uint64_t *)to, block, do_encrypt);
		from += AES_XTS_BLOCKSIZE;
		to += AES_XTS_BLOCKSIZE;
	}

	bzero(tweak, sizeof(tweak));
	bzero(block, sizeof(block));
}

static void
aesni_encrypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN])
{

	aesni_crypt_xts(rounds, data_schedule, tweak_schedule, len, from, to,
	    iv, 1);
}

static void
aesni_decrypt_xts(int rounds, const void *data_schedule,
    const void *tweak_schedule, size_t len, const uint8_t *from, uint8_t *to,
    const uint8_t iv[AES_BLOCK_LEN])
{

	aesni_crypt_xts(rounds, data_schedule, tweak_schedule, len, from, to,
	    iv, 0);
}

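/*
 * Expand the supplied key into the session's key schedules.  keylen is
 * in bits; XTS key lengths are twice the AES key size because the
 * second half of the key is expanded separately into the tweak
 * schedule.
 */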
static int
aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
    int keylen)
{

	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		switch (keylen) {
		case 128:
			ses->rounds = AES128_ROUNDS;
			break;
		case 192:
			ses->rounds = AES192_ROUNDS;
			break;
		case 256:
			ses->rounds = AES256_ROUNDS;
			break;
		default:
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_XTS:
		switch (keylen) {
		case 256:
			ses->rounds = AES128_ROUNDS;
			break;
		case 512:
			ses->rounds = AES256_ROUNDS;
			break;
		default:
			return (EINVAL);
		}
		break;
	default:
		return (EINVAL);
	}

	aesni_set_enckey(key, ses->enc_schedule, ses->rounds);
	aesni_set_deckey(ses->enc_schedule, ses->dec_schedule, ses->rounds);
	if (ses->algo == CRYPTO_AES_CBC)
		arc4rand(ses->iv, sizeof(ses->iv), 0);
	else /* if (ses->algo == CRYPTO_AES_XTS) */ {
		aesni_set_enckey(key + keylen / 16, ses->xts_schedule,
		    ses->rounds);
	}

	return (0);
}

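/*
 * The AES-NI instructions operate on the SSE register file, so the key
 * schedules may only be computed while the FPU context is saved.  Enter
 * the FPU through fpu_kern_enter() unless this is already a dedicated
 * FPU kernel thread.
 */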
int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
{
	struct thread *td;
	int error, saved_ctx;

	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		saved_ctx = 1;
	} else {
		error = 0;
		saved_ctx = 0;
	}
	if (error == 0) {
		error = aesni_cipher_setup_common(ses, encini->cri_key,
		    encini->cri_klen);
		if (saved_ctx)
			fpu_kern_leave(td, ses->fpu_ctx);
	}
	return (error);
}

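/*
 * Encrypt or decrypt the payload described by one crypto descriptor.
 * aesni_cipher_alloc() hands back a contiguous view of the payload,
 * allocating a bounce buffer (allocated != 0) when the request data is
 * not already contiguous; the data is then transformed in place.
 */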
int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated, saved_ctx;

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		if (error != 0)
			goto out;
		saved_ctx = 1;
	} else {
		saved_ctx = 0;
		error = 0;
	}

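	/* An explicit key in the descriptor rekeys the session. */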
	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

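	/*
	 * Pick up the IV: an explicit IV in the descriptor always wins.
	 * Otherwise encryption uses the session IV and, unless the caller
	 * marked it present, also injects it into the request; decryption
	 * reads the IV out of the request instead.
	 */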
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	}
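	/*
	 * Done with the FPU.  Copy the result back if it was transformed
	 * in a bounce buffer, and for encryption save the last ciphertext
	 * block as the IV for a subsequent request.
	 */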
	if (saved_ctx)
		fpu_kern_leave(td, ses->fpu_ctx);
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
 out:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	return (error);
}