1/*
2 * Copyright (c) 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*	$FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $	*/
30/*	$KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $	*/
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in the
43 *    documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 *    may be used to endorse or promote products derived from this software
46 *    without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/socket.h>
64#include <sys/queue.h>
65#include <sys/syslog.h>
66#include <sys/mbuf.h>
67#include <sys/mcache.h>
68
69#include <kern/locks.h>
70
71#include <net/if.h>
72#include <net/route.h>
73
74#include <netinet6/ipsec.h>
75#include <netinet6/esp.h>
76#include <netinet6/esp_rijndael.h>
77
78#include <libkern/crypto/aes.h>
79
80#include <netkey/key.h>
81
82#include <net/net_osdep.h>
83
84#define MAX_REALIGN_LEN 2000
85#define AES_BLOCKLEN 16
86
87extern lck_mtx_t *sadb_mutex;
88
89int
90esp_aes_schedlen(
91	__unused const struct esp_algorithm *algo)
92{
93
94	return sizeof(aes_ctx);
95}
96
97int
98esp_aes_schedule(
99	__unused const struct esp_algorithm *algo,
100	struct secasvar *sav)
101{
102
103	lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
104	aes_ctx *ctx = (aes_ctx*)sav->sched;
105
106	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
107	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
108
109	return 0;
110}
111
112
/* The following 2 functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers in place,
 * along with the IV.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can, based on the amount of source data in
 * the current source mbuf and the space remaining in the current
 * destination mbuf.  The crypto code requires the data to be a
 * multiple of 16 bytes.  A separate buffer is used when a 16-byte
 * block spans mbufs.
122 *
123 * m = mbuf chain
124 * off = offset to ESP header
125 *
126 * local vars for source:
127 * soff = offset from beginning of the chain to the head of the
128 *			current mbuf.
129 * scut = last mbuf that contains headers to be retained
130 * scutoff = offset to end of the headers in scut
131 * s = the current mbuf
132 * sn = current offset to data in s (next source data to process)
133 *
134 * local vars for dest:
135 * d0 = head of chain
136 * d = current mbuf
137 * dn = current offset in d (next location to store result)
138 */
139
140
141int
142esp_cbc_decrypt_aes(m, off, sav, algo, ivlen)
143	struct mbuf *m;
144	size_t off;
145	struct secasvar *sav;
146	const struct esp_algorithm *algo;
147	int ivlen;
148{
149	struct mbuf *s;
150	struct mbuf *d, *d0, *dp;
151	int soff;	/* offset from the head of chain, to head of this mbuf */
152	int sn, dn;	/* offset from the head of the mbuf, to meat */
153	size_t ivoff, bodyoff;
154	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
155	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
156	struct mbuf *scut;
157	int scutoff;
158	int	i, len;
159
160
161	if (ivlen != AES_BLOCKLEN) {
162		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
163		    "unsupported ivlen %d\n", algo->name, ivlen));
164		m_freem(m);
165		return EINVAL;
166	}
167
168	if (sav->flags & SADB_X_EXT_OLD) {
169		/* RFC 1827 */
170		ivoff = off + sizeof(struct esp);
171		bodyoff = off + sizeof(struct esp) + ivlen;
172	} else {
173		ivoff = off + sizeof(struct newesp);
174		bodyoff = off + sizeof(struct newesp) + ivlen;
175	}
176
177	if (m->m_pkthdr.len < bodyoff) {
178		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
179		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
180		m_freem(m);
181		return EINVAL;
182	}
183	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
184		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
185		    "payload length must be multiple of %d\n",
186		    algo->name, AES_BLOCKLEN));
187		m_freem(m);
188		return EINVAL;
189	}
190
191	/* grab iv */
192	m_copydata(m, ivoff, ivlen, (caddr_t) iv);
193
194	s = m;
195	soff = sn = dn = 0;
196	d = d0 = dp = NULL;
197	sp = dptr = NULL;
198
199	/* skip header/IV offset */
200	while (soff < bodyoff) {
201		if (soff + s->m_len > bodyoff) {
202			sn = bodyoff - soff;
203			break;
204		}
205
206		soff += s->m_len;
207		s = s->m_next;
208	}
209	scut = s;
210	scutoff = sn;
211
212	/* skip over empty mbuf */
213	while (s && s->m_len == 0)
214		s = s->m_next;
215
216	while (soff < m->m_pkthdr.len) {
217		/* source */
218		if (sn + AES_BLOCKLEN <= s->m_len) {
219			/* body is continuous */
220			sp = mtod(s, u_int8_t *) + sn;
221			len = s->m_len - sn;
222			len -= len % AES_BLOCKLEN;	// full blocks only
223		} else {
224			/* body is non-continuous */
225			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
226			sp = sbuf;
227			len = AES_BLOCKLEN;			// 1 block only in sbuf
228		}
229
230		/* destination */
231		if (!d || dn + AES_BLOCKLEN > d->m_len) {
232			if (d)
233				dp = d;
234			MGET(d, M_DONTWAIT, MT_DATA);
235			i = m->m_pkthdr.len - (soff + sn);
236			if (d && i > MLEN) {
237				MCLGET(d, M_DONTWAIT);
238				if ((d->m_flags & M_EXT) == 0) {
239					d = m_mbigget(d, M_DONTWAIT);
240					if ((d->m_flags & M_EXT) == 0) {
241						m_free(d);
242						d = NULL;
243					}
244				}
245			}
246			if (!d) {
247				m_freem(m);
248				if (d0)
249					m_freem(d0);
250				return ENOBUFS;
251			}
252			if (!d0)
253				d0 = d;
254			if (dp)
255				dp->m_next = d;
256
257			// try to make mbuf data aligned
258			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
259				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
260			}
261
262			d->m_len = M_TRAILINGSPACE(d);
263			d->m_len -= d->m_len % AES_BLOCKLEN;
264			if (d->m_len > i)
265				d->m_len = i;
266			dptr = mtod(d, u_int8_t *);
267			dn = 0;
268		}
269
270		/* adjust len if greater than space available in dest */
271		if (len > d->m_len - dn)
272			len = d->m_len - dn;
273
274		/* decrypt */
275		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
276		if (IPSEC_IS_P2ALIGNED(sp)) {
277			sp_unaligned = NULL;
278		} else {
279			sp_unaligned = sp;
280			if (len > MAX_REALIGN_LEN) {
281				return ENOBUFS;
282			}
283			if (sp_aligned == NULL) {
284				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
285				if (sp_aligned == NULL)
286					return ENOMEM;
287			}
288			sp = sp_aligned;
289			memcpy(sp, sp_unaligned, len);
290		}
291		// no need to check output pointer alignment
292		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
293				(aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));
294
295		// update unaligned pointers
296		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
297			sp = sp_unaligned;
298		}
299
300		/* udpate offsets */
301		sn += len;
302		dn += len;
303
304		// next iv
305		bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);
306
307		/* find the next source block */
308		while (s && sn >= s->m_len) {
309			sn -= s->m_len;
310			soff += s->m_len;
311			s = s->m_next;
312		}
313
314	}
315
316	/* free un-needed source mbufs and add dest mbufs to chain */
317	m_freem(scut->m_next);
318	scut->m_len = scutoff;
319	scut->m_next = d0;
320
321	// free memory
322	if (sp_aligned != NULL) {
323		FREE(sp_aligned, M_SECA);
324		sp_aligned = NULL;
325	}
326
327	/* just in case */
328	bzero(iv, sizeof(iv));
329	bzero(sbuf, sizeof(sbuf));
330
331	return 0;
332}
333
334int
335esp_cbc_encrypt_aes(
336	struct mbuf *m,
337	size_t off,
338	__unused size_t plen,
339	struct secasvar *sav,
340	const struct esp_algorithm *algo,
341	int ivlen)
342{
343	struct mbuf *s;
344	struct mbuf *d, *d0, *dp;
345	int soff;	/* offset from the head of chain, to head of this mbuf */
346	int sn, dn;	/* offset from the head of the mbuf, to meat */
347	size_t ivoff, bodyoff;
348	u_int8_t *ivp, *dptr, *ivp_unaligned;
349	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
350	u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
351	struct mbuf *scut;
352	int scutoff;
353	int i, len;
354
355	if (ivlen != AES_BLOCKLEN) {
356		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
357		    "unsupported ivlen %d\n", algo->name, ivlen));
358		m_freem(m);
359		return EINVAL;
360	}
361
362	if (sav->flags & SADB_X_EXT_OLD) {
363		/* RFC 1827 */
364		ivoff = off + sizeof(struct esp);
365		bodyoff = off + sizeof(struct esp) + ivlen;
366	} else {
367		ivoff = off + sizeof(struct newesp);
368		bodyoff = off + sizeof(struct newesp) + ivlen;
369	}
370
371	/* put iv into the packet */
372	m_copyback(m, ivoff, ivlen, sav->iv);
373	ivp = (u_int8_t *) sav->iv;
374
375	if (m->m_pkthdr.len < bodyoff) {
376		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
377		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
378		m_freem(m);
379		return EINVAL;
380	}
381	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
382		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
383		    "payload length must be multiple of %lu\n",
384		    algo->name, AES_BLOCKLEN));
385		m_freem(m);
386		return EINVAL;
387	}
388
389	s = m;
390	soff = sn = dn = 0;
391	d = d0 = dp = NULL;
392	sp = dptr = NULL;
393
394	/* skip headers/IV */
395	while (soff < bodyoff) {
396		if (soff + s->m_len > bodyoff) {
397			sn = bodyoff - soff;
398			break;
399		}
400
401		soff += s->m_len;
402		s = s->m_next;
403	}
404	scut = s;
405	scutoff = sn;
406
407	/* skip over empty mbuf */
408	while (s && s->m_len == 0)
409		s = s->m_next;
410
411	while (soff < m->m_pkthdr.len) {
412		/* source */
413		if (sn + AES_BLOCKLEN <= s->m_len) {
414			/* body is continuous */
415			sp = mtod(s, u_int8_t *) + sn;
416			len = s->m_len - sn;
417			len -= len % AES_BLOCKLEN;	// full blocks only
418		} else {
419			/* body is non-continuous */
420			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
421			sp = sbuf;
422			len = AES_BLOCKLEN;			// 1 block only in sbuf
423		}
424
425		/* destination */
426		if (!d || dn + AES_BLOCKLEN > d->m_len) {
427			if (d)
428				dp = d;
429			MGET(d, M_DONTWAIT, MT_DATA);
430			i = m->m_pkthdr.len - (soff + sn);
431			if (d && i > MLEN) {
432				MCLGET(d, M_DONTWAIT);
433				if ((d->m_flags & M_EXT) == 0) {
434					d = m_mbigget(d, M_DONTWAIT);
435					if ((d->m_flags & M_EXT) == 0) {
436						m_free(d);
437						d = NULL;
438					}
439				}
440			}
441			if (!d) {
442				m_freem(m);
443				if (d0)
444					m_freem(d0);
445				return ENOBUFS;
446			}
447			if (!d0)
448				d0 = d;
449			if (dp)
450				dp->m_next = d;
451
452			// try to make mbuf data aligned
453			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
454				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
455			}
456
457			d->m_len = M_TRAILINGSPACE(d);
458			d->m_len -= d->m_len % AES_BLOCKLEN;
459			if (d->m_len > i)
460				d->m_len = i;
461			dptr = mtod(d, u_int8_t *);
462			dn = 0;
463		}
464
465		/* adjust len if greater than space available */
466		if (len > d->m_len - dn)
467			len = d->m_len - dn;
468
469		/* encrypt */
470		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
471		if (IPSEC_IS_P2ALIGNED(sp)) {
472			sp_unaligned = NULL;
473		} else {
474			sp_unaligned = sp;
475			if (len > MAX_REALIGN_LEN) {
476				return ENOBUFS;
477			}
478			if (sp_aligned == NULL) {
479				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
480				if (sp_aligned == NULL)
481					return ENOMEM;
482			}
483			sp = sp_aligned;
484			memcpy(sp, sp_unaligned, len);
485		}
486		// check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
487		if (IPSEC_IS_P2ALIGNED(ivp)) {
488			ivp_unaligned = NULL;
489		} else {
490			ivp_unaligned = ivp;
491			ivp = ivp_aligned_buf;
492			memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
493		}
494		// no need to check output pointer alignment
495		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
496			(aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
497
498		// update unaligned pointers
499		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
500			sp = sp_unaligned;
501		}
502		if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
503			ivp = ivp_unaligned;
504		}
505
506		/* update offsets */
507		sn += len;
508		dn += len;
509
510		/* next iv */
511		ivp = dptr + dn - AES_BLOCKLEN;	// last block encrypted
512
513		/* find the next source block and skip empty mbufs */
514		while (s && sn >= s->m_len) {
515			sn -= s->m_len;
516			soff += s->m_len;
517			s = s->m_next;
518		}
519	}
520
521	/* free un-needed source mbufs and add dest mbufs to chain */
522	m_freem(scut->m_next);
523	scut->m_len = scutoff;
524	scut->m_next = d0;
525
526	// free memory
527	if (sp_aligned != NULL) {
528		FREE(sp_aligned, M_SECA);
529		sp_aligned = NULL;
530	}
531
532	/* just in case */
533	bzero(sbuf, sizeof(sbuf));
534	key_sa_stir_iv(sav);
535
536	return 0;
537}
538