/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/opencrypto/cryptosoft.c 188154 2009-02-05 17:43:12Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;

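/*
 * HMAC inner/outer pad constants, filled with HMAC_IPAD_VAL and
 * HMAC_OPAD_VAL in swcr_attach(); used by swcr_authprepare() to pad
 * short keys out to the hash block size.
 */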
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			arc4rand(iv, blks, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
				crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}
	ivp = iv;

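	/*
	 * The data may live in an mbuf chain, a uio/iovec list, or a
	 * contiguous buffer; each layout is handled separately below.
	 * CBC chaining is done by hand: ivp tracks the block to XOR with
	 * next, and piv temporarily holds a ciphertext block during
	 * in-place decryption so it can serve as the IV for the next block.
	 */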
	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * it is only used in the while() loop below, and
			 * only if there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * it is only used in the while() loop below, and
			 * only if there is indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}

		return 0; /* Done with iovec encryption/decryption */
	} else {	/* contiguous buffer */
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0; /* Done with contiguous buffer encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

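/*
 * Expand an authentication key: for the HMAC algorithms this precomputes
 * the inner (ipad) and outer (opad) contexts; for the KPDK variants the
 * raw key is stashed in sw_octx so it can be appended to the data later.
 */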
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compression routines need the whole buffer at once, so
	 * if the data is not contiguous (e.g. spread across an mbuf chain),
	 * copy it into a temporary contiguous buffer first.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  For mbuf chains, m_copyback()
	 * will extend the chain as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

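	/*
	 * Walk the cryptoini chain and allocate one swcr_data entry per
	 * requested transform, linked through sw_next off swcr_sessions[i].
	 */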
	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(dev, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession(dev, i);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(dev, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

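	/*
	 * Walk the transform chain for this session, zeroing key material
	 * and hash contexts before freeing each entry.
	 */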
	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
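	/* Register every algorithm this driver can handle with the framework. */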
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0,0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		free(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);