/* crypto.c revision 108813 */
/*	$FreeBSD: head/sys/opencrypto/crypto.c 108813 2003-01-06 18:52:05Z sam $	*/
/*	$OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $	*/
/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */
23#define	CRYPTO_TIMING				/* enable timing support */
24
25#include <sys/param.h>
26#include <sys/systm.h>
27#include <sys/eventhandler.h>
28#include <sys/kernel.h>
29#include <sys/kthread.h>
30#include <sys/lock.h>
31#include <sys/mutex.h>
32#include <sys/malloc.h>
33#include <sys/proc.h>
34#include <sys/sysctl.h>
35
36#include <vm/uma.h>
37#include <opencrypto/cryptodev.h>
38#include <opencrypto/xform.h>			/* XXX for M_XDATA */
39
40#define	SESID2HID(sid)	(((sid) >> 32) & 0xffffffff)
41
42/*
43 * Crypto drivers register themselves by allocating a slot in the
44 * crypto_drivers table with crypto_get_driverid() and then registering
45 * each algorithm they support with crypto_register() and crypto_kregister().
46 */
47static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
48#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
49#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
50static	struct cryptocap *crypto_drivers = NULL;
51static	int crypto_drivers_num = 0;
52
53/*
54 * There are two queues for crypto requests; one for symmetric (e.g.
55 * cipher) operations and one for asymmetric (e.g. MOD)operations.
56 * A single mutex is used to lock access to both queues.  We could
57 * have one per-queue but having one simplifies handling of block/unblock
58 * operations.
59 */
60static	TAILQ_HEAD(,cryptop) crp_q;		/* request queues */
61static	TAILQ_HEAD(,cryptkop) crp_kq;
62static	struct mtx crypto_q_mtx;
63#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
64#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)
65
66/*
67 * There are two queues for processing completed crypto requests; one
68 * for the symmetric and one for the asymmetric ops.  We only need one
69 * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
70 * mutex is used to lock access to both queues.  Note that this lock
71 * must be separate from the lock on request queues to insure driver
72 * callbacks don't generate lock order reversals.
73 */
74static	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queues */
75static	TAILQ_HEAD(,cryptkop) crp_ret_kq;
76static	struct mtx crypto_ret_q_mtx;
77#define	CRYPTO_RETQ_LOCK()	mtx_lock(&crypto_ret_q_mtx)
78#define	CRYPTO_RETQ_UNLOCK()	mtx_unlock(&crypto_ret_q_mtx)
79
80static	uma_zone_t cryptop_zone;
81static	uma_zone_t cryptodesc_zone;
82
83int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
84SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
85	   &crypto_userasymcrypto, 0,
86	   "Enable/disable user-mode access to asymmetric crypto support");
87int	crypto_devallowsoft = 0;	/* only use hardware crypto for asym */
88SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
89	   &crypto_devallowsoft, 0,
90	   "Enable/disable use of software asym crypto support");
91
92MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
93
94static	void crypto_proc(void);
95static	struct proc *cryptoproc;
96static	void crypto_ret_proc(void);
97static	struct proc *cryptoretproc;
98static	void crypto_destroy(void);
99
100static	struct cryptostats cryptostats;
101SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
102	    cryptostats, "Crypto system statistics");
103
104#ifdef CRYPTO_TIMING
105static	int crypto_timing = 0;
106SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
107	   &crypto_timing, 0, "Enable/disable crypto timing support");
108#endif
109
/*
 * One-time framework initialization: create the three mutexes, the
 * request and return queues, the UMA zones for ops/descriptors, the
 * driver capability table, and the two worker threads.  Returns 0 or
 * an errno; on any failure all partially-created state is torn down
 * via crypto_destroy().
 */
static int
crypto_init(void)
{
	int error;

	/* NB: MTX_QUIET suppresses lock-operation logging for this lock. */
	mtx_init(&crypto_drivers_mtx, "crypto driver table",
		NULL, MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);
	mtx_init(&crypto_ret_q_mtx, "crypto return queues", NULL, MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		error = ENOMEM;
		goto bad;
	}

	/* Driver table starts small; crypto_get_driverid doubles it on demand. */
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot setup crypto drivers\n");
		error = ENOMEM;
		goto bad;
	}

	/* Dispatch thread: hands queued requests to drivers. */
	error = kthread_create((void (*)(void *)) crypto_proc, NULL,
		    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
			error);
		goto bad;
	}

	/* Return thread: runs completion callbacks outside driver context. */
	error = kthread_create((void (*)(void *)) crypto_ret_proc, NULL,
		    &cryptoretproc, 0, 0, "crypto returns");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
			error);
		goto bad;
	}
	return 0;
bad:
	crypto_destroy();
	return error;
}
167
168/*
169 * Signal a crypto thread to terminate.  We use the driver
170 * table lock to synchronize the sleep/wakeups so that we
171 * are sure the threads have terminated before we release
172 * the data structures they use.  See crypto_finis below
173 * for the other half of this song-and-dance.
174 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	/* Clear the thread pointer first; the thread exits when it sees NULL. */
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);		/* rouse the thread from its queue sleep */
		PROC_LOCK(p);		/* NB: insure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		/* Sleep on the proc; exit1()'s wakeup notifies us (see crypto_finis). */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}
192
/*
 * Tear down all framework state: stop the two worker threads, then
 * release the driver table, the UMA zones, and the mutexes.  Called
 * on module unload and on a failed crypto_init().
 */
static void
crypto_destroy(void)
{
	/*
	 * Terminate any crypto threads.
	 */
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	crypto_terminate(&cryptoretproc, &crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptodesc_zone != NULL)
		uma_zdestroy(cryptodesc_zone);
	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	mtx_destroy(&crypto_ret_q_mtx);
	mtx_destroy(&crypto_drivers_mtx);
}
220
221/*
222 * Initialization code, both for static and dynamic loading.
223 */
224static int
225crypto_modevent(module_t mod, int type, void *unused)
226{
227	int error = EINVAL;
228
229	switch (type) {
230	case MOD_LOAD:
231		error = crypto_init();
232		if (error == 0 && bootverbose)
233			printf("crypto: <crypto core>\n");
234		break;
235	case MOD_UNLOAD:
236		/*XXX disallow if active sessions */
237		error = 0;
238		crypto_destroy();
239		return 0;
240	}
241	return error;
242}
243
/* Glue for registering the crypto core with the module system. */
static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};
MODULE_VERSION(crypto, 1);
/* SI_SUB_DRIVERS / SI_ORDER_FIRST: come up before crypto device drivers. */
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
251
252/*
253 * Create a new session.
254 */
/*
 * sid: out-parameter receiving the session handle <hid:32 | lid:32>;
 * cri: linked list of algorithm/key descriptors, all of which must be
 * satisfied by a single driver; hard: >0 hardware only, <0 software
 * only, 0 no preference.  Returns 0 or an errno (EINVAL if no driver
 * matched or the table is absent).
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		/* cr == NULL means the inner loop ran off the end: all matched. */
		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
					crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/* Compose the handle: driver id in the top 32 bits. */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			break;
		}
	}
done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}
325
326/*
327 * Delete an existing session (or a reserved session on an unregistered
328 * driver).
329 */
/*
 * Release the session identified by sid.  Drops the driver's session
 * refcount, invokes the driver's cleanup hook if present, and — when
 * this was the last session on a driver pending unregistration — zeroes
 * the table slot so it can be reused.  Returns 0 or an errno.
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
				crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}
373
374/*
375 * Return an unused driver id.  Used by drivers prior to registering
376 * support for the algorithms they handle.
377 */
/*
 * Allocate a free slot in the driver table and return its index, or -1
 * if the table cannot be grown.  The slot is provisionally marked with
 * cc_sessions = 1 so a concurrent caller cannot claim it before the
 * driver finishes crypto_register() (which resets the count to 0).
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i;

	CRYPTO_DRIVER_LOCK();

	/* A free slot has no process hook, no pending cleanup, no sessions. */
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;
	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}
428
429static struct cryptocap *
430crypto_checkdriver(u_int32_t hid)
431{
432	if (crypto_drivers == NULL)
433		return NULL;
434	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
435}
436
437/*
438 * Register support for a key-related algorithm.  This routine
439 * is called once for each algorithm supported a driver.
440 */
/*
 * driverid: slot from crypto_get_driverid(); kalg: key algorithm id;
 * flags: algorithm capability flags; kprocess/karg: driver dispatch
 * hook and its opaque argument (installed only on the first call).
 * Returns 0, or EINVAL on a bad driver id or out-of-range algorithm.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: CRK_ALGORITM_MIN spelling appears to be a header typo — confirm
	 * against cryptodev.h before "fixing" it. */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* Only the first registration installs the dispatch hook. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
479
480/*
481 * Register support for a non-key-related algorithm.  This routine
482 * is called once for each such algorithm supported by a driver.
483 */
/*
 * driverid: slot from crypto_get_driverid(); alg: symmetric algorithm
 * id; maxoplen: driver's maximum operation length for this algorithm;
 * newses/freeses/process/arg: driver hooks, installed only on the
 * first call (which also clears the provisional session mark set by
 * crypto_get_driverid).  Returns 0 or EINVAL.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		/* Only the first registration installs the driver hooks. */
		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
531
532/*
533 * Unregister a crypto driver. If there are pending sessions using it,
534 * leave enough information around so that subsequent calls using those
535 * sessions will correctly detect the driver has been unregistered and
536 * reroute requests.
537 */
/*
 * Withdraw one algorithm from a driver.  If it was the driver's last
 * algorithm the slot is zeroed; when sessions are still outstanding
 * the slot is instead marked CRYPTOCAP_F_CLEANUP so existing session
 * handles are detected as stale and migrated (see crypto_invoke).
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err;
	u_int32_t ses;
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/* Preserve the session count across the wipe. */
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
577
578/*
579 * Unregister all algorithms associated with a crypto driver.
580 * If there are pending sessions using it, leave enough information
581 * around so that subsequent calls using those sessions will
582 * correctly detect the driver has been unregistered and reroute
583 * requests.
584 */
/*
 * Withdraw every algorithm of a driver at once.  As in
 * crypto_unregister(), outstanding sessions keep the slot reserved
 * and marked CRYPTOCAP_F_CLEANUP until crypto_freesession() releases
 * the last one.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err;
	u_int32_t ses;
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		/* Preserve the session count across the wipe. */
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}
616
617/*
618 * Clear blockage on a driver.  The what parameter indicates whether
619 * the driver is now ready for cryptop's and/or cryptokop's.
620 */
/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.  NB: the
 * blocked flags are manipulated under the op-queue lock (not the
 * driver-table lock) because crypto_proc reads them under that lock.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup, err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		needwakeup = 0;
		if (what & CRYPTO_SYMQ) {
			needwakeup |= cap->cc_qblocked;
			cap->cc_qblocked = 0;
		}
		if (what & CRYPTO_ASYMQ) {
			needwakeup |= cap->cc_kqblocked;
			cap->cc_kqblocked = 0;
		}
		/* Only poke the dispatch thread if something was actually blocked. */
		if (needwakeup)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}
648
649/*
650 * Add a crypto request to a queue, to be processed by the kernel thread.
651 */
/*
 * Queue a symmetric request for the dispatch thread.  Always returns 0;
 * completion (or failure) is reported through the request's callback.
 * NB: the cs_ops counter is bumped outside any lock — a benign race
 * for a statistics counter.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	int wasempty;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		binuptime(&crp->crp_tstamp);
#endif

	CRYPTO_Q_LOCK();
	wasempty = TAILQ_EMPTY(&crp_q);
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);

	/*
	 * Wakeup processing thread if driver is not blocked.
	 */
	cap = crypto_checkdriver(SESID2HID(crp->crp_sid));
	if (cap && !cap->cc_qblocked && wasempty)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();

	return 0;
}
679
680/*
681 * Add an asymetric crypto request to a queue,
682 * to be processed by the kernel thread.
683 */
/*
 * Queue an asymmetric (key) request for the dispatch thread.  Always
 * returns 0; completion is reported through the request's callback.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int wasempty;

	cryptostats.cs_kops++;

	CRYPTO_Q_LOCK();
	wasempty = TAILQ_EMPTY(&crp_kq);
	TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);

	/*
	 * Wakeup processing thread if driver is not blocked.
	 */
	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked && wasempty)
		wakeup_one(&crp_q);	/* NB: shared wait channel */
	CRYPTO_Q_UNLOCK();

	return 0;
}
706
707/*
708 * Dispatch an assymetric crypto request to the appropriate crypto devices.
709 */
/*
 * Hand a key operation to the first capable driver.  Software drivers
 * are skipped unless crypto_devallowsoft is set.  On any error the op
 * is completed immediately with krp_status set; the function's own
 * return value is 0 except for the NULL-argument sanity check.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	mtx_assert(&crypto_q_mtx, MA_OWNED);

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		/* No way to report completion; drop the op on the floor. */
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else
		error = ENODEV;

	if (error) {
		/* Complete the op now so the caller's callback still fires. */
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
750
751#ifdef CRYPTO_TIMING
/*
 * Accumulate the elapsed time since *bt into the timing record ts
 * (sum, min, max, count) and reset *bt to "now" so the caller can
 * measure the next interval.
 */
static void
crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
{
	struct bintime now, delta;
	struct timespec t;
	uint64_t u;

	binuptime(&now);
	u = now.frac;
	delta.frac = now.frac - bt->frac;
	delta.sec = now.sec - bt->sec;
	if (u < delta.frac)
		delta.sec--;	/* borrow: the fractional subtraction wrapped */
	bintime2timespec(&delta, &t);
	timespecadd(&ts->acc, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*bt = now;	/* restart the interval for the next measurement */
}
775#endif
776
777/*
778 * Dispatch a crypto request to the appropriate crypto devices.
779 */
/*
 * Hand a symmetric request to its session's driver.  If the driver has
 * unregistered, attempt to migrate the session to another driver and
 * complete the op with EAGAIN so the caller resubmits.  Returns the
 * driver's result (may be ERESTART — see crypto_proc) or 0 when the
 * op was completed locally.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	mtx_assert(&crypto_q_mtx, MA_OWNED);

	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		/* No way to report completion; reclaim the request. */
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Release our hold on a driver pending unregistration. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		/* Rebuild the cryptoini chain from the descriptors in place. */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
838
839/*
840 * Release a set of crypto descriptors.
841 */
842void
843crypto_freereq(struct cryptop *crp)
844{
845	struct cryptodesc *crd;
846
847	if (crp == NULL)
848		return;
849
850	while ((crd = crp->crp_desc) != NULL) {
851		crp->crp_desc = crd->crd_next;
852		uma_zfree(cryptodesc_zone, crd);
853	}
854
855	uma_zfree(cryptop_zone, crp);
856}
857
858/*
859 * Acquire a set of crypto descriptors.
860 */
861struct cryptop *
862crypto_getreq(int num)
863{
864	struct cryptodesc *crd;
865	struct cryptop *crp;
866
867	crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
868	if (crp != NULL) {
869		while (num--) {
870			crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
871			if (crd == NULL) {
872				crypto_freereq(crp);
873				return NULL;
874			}
875
876			crd->crd_next = crp->crp_desc;
877			crp->crp_desc = crd;
878		}
879	}
880	return crp;
881}
882
883/*
884 * Invoke the callback on behalf of the driver.
885 */
/*
 * Called by drivers on completion of a symmetric op: count errors,
 * record timing, and hand the request to the return thread which runs
 * the callback outside driver (e.g. interrupt) context.
 */
void
crypto_done(struct cryptop *crp)
{
	int wasempty;

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	CRYPTO_RETQ_LOCK();
	wasempty = TAILQ_EMPTY(&crp_ret_q);
	TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);

	/* Only wake the return thread on the empty->non-empty transition. */
	if (wasempty)
		wakeup_one(&crp_ret_q);		/* shared wait channel */
	CRYPTO_RETQ_UNLOCK();
}
905
906/*
907 * Invoke the callback on behalf of the driver.
908 */
/*
 * Key-op analogue of crypto_done(): count errors and queue the op for
 * the return thread's callback pass.  Both return queues share the
 * crp_ret_q wait channel.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	CRYPTO_RETQ_LOCK();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);

	/* Only wake the return thread on the empty->non-empty transition. */
	if (wasempty)
		wakeup_one(&crp_ret_q);		/* shared wait channel */
	CRYPTO_RETQ_UNLOCK();
}
924
925int
926crypto_getfeat(int *featp)
927{
928	int hid, kalg, feat = 0;
929
930	if (!crypto_userasymcrypto)
931		goto out;
932
933	CRYPTO_DRIVER_LOCK();
934	for (hid = 0; hid < crypto_drivers_num; hid++) {
935		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
936		    !crypto_devallowsoft) {
937			continue;
938		}
939		if (crypto_drivers[hid].cc_kprocess == NULL)
940			continue;
941		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
942			if ((crypto_drivers[hid].cc_kalg[kalg] &
943			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
944				feat |=  1 << kalg;
945	}
946	CRYPTO_DRIVER_UNLOCK();
947out:
948	*featp = feat;
949	return (0);
950}
951
952/*
953 * Terminate a thread at module unload.  The process that
954 * initiated this is waiting for us to signal that we're gone;
955 * wake it up and exit.  We use the driver table lock to insure
956 * we don't do the wakeup before they're waiting.  There is no
957 * race here because the waiter sleeps on the proc lock for the
958 * thread so it gets notified at the right time because of an
959 * extra wakeup that's done in exit1().
960 */
static void
crypto_finis(void *chan)
{
	/* Take the driver lock so the wakeup cannot precede the waiter's sleep. */
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	/* NB: kthread_exit is called with Giant held in this era's kernel. */
	mtx_lock(&Giant);
	kthread_exit(0);
}
970
971/*
972 * Crypto thread, dispatches crypto requests.
973 */
/*
 * Crypto thread, dispatches crypto requests.  Loops forever under the
 * op-queue lock, pulling one symmetric op and one key op per pass and
 * invoking the matching driver; sleeps on crp_q when both queues have
 * nothing runnable.  Exits when crypto_terminate() clears cryptoproc.
 */
static void
crypto_proc(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint;

	CRYPTO_Q_LOCK();
	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			u_int32_t hid = SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless whether its for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					/* NODELAY ops go out immediately, no look-ahead. */
					if (submit->crp_flags & CRYPTO_F_NODELAY)
						break;
					/* keep scanning for more are q'd */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}

		if (submit == NULL && krp == NULL) {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front. It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
			/* NULLed pointer is crypto_terminate's shutdown signal. */
			if (cryptoproc == NULL)
				break;
			cryptostats.cs_intrs++;
		}
	}
	CRYPTO_Q_UNLOCK();

	crypto_finis(&crp_q);
}
1094
1095/*
1096 * Crypto returns thread, does callbacks for processed crypto requests.
1097 * Callbacks are done here, rather than in the crypto drivers, because
1098 * callbacks typically are expensive and would slow interrupt handling.
1099 */
/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 * Exits when crypto_terminate() clears cryptoretproc.
 */
static void
crypto_ret_proc(void)
{
	struct cryptop *crpt;
	struct cryptkop *krpt;

	CRYPTO_RETQ_LOCK();
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&crp_ret_q);
		if (crpt != NULL)
			TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);

		krpt = TAILQ_FIRST(&crp_ret_kq);
		if (krpt != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);

		if (crpt != NULL || krpt != NULL) {
			/* Drop the lock: callbacks may be slow or re-enter the framework. */
			CRYPTO_RETQ_UNLOCK();
			/*
			 * Run callbacks unlocked.
			 */
			if (crpt != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct bintime t = crpt->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crpt->crp_callback(crpt);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crpt->crp_callback(crpt);
			}
			if (krpt != NULL)
				krpt->krp_callback(krpt);
			CRYPTO_RETQ_LOCK();
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
				"crypto_ret_wait", 0);
			/* NULLed pointer is crypto_terminate's shutdown signal. */
			if (cryptoretproc == NULL)
				break;
			cryptostats.cs_rets++;
		}
	}
	CRYPTO_RETQ_UNLOCK();

	crypto_finis(&crp_ret_q);
}
1157