/*	$NetBSD$	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Michael Graff <explorer@flame.org> and Thor Lancelot Simon.
 * This code uses ideas and algorithms from the Linux driver written by
 * Ted Ts'o.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/rnd.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/kauth.h>
#include <sys/once.h>
#include <sys/rngtest.h>
#include <sys/cpu.h>	/* XXX temporary, see rnd_detach_source */

#include <dev/rnd_private.h>

#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
#include <machine/cpu_counter.h>
#endif

#ifdef RND_DEBUG
#define	DPRINTF(l,x)      if (rnd_debug & (l)) printf x
int	rnd_debug = 0;
#else
#define	DPRINTF(l,x)
#endif

#define	RND_DEBUG_WRITE		0x0001
#define	RND_DEBUG_READ		0x0002
#define	RND_DEBUG_IOCTL		0x0004
#define	RND_DEBUG_SNOOZE	0x0008

/*
 * list devices attached
 */
#if 0
#define	RND_VERBOSE
#endif

/*
 * The size of a temporary buffer, kmem_alloc()ed when needed, and used for
 * reading and writing data.
 */
#define	RND_TEMP_BUFFER_SIZE	128

/*
 * This is a little bit of state information attached to each device that we
 * collect entropy from.  This is simply a collection buffer, and when it
 * is full it will be "detached" from the source and added to the entropy
 * pool after entropy is distilled as much as possible.
 */
#define	RND_SAMPLE_COUNT	64	/* collect N samples, then compress */
typedef struct _rnd_sample_t {
	SIMPLEQ_ENTRY(_rnd_sample_t) next;
	krndsource_t	*source;
	int		cursor;
	int		entropy;
	u_int32_t	ts[RND_SAMPLE_COUNT];
	u_int32_t	values[RND_SAMPLE_COUNT];
} rnd_sample_t;

/*
 * The event queue.  Fields are altered at an interrupt level.
 * All accesses must be protected with the mutex.
 */
volatile int			rnd_timeout_pending;
SIMPLEQ_HEAD(, _rnd_sample_t)	rnd_samples;
kmutex_t			rnd_mtx;


/*
 * Entropy sinks: usually other generators waiting to be rekeyed.
 *
 * A sink's callback MUST NOT re-add the sink to the list, or
 * list corruption will occur.  The list is protected by the
 * rndsink_mtx, which must be released before calling any sink's
 * callback.
 */
TAILQ_HEAD(, rndsink)		rnd_sinks;
kmutex_t			rndsink_mtx;
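
/*
 * A consumer-side sketch (illustrative, not a complete example): a
 * sink fills in its rndsink_t -- name, len, the data buffer, and the
 * cb/arg callback pair -- sets state to RSTATE_PENDING while holding
 * its own mtx, and calls rndsink_attach().  Once enough entropy has
 * accumulated, rnd_wakeup_readers() copies len bytes into data, sets
 * state to RSTATE_HASBITS and invokes cb(arg); per the warning above,
 * the callback must not re-attach the sink from within itself.
 */
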
123
124/*
125 * Memory pool for sample buffers
126 */
127static pool_cache_t rnd_mempc;
128
129/*
130 * Our random pool.  This is defined here rather than using the general
131 * purpose one defined in rndpool.c.
132 *
 * Samples are collected and queued into a separate mutex-protected queue
 * (rnd_samples, see above) and processed in a timeout routine; the pool
 * itself is guarded by rndpool_mtx, a spin mutex (see rnd_init()).
 */
rndpool_t rnd_pool;
kmutex_t  rndpool_mtx;
kcondvar_t rndpool_cv;

/*
 * This source is used to easily "remove" queue entries when the source
 * which actually generated the events is going away.
 */
static krndsource_t rnd_source_no_collect = {
	/* LIST_ENTRY list */
	.name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't',
		   0, 0, 0, 0, 0, 0, 0 },
	.last_time = 0, .last_delta = 0, .last_delta2 = 0, .total = 0,
	.type = RND_TYPE_UNKNOWN,
	.flags = (RND_FLAG_NO_COLLECT |
		  RND_FLAG_NO_ESTIMATE |
		  RND_TYPE_UNKNOWN),
	.state = NULL,
	.test_cnt = 0,
	.test = NULL
};

struct callout rnd_callout, skew_callout;

void	      rnd_wakeup_readers(void);
static inline u_int32_t rnd_estimate_entropy(krndsource_t *, u_int32_t);
static inline u_int32_t rnd_counter(void);
static        void	rnd_timeout(void *);
static	      void	rnd_process_events(void *);
u_int32_t     rnd_extract_data_locked(void *, u_int32_t, u_int32_t); /* XXX */
static	      void	rnd_add_data_ts(krndsource_t *, const void *const,
					uint32_t, uint32_t, uint32_t);

int			rnd_ready = 0;
int			rnd_initial_entropy = 0;

#ifdef DIAGNOSTIC
static int		rnd_tested = 0;
static rngtest_t	rnd_rt;
static uint8_t		rnd_testbits[sizeof(rnd_rt.rt_b)];
#endif

LIST_HEAD(, krndsource)	rnd_sources;

rndsave_t		*boot_rsp;

/*
 * Generate a 32-bit counter.  This should be more machine dependent,
 * using cycle counters and the like when possible.
 */
static inline u_int32_t
rnd_counter(void)
{
	struct timeval tv;

#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
	if (cpu_hascounter())
		return (cpu_counter32());
#endif
	if (rnd_ready) {
		microtime(&tv);
		return (tv.tv_sec * 1000000 + tv.tv_usec);
	}
	/* When called from rnd_init, it's too early to call microtime safely. */
	return (0);
}

/*
 * Check to see if there are readers waiting on us.  If so, kick them.
 */
void
rnd_wakeup_readers(void)
{
	rndsink_t *sink, *tsink;
	TAILQ_HEAD(, rndsink) sunk = TAILQ_HEAD_INITIALIZER(sunk);

	mutex_spin_enter(&rndpool_mtx);
	if (rndpool_get_entropy_count(&rnd_pool) < RND_ENTROPY_THRESHOLD * 8) {
		mutex_spin_exit(&rndpool_mtx);
		return;
	}

	/*
	 * First, take care of in-kernel consumers needing rekeying.
	 */
	mutex_spin_enter(&rndsink_mtx);
	TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) {
		if (!mutex_tryenter(&sink->mtx)) {
#ifdef RND_VERBOSE
			printf("rnd_wakeup_readers: "
			       "skipping busy rndsink\n");
#endif
			continue;
		}

		KASSERT(RSTATE_PENDING == sink->state);

		if ((sink->len + RND_ENTROPY_THRESHOLD) * 8 <
			rndpool_get_entropy_count(&rnd_pool)) {
			/* We have enough entropy to sink some here. */
			if (rndpool_extract_data(&rnd_pool, sink->data,
						 sink->len, RND_EXTRACT_GOOD)
			    != sink->len) {
				panic("could not extract estimated "
				      "entropy from pool");
			}
			sink->state = RSTATE_HASBITS;
			/* Move this sink to the list of pending callbacks */
			TAILQ_REMOVE(&rnd_sinks, sink, tailq);
			TAILQ_INSERT_HEAD(&sunk, sink, tailq);
		} else {
			mutex_exit(&sink->mtx);
		}
	}
	mutex_spin_exit(&rndsink_mtx);

	/*
	 * If we still have enough new bits to do something, feed userspace.
	 */
	if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {
#ifdef RND_VERBOSE
		if (!rnd_initial_entropy)
			printf("rnd: have initial entropy (%u)\n",
			       rndpool_get_entropy_count(&rnd_pool));
#endif
		rnd_initial_entropy = 1;
		mutex_spin_exit(&rndpool_mtx);
	} else {
		mutex_spin_exit(&rndpool_mtx);
	}

	/*
	 * Now that we have dropped the mutex, we can run sinks' callbacks.
	 * Since we have reused the "tailq" member of the sink structure for
	 * this temporary on-stack queue, the callback must NEVER re-add
	 * the sink to the main queue, or our on-stack queue will become
	 * corrupt.
	 */
	while ((sink = TAILQ_FIRST(&sunk))) {
#ifdef RND_VERBOSE
		printf("supplying %d bytes to entropy sink \"%s\""
		       " (cb %p, arg %p).\n",
		       (int)sink->len, sink->name, sink->cb, sink->arg);
#endif
		sink->state = RSTATE_HASBITS;
		sink->cb(sink->arg);
		TAILQ_REMOVE(&sunk, sink, tailq);
		mutex_spin_exit(&sink->mtx);
	}
}

/*
 * Use the timing of the event to estimate the entropy gathered.
 * If all the differentials (first, second, and third) are non-zero, return
 * non-zero.  If any of these are zero, return zero.
 */
static inline u_int32_t
rnd_estimate_entropy(krndsource_t *rs, u_int32_t t)
{
	int32_t delta, delta2, delta3;

	/*
	 * If the time counter has overflowed, calculate the real difference.
	 * If it has not, it is simpler.
	 */
	if (t < rs->last_time)
		delta = UINT_MAX - rs->last_time + t;
	else
		delta = rs->last_time - t;

	if (delta < 0)
		delta = -delta;

	/*
	 * Calculate the second and third order differentials
	 */
	delta2 = rs->last_delta - delta;
	if (delta2 < 0)
		delta2 = -delta2;

	delta3 = rs->last_delta2 - delta2;
	if (delta3 < 0)
		delta3 = -delta3;

	rs->last_time = t;
	rs->last_delta = delta;
	rs->last_delta2 = delta2;

	/*
	 * If any delta is 0, we got no entropy.  If all are non-zero, we
	 * might have something.
	 */
	if (delta == 0 || delta2 == 0 || delta3 == 0)
		return (0);

	return (1);
}

#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL)
static void
rnd_skew(void *arg)
{
	static krndsource_t skewsrc;
	static int live, flipflop;

	/*
	 * Only one instance of this callout will ever be scheduled
	 * at a time (it is only ever scheduled by itself).  So no
	 * locking is required here.
	 */

	/*
	 * Even on systems with seemingly stable clocks, the
	 * entropy estimator seems to think we get 1 bit here
	 * about every 2 calls.  That seems like too much.  Set
	 * NO_ESTIMATE on this source until we can better analyze
	 * the entropy of its output.
	 */
	if (__predict_false(!live)) {
		rnd_attach_source(&skewsrc, "callout", RND_TYPE_SKEW,
				  RND_FLAG_NO_ESTIMATE);
		live = 1;
	}

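	/*
	 * Alternate between a long (hz-tick) and a short (one-tick)
	 * delay.  Each sample of the counter (after the first) is thus
	 * taken one tick after the preceding callout fired, so its
	 * low-order bits reflect the skew between the callout clock
	 * and the cycle counter.
	 */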
	flipflop = !flipflop;

	if (flipflop) {
		rnd_add_uint32(&skewsrc, rnd_counter());
		callout_schedule(&skew_callout, hz);
	} else {
		callout_schedule(&skew_callout, 1);
	}
}
#endif

/*
 * initialize the global random pool for our use.
 * rnd_init() must be called very early on in the boot process, so
 * the pool is ready for other devices to attach as sources.
 */
void
rnd_init(void)
{
	u_int32_t c;

	if (rnd_ready)
		return;

	mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&rndsink_mtx, MUTEX_DEFAULT, IPL_VM);

	callout_init(&rnd_callout, CALLOUT_MPSAFE);
	callout_setfunc(&rnd_callout, rnd_timeout, NULL);

	/*
	 * take a counter early, hoping that there's some variance in
	 * the following operations
	 */
	c = rnd_counter();

	LIST_INIT(&rnd_sources);
	SIMPLEQ_INIT(&rnd_samples);
	TAILQ_INIT(&rnd_sinks);

	rndpool_init(&rnd_pool);
	mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_VM);
	cv_init(&rndpool_cv, "rndread");

	rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0,
				    "rndsample", NULL, IPL_VM,
				    NULL, NULL, NULL);

	/*
	 * Set a resource limit.  The rnd_process_events() function is
	 * called every tick and processes the sample queue.  Without a
	 * limit, a flood of rnd_add_*() calls could eat up all kernel
	 * memory.
	 */
	pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0);

	/*
	 * Mix *something*, *anything* into the pool to help it get started.
	 * However, it's not safe for rnd_counter() to call microtime() yet,
	 * so on some platforms we might just end up with zeros anyway.
	 * XXX more things to add would be nice.
	 */
	if (c) {
		mutex_spin_enter(&rndpool_mtx);
		rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
		c = rnd_counter();
		rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
		mutex_spin_exit(&rndpool_mtx);
	}

	rnd_ready = 1;

	/*
	 * If we have a cycle counter, take its error with respect
	 * to the callout mechanism as a source of entropy, ala
	 * TrueRand.
	 *
	 * XXX This will do little when the cycle counter *is* what's
	 * XXX clocking the callout mechanism.  How to get this right
	 * XXX without unsightly spelunking in the timecounter code?
	 */
#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
	callout_init(&skew_callout, CALLOUT_MPSAFE);
	callout_setfunc(&skew_callout, rnd_skew, NULL);
	rnd_skew(NULL);
#endif

#ifdef RND_VERBOSE
	printf("rnd: initialised (%u)%s", RND_POOLBITS,
	       c ? " with counter\n" : "\n");
#endif
	if (boot_rsp != NULL) {
		mutex_spin_enter(&rndpool_mtx);
		rndpool_add_data(&rnd_pool, boot_rsp->data,
				 sizeof(boot_rsp->data),
				 MIN(boot_rsp->entropy,
				     RND_POOLBITS / 2));
		if (rndpool_get_entropy_count(&rnd_pool) >
		    RND_ENTROPY_THRESHOLD * 8) {
			rnd_initial_entropy = 1;
		}
		mutex_spin_exit(&rndpool_mtx);
#ifdef RND_VERBOSE
		printf("rnd: seeded with %d bits\n",
		       MIN(boot_rsp->entropy, RND_POOLBITS / 2));
#endif
		memset(boot_rsp, 0, sizeof(*boot_rsp));
	}
}

static rnd_sample_t *
rnd_sample_allocate(krndsource_t *source)
{
	rnd_sample_t *c;

	c = pool_cache_get(rnd_mempc, PR_WAITOK);
	if (c == NULL)
		return (NULL);

	c->source = source;
	c->cursor = 0;
	c->entropy = 0;

	return (c);
}

/*
 * Don't wait on allocation.  To be used in an interrupt context.
 */
static rnd_sample_t *
rnd_sample_allocate_isr(krndsource_t *source)
{
	rnd_sample_t *c;

	c = pool_cache_get(rnd_mempc, PR_NOWAIT);
	if (c == NULL)
		return (NULL);

	c->source = source;
	c->cursor = 0;
	c->entropy = 0;

	return (c);
}

static void
rnd_sample_free(rnd_sample_t *c)
{
	memset(c, 0, sizeof(*c));
	pool_cache_put(rnd_mempc, c);
}
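
/*
 * Source-side usage, as a sketch only (the device name and the datum
 * below are illustrative):
 *
 *	static krndsource_t sc_rnd;
 *
 *	rnd_attach_source(&sc_rnd, device_xname(self), RND_TYPE_DISK, 0);
 *	...
 *	rnd_add_uint32(&sc_rnd, (uint32_t)blkno);	(once per event)
 *	...
 *	rnd_detach_source(&sc_rnd);		(when the device goes away)
 */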

/*
 * Add a source to our list of sources.
 */
void
rnd_attach_source(krndsource_t *rs, const char *name, u_int32_t type,
    u_int32_t flags)
{
	u_int32_t ts;

	ts = rnd_counter();

	strlcpy(rs->name, name, sizeof(rs->name));
	rs->last_time = ts;
	rs->last_delta = 0;
	rs->last_delta2 = 0;
	rs->total = 0;

	/*
	 * Force network devices to not collect any entropy by
	 * default.
	 */
	if (type == RND_TYPE_NET)
		flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);

	/*
	 * Hardware RNGs get extra space for statistical testing.
	 */
	if (type == RND_TYPE_RNG) {
		rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP);
		rs->test_cnt = 0;
	} else {
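		/*
		 * test_cnt == -1 means no test is needed; it is also
		 * what a hardware RNG's counter becomes once it passes
		 * (see rnd_hwrng_test()).
		 */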
		rs->test = NULL;
		rs->test_cnt = -1;
	}

	rs->type = type;
	rs->flags = flags;

	rs->state = rnd_sample_allocate(rs);

	mutex_spin_enter(&rndpool_mtx);
	LIST_INSERT_HEAD(&rnd_sources, rs, list);

#ifdef RND_VERBOSE
	printf("rnd: %s attached as an entropy source (", rs->name);
	if (!(flags & RND_FLAG_NO_COLLECT)) {
		printf("collecting");
		if (flags & RND_FLAG_NO_ESTIMATE)
			printf(" without estimation");
	} else
		printf("off");
	printf(")\n");
#endif

	/*
	 * Again, put some more initial junk in the pool.
	 * XXX Bogus, but harder to guess than zeros.
	 */
	rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1);
	mutex_spin_exit(&rndpool_mtx);
}

/*
 * Remove a source from our list of sources.
 */
void
rnd_detach_source(krndsource_t *source)
{
	rnd_sample_t *sample;

	mutex_spin_enter(&rnd_mtx);

	LIST_REMOVE(source, list);

	/*
	 * If there are samples queued up, "remove" them from the sample
	 * queue by setting their source to the no-collect pseudosource.
	 */
	sample = SIMPLEQ_FIRST(&rnd_samples);
	while (sample != NULL) {
		if (sample->source == source)
			sample->source = &rnd_source_no_collect;

		sample = SIMPLEQ_NEXT(sample, next);
	}

	mutex_spin_exit(&rnd_mtx);

	if (!cpu_softintr_p()) {	/* XXX XXX very temporary "fix" */
		if (source->state) {
			rnd_sample_free(source->state);
			source->state = NULL;
		}

		if (source->test) {
			kmem_free(source->test, sizeof(rngtest_t));
		}
	}

#ifdef RND_VERBOSE
	printf("rnd: %s detached as an entropy source\n", source->name);
#endif
}

/*
 * Add a 32-bit value to the entropy pool.  The rs parameter should point to
 * the source-specific source structure.
 */
void
_rnd_add_uint32(krndsource_t *rs, u_int32_t val)
{
	u_int32_t ts;
	u_int32_t entropy = 0;

	if (rs->flags & RND_FLAG_NO_COLLECT)
		return;

	/*
	 * Sample the counter as soon as possible to avoid
	 * entropy overestimation.
	 */
	ts = rnd_counter();

	/*
	 * If we are estimating entropy on this source,
	 * calculate differentials.
	 */

	if ((rs->flags & RND_FLAG_NO_ESTIMATE) == 0) {
		entropy = rnd_estimate_entropy(rs, ts);
	}

	rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts);
}

void
rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len,
	     uint32_t entropy)
{
	/*
	 * This interface is meant for feeding data which is,
	 * itself, random.  Don't estimate entropy based on
	 * timestamp, just directly add the data.
	 */
	rnd_add_data_ts(rs, data, len, entropy, rnd_counter());
}

static void
rnd_add_data_ts(krndsource_t *rs, const void *const data, u_int32_t len,
		u_int32_t entropy, uint32_t ts)
{
	rnd_sample_t *state = NULL;
	const uint32_t *dint = data;
	int todo, done, filled = 0;
	SIMPLEQ_HEAD(, _rnd_sample_t) tmp_samples =
			SIMPLEQ_HEAD_INITIALIZER(tmp_samples);

	if (rs->flags & RND_FLAG_NO_COLLECT) {
		return;
	}

	/*
	 * Loop over data packaging it into sample buffers.
	 * If a sample buffer allocation fails, drop all data.
	 */
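	/*
	 * The data is consumed in 32-bit words; any trailing bytes
	 * (if len is not a multiple of four) are silently discarded.
	 */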
	todo = len / sizeof(*dint);
	for (done = 0; done < todo ; done++) {
		state = rs->state;
		if (state == NULL) {
			state = rnd_sample_allocate_isr(rs);
			if (__predict_false(state == NULL)) {
				break;
			}
			rs->state = state;
		}

		state->ts[state->cursor] = ts;
		state->values[state->cursor] = dint[done];
		state->cursor++;

		if (state->cursor == RND_SAMPLE_COUNT) {
			SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next);
			filled++;
			rs->state = NULL;
		}
	}

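	/*
	 * state is NULL here only if an allocation failed above (or
	 * there was nothing to package): free any buffers we did fill
	 * and drop the data without claiming any entropy.
	 */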
	if (__predict_false(state == NULL)) {
		while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
			SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
			rnd_sample_free(state);
		}
		return;
	}

	/*
	 * Claim all the entropy on the last one we send to
	 * the pool, so we don't rely on it being evenly distributed
	 * in the supplied data.
	 *
	 * XXX The rndpool code must accept samples with more
	 * XXX claimed entropy than bits for this to work right.
	 */
	state->entropy += entropy;
	rs->total += entropy;

	/*
	 * If we didn't finish any sample buffers, we're done.
	 */
	if (!filled) {
		return;
	}

	mutex_spin_enter(&rnd_mtx);
	while ((state = SIMPLEQ_FIRST(&tmp_samples))) {
		SIMPLEQ_REMOVE_HEAD(&tmp_samples, next);
		SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
	}

	/*
	 * If we are still starting up, cause immediate processing of
	 * the queued samples.  Otherwise, if the timeout isn't
	 * pending, have it run in the near future.
	 */
	if (__predict_false(cold)) {
#ifdef RND_VERBOSE
		printf("rnd: directly processing boot-time events.\n");
#endif
		rnd_process_events(NULL);	/* Drops lock! */
		return;
	}
	if (rnd_timeout_pending == 0) {
		rnd_timeout_pending = 1;
		mutex_spin_exit(&rnd_mtx);
		callout_schedule(&rnd_callout, 1);
		return;
	}
	mutex_spin_exit(&rnd_mtx);
}

static int
rnd_hwrng_test(rnd_sample_t *sample)
{
	krndsource_t *source = sample->source;
	size_t cmplen;
	uint8_t *v1, *v2;
	size_t resid, totest;

	KASSERT(source->type == RND_TYPE_RNG);

	/*
	 * Continuous-output test: compare two halves of the
	 * sample buffer to each other.  The sample buffer (64 32-bit
	 * words, so 256 bytes) should be much larger than a typical
	 * hardware RNG output, so this seems a reasonable way to do
	 * it without retaining extra data.
	 */
	cmplen = sizeof(sample->values) / 2;
	v1 = (uint8_t *)sample->values;
	v2 = (uint8_t *)sample->values + cmplen;

	if (__predict_false(!memcmp(v1, v2, cmplen))) {
		printf("rnd: source \"%s\" failed continuous-output test.\n",
		       source->name);
		return 1;
	}

	/*
	 * FIPS 140 statistical RNG test.  We must accumulate 20,000 bits.
	 */
	if (__predict_true(source->test_cnt == -1)) {
		/* already passed the test */
		return 0;
	}
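	/*
	 * Append this buffer's output (RND_SAMPLE_COUNT * 4 bytes of
	 * values) to the test block until FIPS140_RNG_TEST_BYTES have
	 * accumulated, then run the statistical test once.
	 */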
	resid = FIPS140_RNG_TEST_BYTES - source->test_cnt;
	totest = MIN(RND_SAMPLE_COUNT * 4, resid);
	memcpy(source->test->rt_b + source->test_cnt, sample->values, totest);
	resid -= totest;
	source->test_cnt += totest;
	if (resid == 0) {
		strlcpy(source->test->rt_name, source->name,
			sizeof(source->test->rt_name));
		if (rngtest(source->test)) {
			printf("rnd: source \"%s\" failed statistical test.\n",
			       source->name);
			return 1;
		}
		source->test_cnt = -1;
		memset(source->test, 0, sizeof(*source->test));
	}
	return 0;
}

/*
 * Process the events in the ring buffer.  Called by rnd_timeout or
 * by the add routines directly if the callout has never fired (that
 * is, if we are "cold" -- just booted).
 *
 * Call with rnd_mtx held -- WILL RELEASE IT.
 */
static void
rnd_process_events(void *arg)
{
	rnd_sample_t *sample;
	krndsource_t *source, *badsource = NULL;
	u_int32_t entropy;
	SIMPLEQ_HEAD(, _rnd_sample_t) dq_samples =
			SIMPLEQ_HEAD_INITIALIZER(dq_samples);
	SIMPLEQ_HEAD(, _rnd_sample_t) df_samples =
			SIMPLEQ_HEAD_INITIALIZER(df_samples);

	/*
	 * Sample queue is protected by rnd_mtx, drain to onstack queue
	 * and drop lock.
	 */

	while ((sample = SIMPLEQ_FIRST(&rnd_samples))) {
		SIMPLEQ_REMOVE_HEAD(&rnd_samples, next);
		/*
		 * We repeat this check here, since it is possible
		 * the source was disabled before we were called, but
		 * after the entry was queued.
		 */
		if (__predict_false(sample->source->flags
				    & RND_FLAG_NO_COLLECT)) {
			SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
		} else {
			SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next);
		}
	}
	mutex_spin_exit(&rnd_mtx);

	/* Don't thrash the rndpool mtx either.  Hold, add all samples. */
	mutex_spin_enter(&rndpool_mtx);
	while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
		SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
		source = sample->source;
		entropy = sample->entropy;

		/*
		 * Hardware generators are great but sometimes they
		 * have...hardware issues.  Don't use any data from
		 * them unless it passes some tests.
		 */
		if (source->type == RND_TYPE_RNG) {
			if (__predict_false(rnd_hwrng_test(sample))) {
				/*
				 * Detach the bad source.  See below.
				 */
				badsource = source;
				printf("rnd: detaching source \"%s\".\n",
				       badsource->name);
				break;
			}
		}
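		/*
		 * The raw values are mixed in with no entropy credit;
		 * the estimated entropy is claimed against the
		 * timestamps instead.
		 */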
		rndpool_add_data(&rnd_pool, sample->values,
		    RND_SAMPLE_COUNT * 4, 0);

		rndpool_add_data(&rnd_pool, sample->ts,
		    RND_SAMPLE_COUNT * 4, entropy);

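		/*
		 * XXX sample->entropy was already credited to the
		 * XXX source's total in rnd_add_data_ts().
		 */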
		source->total += sample->entropy;
		SIMPLEQ_INSERT_TAIL(&df_samples, sample, next);
	}
	mutex_spin_exit(&rndpool_mtx);

	/* Now we hold no locks: clean up. */
	if (__predict_false(badsource)) {
		/*
		 * The detach routine frees any samples we have not
		 * dequeued ourselves.  For sanity's sake, we simply
		 * free (without using) all dequeued samples from the
		 * point at which we detected a problem onwards.
		 */
		rnd_detach_source(badsource);
		while ((sample = SIMPLEQ_FIRST(&dq_samples))) {
			SIMPLEQ_REMOVE_HEAD(&dq_samples, next);
			rnd_sample_free(sample);
		}
	}
	while ((sample = SIMPLEQ_FIRST(&df_samples))) {
		SIMPLEQ_REMOVE_HEAD(&df_samples, next);
		rnd_sample_free(sample);
	}

	/*
	 * Wake up any potential readers waiting.
	 */
	rnd_wakeup_readers();
}

/*
 * Timeout, run to process the events in the ring buffer.
 */
static void
rnd_timeout(void *arg)
{
	mutex_spin_enter(&rnd_mtx);
	rnd_timeout_pending = 0;
	rnd_process_events(arg);
}

u_int32_t
rnd_extract_data_locked(void *p, u_int32_t len, u_int32_t flags)
{
	static int timed_in;

	KASSERT(mutex_owned(&rndpool_mtx));
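
	/*
	 * On the first extraction, mix in the boot time.  It is public
	 * information, so no entropy is claimed for it.
	 */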
	if (__predict_false(!timed_in)) {
		if (boottime.tv_sec) {
			rndpool_add_data(&rnd_pool, &boottime,
					 sizeof(boottime), 0);
		}
		timed_in++;
	}
	if (__predict_false(!rnd_initial_entropy)) {
		u_int32_t c;

#ifdef RND_VERBOSE
		printf("rnd: WARNING! initial entropy low (%u).\n",
		       rndpool_get_entropy_count(&rnd_pool));
#endif
		/* Try once again to put something in the pool */
		c = rnd_counter();
		rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
	}

#ifdef DIAGNOSTIC
	while (!rnd_tested) {
		int entropy_count;

		entropy_count = rndpool_get_entropy_count(&rnd_pool);
#ifdef RND_VERBOSE
		printf("rnd: starting statistical RNG test, entropy = %d.\n",
			entropy_count);
#endif
		if (rndpool_extract_data(&rnd_pool, rnd_rt.rt_b,
		    sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY)
		    != sizeof(rnd_rt.rt_b)) {
			panic("rnd: could not get bits for statistical test");
		}
		/*
		 * Stash the tested bits so we can put them back in the
		 * pool, restoring the entropy count.  DO NOT rely on
		 * rngtest to keep the bits pristine -- we could end up
		 * adding back non-random data and claiming it as pure
		 * entropy.
		 */
		memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b));
		strlcpy(rnd_rt.rt_name, "entropy pool", sizeof(rnd_rt.rt_name));
		if (rngtest(&rnd_rt)) {
			/*
			 * The probability of a Type I error is 3/10000,
			 * but note this can only happen at boot time.
			 * The relevant standard says to reset the module,
			 * but developers objected...
			 */
			printf("rnd: WARNING, ENTROPY POOL FAILED "
			       "STATISTICAL TEST!\n");
			continue;
		}
		memset(&rnd_rt, 0, sizeof(rnd_rt));
		rndpool_add_data(&rnd_pool, rnd_testbits, sizeof(rnd_testbits),
				 entropy_count);
		memset(rnd_testbits, 0, sizeof(rnd_testbits));
#ifdef RND_VERBOSE
		printf("rnd: statistical RNG test done, entropy = %d.\n",
		       rndpool_get_entropy_count(&rnd_pool));
#endif
		rnd_tested++;
	}
#endif
	return rndpool_extract_data(&rnd_pool, p, len, flags);
}

u_int32_t
rnd_extract_data(void *p, u_int32_t len, u_int32_t flags)
{
	uint32_t retval;

	mutex_spin_enter(&rndpool_mtx);
	retval = rnd_extract_data_locked(p, len, flags);
	mutex_spin_exit(&rndpool_mtx);
	return retval;
}
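
/*
 * Caller-side sketch (illustrative only): a consumer that needs
 * full-entropy output asks for RND_EXTRACT_GOOD and checks the
 * returned byte count, since fewer bytes than requested may come
 * back; RND_EXTRACT_ANY keeps extracting even when the entropy
 * estimate runs out:
 *
 *	uint8_t key[32];
 *
 *	if (rnd_extract_data(key, sizeof(key), RND_EXTRACT_GOOD)
 *	    != sizeof(key))
 *		... not enough estimated entropy was available ...
 */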

void
rndsink_attach(rndsink_t *rs)
{
#ifdef RND_VERBOSE
	printf("rnd: entropy sink \"%s\" wants %d bytes of data.\n",
	       rs->name, (int)rs->len);
#endif

	KASSERT(mutex_owned(&rs->mtx));
	KASSERT(rs->state == RSTATE_PENDING);

	mutex_spin_enter(&rndsink_mtx);
	TAILQ_INSERT_TAIL(&rnd_sinks, rs, tailq);
	mutex_spin_exit(&rndsink_mtx);

	mutex_spin_enter(&rnd_mtx);
	if (rnd_timeout_pending == 0) {
		rnd_timeout_pending = 1;
		callout_schedule(&rnd_callout, 1);
	}
	mutex_spin_exit(&rnd_mtx);
}

void
rndsink_detach(rndsink_t *rs)
{
	rndsink_t *sink, *tsink;
#ifdef RND_VERBOSE
	printf("rnd: entropy sink \"%s\" no longer wants data.\n", rs->name);
#endif
	KASSERT(mutex_owned(&rs->mtx));

	mutex_spin_enter(&rndsink_mtx);
	TAILQ_FOREACH_SAFE(sink, &rnd_sinks, tailq, tsink) {
		if (sink == rs) {
			TAILQ_REMOVE(&rnd_sinks, rs, tailq);
		}
	}
	mutex_spin_exit(&rndsink_mtx);
}

void
rnd_seed(void *base, size_t len)
{
	SHA1_CTX s;
	uint8_t digest[SHA1_DIGEST_LENGTH];

	if (len != sizeof(*boot_rsp)) {
		aprint_error("rnd: bad seed length %d\n", (int)len);
		return;
	}

	boot_rsp = (rndsave_t *)base;
	SHA1Init(&s);
	SHA1Update(&s, (uint8_t *)&boot_rsp->entropy,
		   sizeof(boot_rsp->entropy));
	SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data));
	SHA1Final(digest, &s);

	if (memcmp(digest, boot_rsp->digest, sizeof(digest))) {
		aprint_error("rnd: bad seed checksum\n");
		return;
	}

	/*
	 * It's not really well-defined whether bootloader-supplied
	 * modules run before or after rnd_init().  Handle both cases.
	 */
	if (rnd_ready) {
#ifdef RND_VERBOSE
		printf("rnd: ready, feeding in seed data directly.\n");
#endif
		mutex_spin_enter(&rndpool_mtx);
		rndpool_add_data(&rnd_pool, boot_rsp->data,
				 sizeof(boot_rsp->data),
				 MIN(boot_rsp->entropy, RND_POOLBITS / 2));
		memset(boot_rsp, 0, sizeof(*boot_rsp));
		mutex_spin_exit(&rndpool_mtx);
	} else {
#ifdef RND_VERBOSE
		printf("rnd: not ready, deferring seed feed.\n");
#endif
	}
}