1/*	$OpenBSD: rnd.c,v 1.228 2024/06/14 10:17:05 claudio Exp $	*/
2
3/*
4 * Copyright (c) 2011,2020 Theo de Raadt.
5 * Copyright (c) 2008 Damien Miller.
6 * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
7 * Copyright (c) 2013 Markus Friedl.
8 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, and the entire permission notice in its entirety,
16 *    including the disclaimer of warranties.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote
21 *    products derived from this software without specific prior
22 *    written permission.
23 *
24 * ALTERNATIVELY, this product may be distributed under the terms of
25 * the GNU Public License, in which case the provisions of the GPL are
26 * required INSTEAD OF the above restrictions.  (This clause is
27 * necessary due to a potential bad interaction between the GPL and
28 * the restrictions contained in a BSD-style copyright.)
29 *
30 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
31 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
32 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
33 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
34 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
35 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
36 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
40 * OF THE POSSIBILITY OF SUCH DAMAGE.
41 */
42
43/*
44 * The bootblocks pre-fill the kernel .openbsd.randomdata section with seed
45 * material (on-disk from previous boot, hopefully mixed with a hardware rng).
46 * The first arc4random(9) call initializes this seed material as a chacha
47 * state.  Calls can be done early in kernel bootstrap code -- early use is
48 * encouraged.
49 *
50 * After the kernel timeout subsystem is initialized, random_start() prepares
51 * the entropy collection mechanism enqueue_randomness() and timeout-driven
52 * mixing into the chacha state.  The first submissions come from device
53 * probes, later on interrupt-time submissions are more common.  Entropy
54 * data (and timing information) get mixed over the entropy input ring
55 * rnd_event_space[] -- the goal is to collect damage.
56 *
 * Based upon timeouts, a selection of the entropy ring rnd_event_space[]
 * is CRC bit-distributed and XOR mixed into entropy_pool[].
59 *
60 * From time to time, entropy_pool[] is SHA512-whitened, mixed with time
61 * information again, XOR'd with the inner and outer states of the existing
62 * chacha state, to create a new chacha state.
63 *
64 * During early boot (until cold=0), enqueue operations are immediately
65 * dequeued, and mixed into the chacha.
66 */
67
68#include <sys/param.h>
69#include <sys/event.h>
70#include <sys/ioctl.h>
71#include <sys/malloc.h>
72#include <sys/timeout.h>
73#include <sys/atomic.h>
74#include <sys/task.h>
75#include <sys/msgbuf.h>
76#include <sys/mount.h>
77#include <sys/syscallargs.h>
78
79#include <crypto/sha2.h>
80
81#define KEYSTREAM_ONLY
82#include <crypto/chacha_private.h>
83
84#include <uvm/uvm_extern.h>
85
86/*
87 * For the purposes of better mixing, we use the CRC-32 polynomial as
88 * well to make a twisted Generalized Feedback Shift Register
89 *
90 * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
91 * Transactions on Modeling and Computer Simulation 2(3):179-194.
92 * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
93 * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266)
94 */
95
96/*
97 * Stirring polynomial over GF(2). Used in add_entropy_words() below.
98 *
 * The polynomial terms are chosen to be evenly spaced (minimum RMS
 * distance from evenly spaced; except for the last tap, which is 1 to
 * get the twisting happening as fast as possible).
102 *
103 * The resultant polynomial is:
104 *   2^POOLWORDS + 2^POOL_TAP1 + 2^POOL_TAP2 + 2^POOL_TAP3 + 2^POOL_TAP4 + 1
105 */
#define POOLWORDS	2048
#define POOLBYTES	(POOLWORDS*4)
#define POOLMASK	(POOLWORDS - 1)
#define	POOL_TAP1	1638
#define	POOL_TAP2	1231
#define	POOL_TAP3	819
#define	POOL_TAP4	411

/*
 * Raw entropy collection from device drivers; at interrupt context or not.
 * enqueue_randomness() is used to submit data into the entropy input ring.
 */

#define QEVLEN	128		 /* must be a power of 2 */
#define QEVCONSUME 8		 /* how many events to consume a time */

#define KEYSZ	32		/* ChaCha key bytes */
#define IVSZ	8		/* ChaCha IV bytes */
#define BLOCKSZ	64		/* ChaCha block bytes */
#define RSBUFSZ	(16*BLOCKSZ)	/* keystream staging buffer size */
/* Parenthesized so the macro expands safely inside larger expressions. */
#define EBUFSIZE	(KEYSZ + IVSZ)
127
/*
 * Input ring of raw entropy samples.  add_event_data() accumulates
 * into slots (producer side); dequeue_randomness() reads them out.
 */
struct rand_event {
	u_int	re_time;	/* accumulated cpu_rnd_messybits() samples */
	u_int	re_val;		/* accumulated caller-supplied values */
} rnd_event_space[QEVLEN];

u_int	rnd_event_cons;		/* consumer index into rnd_event_space[] */
u_int	rnd_event_prod;		/* producer index into rnd_event_space[] */
int	rnd_cold = 1;		/* boot-time: dequeue/reseed synchronously */
int	rnd_slowextract = 1;	/* backoff multiplier for dequeue timeout */
137
void	rnd_reinit(void *v);		/* timeout to start reinit */
void	rnd_init(void *);			/* actually do the reinit */

/* Entropy pool: live working copy, plus the boot-loader-seeded image. */
static u_int32_t entropy_pool[POOLWORDS];
u_int32_t entropy_pool0[POOLWORDS] __attribute__((section(".openbsd.randomdata")));

void	dequeue_randomness(void *);
void	add_entropy_words(const u_int32_t *, u_int);
/* extract_entropy() writes exactly EBUFSIZE bytes into its buffer */
void	extract_entropy(u_int8_t *)
    __attribute__((__bounded__(__minbytes__,1,EBUFSIZE)));

/* periodic consumption of the entropy input ring */
struct timeout rnd_timeout = TIMEOUT_INITIALIZER(dequeue_randomness, NULL);
150
/* kqueue filter hooks for /dev/random */
int	filt_randomread(struct knote *, long);
void	filt_randomdetach(struct knote *);
int	filt_randomwrite(struct knote *, long);

static void _rs_seed(u_char *, size_t);
static void _rs_clearseed(const void *p, size_t s);

/* EVFILT_READ: always ready; reports RND_MAIN_MAX_BYTES available */
const struct filterops randomread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomread,
};

/* EVFILT_WRITE: always ready; reports POOLBYTES writable */
const struct filterops randomwrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_randomdetach,
	.f_event	= filt_randomwrite,
};
171
172/*
173 * This function mixes entropy and timing into the entropy input ring.
174 */
175static void
176add_event_data(u_int val)
177{
178	struct rand_event *rep;
179	int e;
180
181	e = (atomic_inc_int_nv(&rnd_event_prod) - 1) & (QEVLEN-1);
182	rep = &rnd_event_space[e];
183	rep->re_time += cpu_rnd_messybits();
184	rep->re_val += val;
185}
186
187void
188enqueue_randomness(u_int val)
189{
190	add_event_data(val);
191
192	if (rnd_cold) {
193		dequeue_randomness(NULL);
194		rnd_init(NULL);
195		if (!cold)
196			rnd_cold = 0;
197	} else if (!timeout_pending(&rnd_timeout) &&
198	    (rnd_event_prod - rnd_event_cons) > QEVCONSUME) {
199		rnd_slowextract = min(rnd_slowextract * 2, 5000);
200		timeout_add_msec(&rnd_timeout, rnd_slowextract * 10);
201	}
202}
203
/*
 * This function merges words of entropy into entropy_pool[] using a
 * CRC-32-derived polynomial to spread the bits.  Each input word is
 * rotated by a per-word-advancing amount before mixing, so successive
 * submissions land on different bit positions.  No lock is taken
 * here; see the race rationale in extract_entropy().
 */
void
add_entropy_words(const u_int32_t *buf, u_int n)
{
	/* derived from IEEE 802.3 CRC-32 */
	static const u_int32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
	};
	static u_int	entropy_add_ptr;	/* next pool slot (counts down) */
	static u_char	entropy_input_rotate;	/* current input rotation */

	for (; n--; buf++) {
		/* rotate the input word left by the running amount */
		u_int32_t w = (*buf << entropy_input_rotate) |
		    (*buf >> ((32 - entropy_input_rotate) & 31));
		u_int i = entropy_add_ptr =
		    (entropy_add_ptr - 1) & POOLMASK;
		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		entropy_input_rotate =
		    (entropy_input_rotate + (i ? 7 : 14)) & 31;

		/* XOR pool contents corresponding to polynomial terms */
		w ^= entropy_pool[(i + POOL_TAP1) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP2) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP3) & POOLMASK] ^
		     entropy_pool[(i + POOL_TAP4) & POOLMASK] ^
		     entropy_pool[(i + 1) & POOLMASK] ^
		     entropy_pool[i]; /* + 2^POOLWORDS */

		entropy_pool[i] = (w >> 3) ^ twist_table[w & 7];
	}
}
244
245/*
246 * Pulls entropy out of the queue and merges it into the pool with the
247 * CRC.  This takes a mix of fresh entries from the producer end of the
248 * queue and entries from the consumer end of the queue which are
249 * likely to have collected more damage.
250 */
251void
252dequeue_randomness(void *v)
253{
254	u_int32_t buf[2];
255	u_int startp, startc, i;
256
257	/* Some very new damage */
258	startp = rnd_event_prod - QEVCONSUME;
259	for (i = 0; i < QEVCONSUME; i++) {
260		u_int e = (startp + i) & (QEVLEN-1);
261
262		buf[0] = rnd_event_space[e].re_time;
263		buf[1] = rnd_event_space[e].re_val;
264		add_entropy_words(buf, 2);
265	}
266	/* and some probably more damaged */
267	startc = atomic_add_int_nv(&rnd_event_cons, QEVCONSUME) - QEVCONSUME;
268	for (i = 0; i < QEVCONSUME; i++) {
269		u_int e = (startc + i) & (QEVLEN-1);
270
271		buf[0] = rnd_event_space[e].re_time;
272		buf[1] = rnd_event_space[e].re_val;
273		add_entropy_words(buf, 2);
274	}
275}
276
/*
 * Grabs a chunk from the entropy_pool[] and slams it through SHA512 when
 * requested.  Writes EBUFSIZE (KEYSZ + IVSZ) bytes into buf.
 */
void
extract_entropy(u_int8_t *buf)
{
	static u_int32_t extract_pool[POOLWORDS];
	u_char digest[SHA512_DIGEST_LENGTH];
	SHA2_CTX shactx;

#if SHA512_DIGEST_LENGTH < EBUFSIZE
#error "need more bigger hash output"
#endif

	/*
	 * INTENTIONALLY not protected by any lock.  Races during
	 * memcpy() result in acceptable input data; races during
	 * SHA512Update() would create nasty data dependencies.  We
	 * do not rely on this as a benefit, but if it happens, cool.
	 */
	memcpy(extract_pool, entropy_pool, sizeof(extract_pool));

	/* Hash the snapshot to produce the output */
	SHA512Init(&shactx);
	SHA512Update(&shactx, (u_int8_t *)extract_pool, sizeof(extract_pool));
	SHA512Final(digest, &shactx);

	/* Copy data to destination buffer */
	memcpy(buf, digest, EBUFSIZE);

	/*
	 * Modify pool so next hash will produce different results:
	 * feed a word of the snapshot back into the input ring, then
	 * fold the ring into the pool again.
	 */
	add_event_data(extract_pool[0]);
	dequeue_randomness(NULL);

	/* Wipe the snapshot and hash state from memory */
	explicit_bzero(extract_pool, sizeof(extract_pool));
	explicit_bzero(digest, sizeof(digest));
}
318
/* random keystream by ChaCha */

struct mutex rndlock = MUTEX_INITIALIZER(IPL_HIGH);	/* guards rs state */
struct timeout rndreinit_timeout = TIMEOUT_INITIALIZER(rnd_reinit, NULL);
struct task rnd_task = TASK_INITIALIZER(rnd_init, NULL);	/* stir on systq */

static chacha_ctx rs;		/* chacha context for random keystream */
/* keystream blocks (also chacha seed from boot) */
static u_char rs_buf[RSBUFSZ];
u_char rs_buf0[RSBUFSZ] __attribute__((section(".openbsd.randomdata")));
static size_t rs_have;		/* valid bytes at end of rs_buf */
static size_t rs_count;		/* bytes till reseed */
331
332void
333suspend_randomness(void)
334{
335	struct timespec ts;
336
337	getnanotime(&ts);
338	enqueue_randomness(ts.tv_sec);
339	enqueue_randomness(ts.tv_nsec);
340
341	dequeue_randomness(NULL);
342	rs_count = 0;
343	arc4random_buf(entropy_pool, sizeof(entropy_pool));
344}
345
346void
347resume_randomness(char *buf, size_t buflen)
348{
349	struct timespec ts;
350
351	if (buf && buflen)
352		_rs_seed(buf, buflen);
353	getnanotime(&ts);
354	enqueue_randomness(ts.tv_sec);
355	enqueue_randomness(ts.tv_nsec);
356
357	dequeue_randomness(NULL);
358	rs_count = 0;
359}
360
361static inline void _rs_rekey(u_char *dat, size_t datlen);
362
363static inline void
364_rs_init(u_char *buf, size_t n)
365{
366	KASSERT(n >= KEYSZ + IVSZ);
367	chacha_keysetup(&rs, buf, KEYSZ * 8);
368	chacha_ivsetup(&rs, buf + KEYSZ, NULL);
369}
370
371static void
372_rs_seed(u_char *buf, size_t n)
373{
374	_rs_rekey(buf, n);
375
376	/* invalidate rs_buf */
377	rs_have = 0;
378	memset(rs_buf, 0, sizeof(rs_buf));
379
380	rs_count = 1600000;
381}
382
383static void
384_rs_stir(int do_lock)
385{
386	struct timespec ts;
387	u_int8_t buf[EBUFSIZE], *p;
388	int i;
389
390	/*
391	 * Use SHA512 PRNG data and a system timespec; early in the boot
392	 * process this is the best we can do -- some architectures do
393	 * not collect entropy very well during this time, but may have
394	 * clock information which is better than nothing.
395	 */
396	extract_entropy(buf);
397
398	nanotime(&ts);
399	for (p = (u_int8_t *)&ts, i = 0; i < sizeof(ts); i++)
400		buf[i] ^= p[i];
401
402	if (do_lock)
403		mtx_enter(&rndlock);
404	_rs_seed(buf, sizeof(buf));
405	if (do_lock)
406		mtx_leave(&rndlock);
407	explicit_bzero(buf, sizeof(buf));
408
409	/* encourage fast-dequeue again */
410	rnd_slowextract = 1;
411}
412
413static inline void
414_rs_stir_if_needed(size_t len)
415{
416	static int rs_initialized;
417
418	if (!rs_initialized) {
419		memcpy(entropy_pool, entropy_pool0, sizeof(entropy_pool));
420		memcpy(rs_buf, rs_buf0, sizeof(rs_buf));
421		/* seeds cannot be cleaned yet, random_start() will do so */
422		_rs_init(rs_buf, KEYSZ + IVSZ);
423		rs_count = 1024 * 1024 * 1024;	/* until main() runs */
424		rs_initialized = 1;
425	} else if (rs_count <= len)
426		_rs_stir(0);
427	else
428		rs_count -= len;
429}
430
/*
 * Scrub the boot-loader seed material (in .openbsd.randomdata) from
 * physical memory.  That section is presumably not writable through
 * its normal mapping -- hence each page is temporarily double-mapped
 * at a fresh writable VA and zeroed through that mapping.
 */
static void
_rs_clearseed(const void *p, size_t s)
{
	struct kmem_dyn_mode kd_avoidalias;
	vaddr_t va = trunc_page((vaddr_t)p);	/* page containing p */
	vsize_t off = (vaddr_t)p - va;		/* offset of p in that page */
	vsize_t len;
	vaddr_t rwva;
	paddr_t pa;

	while (s > 0) {
		/* physical page backing this part of the seed */
		pmap_extract(pmap_kernel(), va, &pa);

		/* kd_prefer = pa: presumably to avoid VAC cache aliasing */
		memset(&kd_avoidalias, 0, sizeof(kd_avoidalias));
		kd_avoidalias.kd_prefer = pa;
		kd_avoidalias.kd_waitok = 1;
		rwva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
		    &kd_avoidalias);
		if (!rwva)
			panic("_rs_clearseed");

		/* map the page writable and zero the seed bytes in it */
		pmap_kenter_pa(rwva, pa, PROT_READ | PROT_WRITE);
		pmap_update(pmap_kernel());

		len = MIN(s, PAGE_SIZE - off);
		explicit_bzero((void *)(rwva + off), len);

		/* tear down the temporary mapping */
		pmap_kremove(rwva, PAGE_SIZE);
		km_free((void *)rwva, PAGE_SIZE, &kv_any, &kp_none);

		va += PAGE_SIZE;
		s -= len;
		off = 0;	/* subsequent pages start at offset 0 */
	}
}
466
467static inline void
468_rs_rekey(u_char *dat, size_t datlen)
469{
470#ifndef KEYSTREAM_ONLY
471	memset(rs_buf, 0, sizeof(rs_buf));
472#endif
473	/* fill rs_buf with the keystream */
474	chacha_encrypt_bytes(&rs, rs_buf, rs_buf, sizeof(rs_buf));
475	/* mix in optional user provided data */
476	if (dat) {
477		size_t i, m;
478
479		m = MIN(datlen, KEYSZ + IVSZ);
480		for (i = 0; i < m; i++)
481			rs_buf[i] ^= dat[i];
482	}
483	/* immediately reinit for backtracking resistance */
484	_rs_init(rs_buf, KEYSZ + IVSZ);
485	memset(rs_buf, 0, KEYSZ + IVSZ);
486	rs_have = sizeof(rs_buf) - KEYSZ - IVSZ;
487}
488
489static inline void
490_rs_random_buf(void *_buf, size_t n)
491{
492	u_char *buf = (u_char *)_buf;
493	size_t m;
494
495	_rs_stir_if_needed(n);
496	while (n > 0) {
497		if (rs_have > 0) {
498			m = MIN(n, rs_have);
499			memcpy(buf, rs_buf + sizeof(rs_buf) - rs_have, m);
500			memset(rs_buf + sizeof(rs_buf) - rs_have, 0, m);
501			buf += m;
502			n -= m;
503			rs_have -= m;
504		}
505		if (rs_have == 0)
506			_rs_rekey(NULL, 0);
507	}
508}
509
510static inline void
511_rs_random_u32(u_int32_t *val)
512{
513	_rs_stir_if_needed(sizeof(*val));
514	if (rs_have < sizeof(*val))
515		_rs_rekey(NULL, 0);
516	memcpy(val, rs_buf + sizeof(rs_buf) - rs_have, sizeof(*val));
517	memset(rs_buf + sizeof(rs_buf) - rs_have, 0, sizeof(*val));
518	rs_have -= sizeof(*val);
519}
520
521/* Return one word of randomness from a ChaCha20 generator */
522u_int32_t
523arc4random(void)
524{
525	u_int32_t ret;
526
527	mtx_enter(&rndlock);
528	_rs_random_u32(&ret);
529	mtx_leave(&rndlock);
530	return ret;
531}
532
/*
 * Fill a buffer of arbitrary length with ChaCha20-derived randomness,
 * under the generator lock.
 */
void
arc4random_buf(void *buf, size_t n)
{
	mtx_enter(&rndlock);
	_rs_random_buf(buf, n);
	mtx_leave(&rndlock);
}
543
544/*
545 * Allocate a new ChaCha20 context for the caller to use.
546 */
547struct arc4random_ctx *
548arc4random_ctx_new(void)
549{
550	char keybuf[KEYSZ + IVSZ];
551
552	chacha_ctx *ctx = malloc(sizeof(chacha_ctx), M_TEMP, M_WAITOK);
553	arc4random_buf(keybuf, KEYSZ + IVSZ);
554	chacha_keysetup(ctx, keybuf, KEYSZ * 8);
555	chacha_ivsetup(ctx, keybuf + KEYSZ, NULL);
556	explicit_bzero(keybuf, sizeof(keybuf));
557	return (struct arc4random_ctx *)ctx;
558}
559
/*
 * Free a ChaCha20 context created by arc4random_ctx_new(), wiping
 * its key material before releasing the memory.
 */
void
arc4random_ctx_free(struct arc4random_ctx *ctx)
{
	explicit_bzero(ctx, sizeof(chacha_ctx));
	free(ctx, M_TEMP, sizeof(chacha_ctx));
}
569
/*
 * Use a given ChaCha20 context to fill a buffer.  With KEYSTREAM_ONLY
 * defined (as in this file), chacha_encrypt_bytes() presumably emits
 * raw keystream, so the prior contents of buf are irrelevant.
 */
void
arc4random_ctx_buf(struct arc4random_ctx *ctx, void *buf, size_t n)
{
#ifndef KEYSTREAM_ONLY
	memset(buf, 0, n);
#endif
	chacha_encrypt_bytes((chacha_ctx *)ctx, buf, buf, n);
}
581
582/*
583 * Calculate a uniformly distributed random number less than upper_bound
584 * avoiding "modulo bias".
585 *
586 * Uniformity is achieved by generating new random numbers until the one
587 * returned is outside the range [0, 2**32 % upper_bound).  This
588 * guarantees the selected random number will be inside
589 * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
590 * after reduction modulo upper_bound.
591 */
592u_int32_t
593arc4random_uniform(u_int32_t upper_bound)
594{
595	u_int32_t r, min;
596
597	if (upper_bound < 2)
598		return 0;
599
600	/* 2**32 % x == (2**32 - x) % x */
601	min = -upper_bound % upper_bound;
602
603	/*
604	 * This could theoretically loop forever but each retry has
605	 * p > 0.5 (worst case, usually far better) of selecting a
606	 * number inside the range we need, so it should rarely need
607	 * to re-roll.
608	 */
609	for (;;) {
610		r = arc4random();
611		if (r >= min)
612			break;
613	}
614
615	return r % upper_bound;
616}
617
/*
 * Reseed the keystream generator from the entropy pool, taking the
 * lock.  Runs during boot, from rnd_task, and after /dev/random
 * writes.
 */
void
rnd_init(void *null)
{
	_rs_stir(1);
}
623
/*
 * Called by timeout to mark arc4 for stirring: queue the actual
 * reseed (rnd_task -> rnd_init) on the system taskq and rearm.
 */
void
rnd_reinit(void *v)
{
	task_add(systq, &rnd_task);
	/* 10 minutes, per dm@'s suggestion */
	timeout_add_sec(&rndreinit_timeout, 10 * 60);
}
634
/*
 * Start periodic services inside the random subsystem, which pull
 * entropy forward, hash it, and re-seed the random stream as needed.
 * Called once during boot, after the seeds have been consumed.
 */
void
random_start(int goodseed)
{
	extern char etext[];

#if !defined(NO_PROPOLICE)
	extern long __guard_local;

	/* a zero stack-protector guard implies no boot-loader entropy */
	if (__guard_local == 0)
		printf("warning: no entropy supplied by boot loader\n");
#endif

	/* the boot seeds have been used; scrub them from memory */
	_rs_clearseed(entropy_pool0, sizeof(entropy_pool0));
	_rs_clearseed(rs_buf0, sizeof(rs_buf0));

	/* Message buffer may contain data from previous boot */
	if (msgbufp->msg_magic == MSG_MAGIC)
		add_entropy_words((u_int32_t *)msgbufp->msg_bufc,
		    msgbufp->msg_bufs / sizeof(u_int32_t));
	/* mix in 8KB of kernel image starting 128KB below etext */
	add_entropy_words((u_int32_t *)etext - 32*1024,
	    8192/sizeof(u_int32_t));

	dequeue_randomness(NULL);
	rnd_init(NULL);
	rnd_reinit(NULL);	/* also arms the periodic re-stir timeout */

	if (goodseed)
		printf("random: good seed from bootblocks\n");
	else {
		/* XXX kernel should work harder here */
		printf("random: boothowto does not indicate good seed\n");
	}
}
672
/* /dev/random open: always succeeds; no per-open state. */
int
randomopen(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
678
/* /dev/random close: nothing to release. */
int
randomclose(dev_t dev, int flag, int mode, struct proc *p)
{
	return 0;
}
684
685/*
686 * Maximum number of bytes to serve directly from the main ChaCha
687 * pool. Larger requests are served from a discrete ChaCha instance keyed
688 * from the main pool.
689 */
690#define RND_MAIN_MAX_BYTES	2048
691
692int
693randomread(dev_t dev, struct uio *uio, int ioflag)
694{
695	struct arc4random_ctx *lctx = NULL;
696	size_t		total = uio->uio_resid;
697	u_char		*buf;
698	int		ret = 0;
699
700	if (uio->uio_resid == 0)
701		return 0;
702
703	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
704	if (total > RND_MAIN_MAX_BYTES)
705		lctx = arc4random_ctx_new();
706
707	while (ret == 0 && uio->uio_resid > 0) {
708		size_t	n = ulmin(POOLBYTES, uio->uio_resid);
709
710		if (lctx != NULL)
711			arc4random_ctx_buf(lctx, buf, n);
712		else
713			arc4random_buf(buf, n);
714		ret = uiomove(buf, n, uio);
715		if (ret == 0 && uio->uio_resid > 0)
716			yield();
717	}
718	if (lctx != NULL)
719		arc4random_ctx_free(lctx);
720	explicit_bzero(buf, POOLBYTES);
721	free(buf, M_TEMP, POOLBYTES);
722	return ret;
723}
724
725int
726randomwrite(dev_t dev, struct uio *uio, int flags)
727{
728	int		ret = 0, newdata = 0;
729	u_int32_t	*buf;
730
731	if (uio->uio_resid == 0)
732		return 0;
733
734	buf = malloc(POOLBYTES, M_TEMP, M_WAITOK);
735
736	while (ret == 0 && uio->uio_resid > 0) {
737		size_t	n = ulmin(POOLBYTES, uio->uio_resid);
738
739		ret = uiomove(buf, n, uio);
740		if (ret != 0)
741			break;
742		while (n % sizeof(u_int32_t))
743			((u_int8_t *)buf)[n++] = 0;
744		add_entropy_words(buf, n / 4);
745		if (uio->uio_resid > 0)
746			yield();
747		newdata = 1;
748	}
749
750	if (newdata)
751		rnd_init(NULL);
752
753	explicit_bzero(buf, POOLBYTES);
754	free(buf, M_TEMP, POOLBYTES);
755	return ret;
756}
757
758int
759randomkqfilter(dev_t dev, struct knote *kn)
760{
761	switch (kn->kn_filter) {
762	case EVFILT_READ:
763		kn->kn_fop = &randomread_filtops;
764		break;
765	case EVFILT_WRITE:
766		kn->kn_fop = &randomwrite_filtops;
767		break;
768	default:
769		return (EINVAL);
770	}
771
772	return (0);
773}
774
/* No attach-time state, so nothing to tear down. */
void
filt_randomdetach(struct knote *kn)
{
}
779
/* Reads are always possible; advertise the per-request cap. */
int
filt_randomread(struct knote *kn, long hint)
{
	kn->kn_data = RND_MAIN_MAX_BYTES;
	return (1);
}
786
/* Writes are always possible; advertise the staging buffer size. */
int
filt_randomwrite(struct knote *kn, long hint)
{
	kn->kn_data = POOLBYTES;
	return (1);
}
793
794int
795randomioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
796{
797	switch (cmd) {
798	case FIOASYNC:
799		/* No async flag in softc so this is a no-op. */
800		break;
801	case FIONBIO:
802		/* Handled in the upper FS layer. */
803		break;
804	default:
805		return ENOTTY;
806	}
807	return 0;
808}
809
810int
811sys_getentropy(struct proc *p, void *v, register_t *retval)
812{
813	struct sys_getentropy_args /* {
814		syscallarg(void *) buf;
815		syscallarg(size_t) nbyte;
816	} */ *uap = v;
817	char buf[256];
818	int error;
819
820	if (SCARG(uap, nbyte) > sizeof(buf))
821		return (EIO);
822	arc4random_buf(buf, SCARG(uap, nbyte));
823	if ((error = copyout(buf, SCARG(uap, buf), SCARG(uap, nbyte))) != 0)
824		return (error);
825	explicit_bzero(buf, sizeof(buf));
826	*retval = 0;
827	return (0);
828}
829