1/*	$NetBSD: kern_entropy.c,v 1.66 2023/10/04 20:28:06 ad Exp $	*/
2
3/*-
4 * Copyright (c) 2019 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32/*
33 * Entropy subsystem
34 *
35 *	* Each CPU maintains a per-CPU entropy pool so that gathering
36 *	  entropy requires no interprocessor synchronization, except
37 *	  early at boot when we may be scrambling to gather entropy as
38 *	  soon as possible.
39 *
40 *	  - entropy_enter gathers entropy and never drops it on the
41 *	    floor, at the cost of sometimes having to do cryptography.
42 *
43 *	  - entropy_enter_intr gathers entropy or drops it on the
44 *	    floor, with low latency.  Work to stir the pool or kick the
45 *	    housekeeping thread is scheduled in soft interrupts.
46 *
47 *	* entropy_enter immediately enters into the global pool if it
48 *	  can transition to full entropy in one swell foop.  Otherwise,
49 *	  it defers to a housekeeping thread that consolidates entropy,
50 *	  but only when the CPUs collectively have full entropy, in
51 *	  order to mitigate iterative-guessing attacks.
52 *
53 *	* The entropy housekeeping thread continues to consolidate
54 *	  entropy even after we think we have full entropy, in case we
55 *	  are wrong, but is limited to one discretionary consolidation
56 *	  per minute, and only when new entropy is actually coming in,
57 *	  to limit performance impact.
58 *
59 *	* The entropy epoch is the number that changes when we
60 *	  transition from partial entropy to full entropy, so that
61 *	  users can easily determine when to reseed.  This also
62 *	  facilitates an operator explicitly causing everything to
63 *	  reseed by sysctl -w kern.entropy.consolidate=1.
64 *
65 *	* Entropy depletion is available for testing (or if you're into
66 *	  that sort of thing), with sysctl -w kern.entropy.depletion=1;
67 *	  the logic to support it is small, to minimize chance of bugs.
68 *
69 *	* While cold, a single global entropy pool is available for
70 *	  entering and extracting, serialized through splhigh/splx.
71 *	  The per-CPU entropy pool data structures are initialized in
72 *	  entropy_init and entropy_init_late (separated mainly for
73 *	  hysterical raisins at this point), but are not used until the
74 *	  system is warm, at which point access to the global entropy
75 *	  pool is limited to thread and softint context and serialized
76 *	  by E->lock.
77 */
78
79#include <sys/cdefs.h>
80__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.66 2023/10/04 20:28:06 ad Exp $");
81
82#include <sys/param.h>
83#include <sys/types.h>
84#include <sys/atomic.h>
85#include <sys/compat_stub.h>
86#include <sys/condvar.h>
87#include <sys/cpu.h>
88#include <sys/entropy.h>
89#include <sys/errno.h>
90#include <sys/evcnt.h>
91#include <sys/event.h>
92#include <sys/file.h>
93#include <sys/intr.h>
94#include <sys/kauth.h>
95#include <sys/kernel.h>
96#include <sys/kmem.h>
97#include <sys/kthread.h>
98#include <sys/lwp.h>
99#include <sys/module_hook.h>
100#include <sys/mutex.h>
101#include <sys/percpu.h>
102#include <sys/poll.h>
103#include <sys/proc.h>
104#include <sys/queue.h>
105#include <sys/reboot.h>
106#include <sys/rnd.h>		/* legacy kernel API */
107#include <sys/rndio.h>		/* userland ioctl interface */
108#include <sys/rndsource.h>	/* kernel rndsource driver API */
109#include <sys/select.h>
110#include <sys/selinfo.h>
111#include <sys/sha1.h>		/* for boot seed checksum */
112#include <sys/stdint.h>
113#include <sys/sysctl.h>
114#include <sys/syslog.h>
115#include <sys/systm.h>
116#include <sys/time.h>
117#include <sys/xcall.h>
118
119#include <lib/libkern/entpool.h>
120
121#include <machine/limits.h>
122
123#ifdef __HAVE_CPU_COUNTER
124#include <machine/cpu_counter.h>
125#endif
126
127#define	MINENTROPYBYTES	ENTROPY_CAPACITY
128#define	MINENTROPYBITS	(MINENTROPYBYTES*NBBY)
129#define	MINSAMPLES	(2*MINENTROPYBITS)
130
131/*
132 * struct entropy_cpu
133 *
134 *	Per-CPU entropy state.  The pool is allocated separately
135 *	because percpu(9) sometimes moves per-CPU objects around
136 *	without zeroing them, which would lead to unwanted copies of
137 *	sensitive secrets.  The evcnt is allocated separately because
138 *	evcnt(9) assumes it stays put in memory.
139 */
140struct entropy_cpu {
141	struct entropy_cpu_evcnt {
142		struct evcnt		softint;
143		struct evcnt		intrdrop;
144		struct evcnt		intrtrunc;
145	}			*ec_evcnt;
146	struct entpool		*ec_pool;
147	unsigned		ec_bitspending;
148	unsigned		ec_samplespending;
149	bool			ec_locked;
150};
151
152/*
153 * struct entropy_cpu_lock
154 *
155 *	State for locking the per-CPU entropy state.
156 */
157struct entropy_cpu_lock {
158	int		ecl_s;
159	long		ecl_pctr;
160};
161
162/*
163 * struct rndsource_cpu
164 *
165 *	Per-CPU rndsource state.
166 */
167struct rndsource_cpu {
168	unsigned		rc_entropybits;
169	unsigned		rc_timesamples;
170	unsigned		rc_datasamples;
171	rnd_delta_t		rc_timedelta;
172};
173
174/*
175 * entropy_global (a.k.a. E for short in this file)
176 *
177 *	Global entropy state.  Writes protected by the global lock.
178 *	Some fields, marked (A), can be read outside the lock, and are
179 *	maintained with atomic_load/store_relaxed.
180 */
181struct {
182	kmutex_t	lock;		/* covers all global state */
183	struct entpool	pool;		/* global pool for extraction */
184	unsigned	bitsneeded;	/* (A) needed globally */
185	unsigned	bitspending;	/* pending in per-CPU pools */
186	unsigned	samplesneeded;	/* (A) needed globally */
187	unsigned	samplespending;	/* pending in per-CPU pools */
188	unsigned	timestamp;	/* (A) time of last consolidation */
189	unsigned	epoch;		/* (A) changes when needed -> 0 */
190	kcondvar_t	cv;		/* notifies state changes */
191	struct selinfo	selq;		/* notifies needed -> 0 */
192	struct lwp	*sourcelock;	/* lock on list of sources */
193	kcondvar_t	sourcelock_cv;	/* notifies sourcelock release */
194	LIST_HEAD(,krndsource) sources;	/* list of entropy sources */
195	bool		consolidate;	/* kick thread to consolidate */
196	bool		seed_rndsource;	/* true if seed source is attached */
197	bool		seeded;		/* true if seed file already loaded */
198} entropy_global __cacheline_aligned = {
199	/* Fields that must be initialized when the kernel is loaded.  */
200	.bitsneeded = MINENTROPYBITS,
201	.samplesneeded = MINSAMPLES,
202	.epoch = (unsigned)-1,	/* -1 means entropy never consolidated */
203	.sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
204};
205
206#define	E	(&entropy_global)	/* declutter */
207
208/* Read-mostly globals */
209static struct percpu	*entropy_percpu __read_mostly; /* struct entropy_cpu */
210static void		*entropy_sih __read_mostly; /* softint handler */
211static struct lwp	*entropy_lwp __read_mostly; /* housekeeping thread */
212
213static struct krndsource seed_rndsource __read_mostly;
214
215/*
216 * Event counters
217 *
218 *	Must be careful with adding these because they can serve as
219 *	side channels.
220 */
221static struct evcnt entropy_discretionary_evcnt =
222    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
223EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
224static struct evcnt entropy_immediate_evcnt =
225    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
226EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
227static struct evcnt entropy_partial_evcnt =
228    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
229EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
230static struct evcnt entropy_consolidate_evcnt =
231    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
232EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
233static struct evcnt entropy_extract_fail_evcnt =
234    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
235EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
236static struct evcnt entropy_request_evcnt =
237    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
238EVCNT_ATTACH_STATIC(entropy_request_evcnt);
239static struct evcnt entropy_deplete_evcnt =
240    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
241EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
242static struct evcnt entropy_notify_evcnt =
243    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
244EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
245
246/* Sysctl knobs */
247static bool	entropy_collection = 1;
248static bool	entropy_depletion = 0; /* Silly!  */
249
250static const struct sysctlnode	*entropy_sysctlroot;
251static struct sysctllog		*entropy_sysctllog;
252
253/* Forward declarations */
254static void	entropy_init_cpu(void *, void *, struct cpu_info *);
255static void	entropy_fini_cpu(void *, void *, struct cpu_info *);
256static void	entropy_account_cpu(struct entropy_cpu *);
257static void	entropy_enter(const void *, size_t, unsigned, bool);
258static bool	entropy_enter_intr(const void *, size_t, unsigned, bool);
259static void	entropy_softintr(void *);
260static void	entropy_thread(void *);
261static bool	entropy_pending(void);
262static void	entropy_pending_cpu(void *, void *, struct cpu_info *);
263static void	entropy_do_consolidate(void);
264static void	entropy_consolidate_xc(void *, void *);
265static void	entropy_notify(void);
266static int	sysctl_entropy_consolidate(SYSCTLFN_ARGS);
267static int	sysctl_entropy_gather(SYSCTLFN_ARGS);
268static void	filt_entropy_read_detach(struct knote *);
269static int	filt_entropy_read_event(struct knote *, long);
270static int	entropy_request(size_t, int);
271static void	rnd_add_data_internal(struct krndsource *, const void *,
272		    uint32_t, uint32_t, bool);
273static void	rnd_add_data_1(struct krndsource *, const void *, uint32_t,
274		    uint32_t, bool, uint32_t, bool);
275static unsigned	rndsource_entropybits(struct krndsource *);
276static void	rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
277static void	rndsource_to_user(struct krndsource *, rndsource_t *);
278static void	rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
279static void	rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);
280
281/*
282 * entropy_timer()
283 *
284 *	Cycle counter, time counter, or anything that changes a wee bit
285 *	unpredictably.
286 */
287static inline uint32_t
288entropy_timer(void)
289{
290	struct bintime bt;
291	uint32_t v;
292
293	/* If we have a CPU cycle counter, use the low 32 bits.  */
294#ifdef __HAVE_CPU_COUNTER
295	if (__predict_true(cpu_hascounter()))
296		return cpu_counter32();
297#endif	/* __HAVE_CPU_COUNTER */
298
299	/* If we're cold, tough.  Can't binuptime while cold.  */
300	if (__predict_false(cold))
301		return 0;
302
303	/* Fold the 128 bits of binuptime into 32 bits.  */
304	binuptime(&bt);
305	v = bt.frac;
306	v ^= bt.frac >> 32;
307	v ^= bt.sec;
308	v ^= bt.sec >> 32;
309	return v;
310}
311
312static void
313attach_seed_rndsource(void)
314{
315
316	KASSERT(!cpu_intr_p());
317	KASSERT(!cpu_softintr_p());
318	KASSERT(cold);
319
320	/*
321	 * First called no later than entropy_init, while we are still
322	 * single-threaded, so no need for RUN_ONCE.
323	 */
324	if (E->seed_rndsource)
325		return;
326
327	rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
328	    RND_FLAG_COLLECT_VALUE);
329	E->seed_rndsource = true;
330}
331
332/*
333 * entropy_init()
334 *
335 *	Initialize the entropy subsystem.  Panic on failure.
336 *
337 *	Requires percpu(9) and sysctl(9) to be initialized.  Must run
338 *	while cold.
339 */
340static void
341entropy_init(void)
342{
343	uint32_t extra[2];
344	struct krndsource *rs;
345	unsigned i = 0;
346
347	KASSERT(cold);
348
349	/* Grab some cycle counts early at boot.  */
350	extra[i++] = entropy_timer();
351
352	/* Run the entropy pool cryptography self-test.  */
353	if (entpool_selftest() == -1)
354		panic("entropy pool crypto self-test failed");
355
356	/* Create the sysctl directory.  */
357	sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
358	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
359	    SYSCTL_DESCR("Entropy (random number sources) options"),
360	    NULL, 0, NULL, 0,
361	    CTL_KERN, CTL_CREATE, CTL_EOL);
362
363	/* Create the sysctl knobs.  */
364	/* XXX These shouldn't be writable at securelevel>0.  */
365	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
366	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
367	    SYSCTL_DESCR("Automatically collect entropy from hardware"),
368	    NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
369	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
370	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
371	    SYSCTL_DESCR("`Deplete' entropy pool when observed"),
372	    NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
373	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
374	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
375	    SYSCTL_DESCR("Trigger entropy consolidation now"),
376	    sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
377	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
378	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
379	    SYSCTL_DESCR("Trigger entropy gathering from sources now"),
380	    sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
381	/* XXX These should maybe not be readable at securelevel>0.  */
382	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
383	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
384	    "needed",
385	    SYSCTL_DESCR("Systemwide entropy deficit (bits of entropy)"),
386	    NULL, 0, &E->bitsneeded, 0, CTL_CREATE, CTL_EOL);
387	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
388	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
389	    "pending",
390	    SYSCTL_DESCR("Number of bits of entropy pending on CPUs"),
391	    NULL, 0, &E->bitspending, 0, CTL_CREATE, CTL_EOL);
392	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
393	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
394	    "samplesneeded",
395	    SYSCTL_DESCR("Systemwide entropy deficit (samples)"),
396	    NULL, 0, &E->samplesneeded, 0, CTL_CREATE, CTL_EOL);
397	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
398	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
399	    "samplespending",
400	    SYSCTL_DESCR("Number of samples pending on CPUs"),
401	    NULL, 0, &E->samplespending, 0, CTL_CREATE, CTL_EOL);
402	sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
403	    CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
404	    "epoch", SYSCTL_DESCR("Entropy epoch"),
405	    NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
406
407	/* Initialize the global state for multithreaded operation.  */
408	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
409	cv_init(&E->cv, "entropy");
410	selinit(&E->selq);
411	cv_init(&E->sourcelock_cv, "entsrclock");
412
413	/* Make sure the seed source is attached.  */
414	attach_seed_rndsource();
415
416	/* Note if the bootloader didn't provide a seed.  */
417	if (!E->seeded)
418		aprint_debug("entropy: no seed from bootloader\n");
419
420	/* Allocate the per-CPU records for all early entropy sources.  */
421	LIST_FOREACH(rs, &E->sources, list)
422		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
423
424	/* Allocate and initialize the per-CPU state.  */
425	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
426	    entropy_init_cpu, entropy_fini_cpu, NULL);
427
428	/* Enter the boot cycle count to get started.  */
429	extra[i++] = entropy_timer();
430	KASSERT(i == __arraycount(extra));
431	entropy_enter(extra, sizeof extra, /*nbits*/0, /*count*/false);
432	explicit_memset(extra, 0, sizeof extra);
433}
434
435/*
436 * entropy_init_late()
437 *
438 *	Late initialization.  Panic on failure.
439 *
440 *	Requires CPUs to have been detected and LWPs to have started.
441 *	Must run while cold.
442 */
443static void
444entropy_init_late(void)
445{
446	int error;
447
448	KASSERT(cold);
449
450	/*
451	 * Establish the softint at the highest softint priority level.
452	 * Must happen after CPU detection.
453	 */
454	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
455	    &entropy_softintr, NULL);
456	if (entropy_sih == NULL)
457		panic("unable to establish entropy softint");
458
459	/*
460	 * Create the entropy housekeeping thread.  Must happen after
461	 * lwpinit.
462	 */
463	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
464	    entropy_thread, NULL, &entropy_lwp, "entbutler");
465	if (error)
466		panic("unable to create entropy housekeeping thread: %d",
467		    error);
468}
469
470/*
471 * entropy_init_cpu(ptr, cookie, ci)
472 *
473 *	percpu(9) constructor for per-CPU entropy pool.
474 */
475static void
476entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
477{
478	struct entropy_cpu *ec = ptr;
479	const char *cpuname;
480
481	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
482	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
483	ec->ec_bitspending = 0;
484	ec->ec_samplespending = 0;
485	ec->ec_locked = false;
486
487	/* XXX ci_cpuname may not be initialized early enough.  */
488	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
489	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
490	    cpuname, "entropy softint");
491	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
492	    cpuname, "entropy intrdrop");
493	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
494	    cpuname, "entropy intrtrunc");
495}
496
497/*
498 * entropy_fini_cpu(ptr, cookie, ci)
499 *
500 *	percpu(9) destructor for per-CPU entropy pool.
501 */
502static void
503entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
504{
505	struct entropy_cpu *ec = ptr;
506
507	/*
508	 * Zero any lingering data.  Disclosure of the per-CPU pool
509	 * shouldn't retroactively affect the security of any keys
510	 * generated, because entpool(9) erases whatever we have just
511	 * drawn out of any pool, but better safe than sorry.
512	 */
513	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
514
515	evcnt_detach(&ec->ec_evcnt->intrtrunc);
516	evcnt_detach(&ec->ec_evcnt->intrdrop);
517	evcnt_detach(&ec->ec_evcnt->softint);
518
519	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
520	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
521}
522
523/*
524 * ec = entropy_cpu_get(&lock)
525 * entropy_cpu_put(&lock, ec)
526 *
527 *	Lock and unlock the per-CPU entropy state.  This only prevents
528 *	access on the same CPU -- by hard interrupts, by soft
529 *	interrupts, or by other threads.
530 *
531 *	Blocks soft interrupts and preemption altogether; doesn't block
532 *	hard interrupts, but causes samples in hard interrupts to be
533 *	dropped.
534 */
535static struct entropy_cpu *
536entropy_cpu_get(struct entropy_cpu_lock *lock)
537{
538	struct entropy_cpu *ec;
539
540	ec = percpu_getref(entropy_percpu);
541	lock->ecl_s = splsoftserial();
542	KASSERT(!ec->ec_locked);
543	ec->ec_locked = true;
544	lock->ecl_pctr = lwp_pctr();
545	__insn_barrier();
546
547	return ec;
548}
549
550static void
551entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
552{
553
554	KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
555	KASSERT(ec->ec_locked);
556
557	__insn_barrier();
558	KASSERT(lock->ecl_pctr == lwp_pctr());
559	ec->ec_locked = false;
560	splx(lock->ecl_s);
561	percpu_putref(entropy_percpu);
562}
563
564/*
565 * entropy_seed(seed)
566 *
567 *	Seed the entropy pool with seed.  Meant to be called as early
568 *	as possible by the bootloader; may be called before or after
569 *	entropy_init.  Must be called before system reaches userland.
570 *	Must be called in thread or soft interrupt context, not in hard
571 *	interrupt context.  Must be called at most once.
572 *
573 *	Overwrites the seed in place.  Caller may then free the memory.
574 */
575static void
576entropy_seed(rndsave_t *seed)
577{
578	SHA1_CTX ctx;
579	uint8_t digest[SHA1_DIGEST_LENGTH];
580	bool seeded;
581
582	KASSERT(!cpu_intr_p());
583	KASSERT(!cpu_softintr_p());
584	KASSERT(cold);
585
586	/*
587	 * Verify the checksum.  If the checksum fails, take the data
588	 * but ignore the entropy estimate -- the file may have been
589	 * incompletely written with garbage, which is harmless to add
590	 * but may not be as unpredictable as alleged.
591	 */
592	SHA1Init(&ctx);
593	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
594	SHA1Update(&ctx, seed->data, sizeof(seed->data));
595	SHA1Final(digest, &ctx);
596	CTASSERT(sizeof(seed->digest) == sizeof(digest));
597	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
598		printf("entropy: invalid seed checksum\n");
599		seed->entropy = 0;
600	}
601	explicit_memset(&ctx, 0, sizeof ctx);
602	explicit_memset(digest, 0, sizeof digest);
603
604	/*
605	 * If the entropy is insensibly large, try byte-swapping.
606	 * Otherwise assume the file is corrupted and act as though it
607	 * has zero entropy.
608	 */
609	if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
610		seed->entropy = bswap32(seed->entropy);
611		if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
612			seed->entropy = 0;
613	}
614
615	/* Make sure the seed source is attached.  */
616	attach_seed_rndsource();
617
618	/* Test and set E->seeded.  */
619	seeded = E->seeded;
620	E->seeded = (seed->entropy > 0);
621
622	/*
623	 * If we've been seeded, may be re-entering the same seed
624	 * (e.g., bootloader vs module init, or something).  No harm in
625	 * entering it twice, but it contributes no additional entropy.
626	 */
627	if (seeded) {
628		printf("entropy: double-seeded by bootloader\n");
629		seed->entropy = 0;
630	} else {
631		printf("entropy: entering seed from bootloader"
632		    " with %u bits of entropy\n", (unsigned)seed->entropy);
633	}
634
635	/* Enter it into the pool and promptly zero it.  */
636	rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
637	    seed->entropy);
638	explicit_memset(seed, 0, sizeof(*seed));
639}
640
641/*
642 * entropy_bootrequest()
643 *
644 *	Request entropy from all sources at boot, once config is
645 *	complete and interrupts are running but we are still cold.
646 */
647void
648entropy_bootrequest(void)
649{
650	int error;
651
652	KASSERT(!cpu_intr_p());
653	KASSERT(!cpu_softintr_p());
654	KASSERT(cold);
655
656	/*
657	 * Request enough to satisfy the maximum entropy shortage.
658	 * This is harmless overkill if the bootloader provided a seed.
659	 */
660	error = entropy_request(MINENTROPYBYTES, ENTROPY_WAIT);
661	KASSERTMSG(error == 0, "error=%d", error);
662}
663
664/*
665 * entropy_epoch()
666 *
667 *	Returns the current entropy epoch.  If this changes, you should
668 *	reseed.  If -1, means system entropy has not yet reached full
669 *	entropy or been explicitly consolidated; never reverts back to
670 *	-1.  Never zero, so you can always use zero as an uninitialized
671 *	sentinel value meaning `reseed ASAP'.
672 *
673 *	Usage model:
674 *
675 *		struct foo {
676 *			struct crypto_prng prng;
677 *			unsigned epoch;
678 *		} *foo;
679 *
680 *		unsigned epoch = entropy_epoch();
681 *		if (__predict_false(epoch != foo->epoch)) {
682 *			uint8_t seed[32];
683 *			if (entropy_extract(seed, sizeof seed, 0) != 0)
684 *				warn("no entropy");
685 *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
686 *			foo->epoch = epoch;
687 *		}
688 */
689unsigned
690entropy_epoch(void)
691{
692
693	/*
694	 * Unsigned int, so no need for seqlock for an atomic read, but
695	 * make sure we read it afresh each time.
696	 */
697	return atomic_load_relaxed(&E->epoch);
698}
699
700/*
701 * entropy_ready()
702 *
703 *	True if the entropy pool has full entropy.
704 */
705bool
706entropy_ready(void)
707{
708
709	return atomic_load_relaxed(&E->bitsneeded) == 0;
710}
711
712/*
713 * entropy_account_cpu(ec)
714 *
715 *	Consider whether to consolidate entropy into the global pool
716 *	after we just added some into the current CPU's pending pool.
717 *
718 *	- If this CPU can provide enough entropy now, do so.
719 *
720 *	- If this and whatever else is available on other CPUs can
721 *	  provide enough entropy, kick the consolidation thread.
722 *
723 *	- Otherwise, do as little as possible, except maybe consolidate
724 *	  entropy at most once a minute.
725 *
726 *	Caller must be bound to a CPU and therefore have exclusive
727 *	access to ec.  Will acquire and release the global lock.
728 */
729static void
730entropy_account_cpu(struct entropy_cpu *ec)
731{
732	struct entropy_cpu_lock lock;
733	struct entropy_cpu *ec0;
734	unsigned bitsdiff, samplesdiff;
735
736	KASSERT(!cpu_intr_p());
737	KASSERT(!cold);
738	KASSERT(curlwp->l_pflag & LP_BOUND);
739
740	/*
741	 * If there's no entropy needed, and entropy has been
742	 * consolidated in the last minute, do nothing.
743	 */
744	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&
745	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
746	    __predict_true((time_uptime - E->timestamp) <= 60))
747		return;
748
749	/*
750	 * Consider consolidation, under the global lock and with the
751	 * per-CPU state locked.
752	 */
753	mutex_enter(&E->lock);
754	ec0 = entropy_cpu_get(&lock);
755	KASSERT(ec0 == ec);
756
757	if (ec->ec_bitspending == 0 && ec->ec_samplespending == 0) {
758		/* Raced with consolidation xcall.  Nothing to do.  */
759	} else if (E->bitsneeded != 0 && E->bitsneeded <= ec->ec_bitspending) {
760		/*
761		 * If we have not yet attained full entropy but we can
762		 * now, do so.  This way we disseminate entropy
763		 * promptly when it becomes available early at boot;
764		 * otherwise we leave it to the entropy consolidation
765		 * thread, which is rate-limited to mitigate side
766		 * channels and abuse.
767		 */
768		uint8_t buf[ENTPOOL_CAPACITY];
769
770		/* Transfer from the local pool to the global pool.  */
771		entpool_extract(ec->ec_pool, buf, sizeof buf);
772		entpool_enter(&E->pool, buf, sizeof buf);
773		atomic_store_relaxed(&ec->ec_bitspending, 0);
774		atomic_store_relaxed(&ec->ec_samplespending, 0);
775		atomic_store_relaxed(&E->bitsneeded, 0);
776		atomic_store_relaxed(&E->samplesneeded, 0);
777
778		/* Notify waiters that we now have full entropy.  */
779		entropy_notify();
780		entropy_immediate_evcnt.ev_count++;
781	} else {
782		/* Determine how much we can add to the global pool.  */
783		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
784		    "E->bitspending=%u", E->bitspending);
785		bitsdiff = MIN(ec->ec_bitspending,
786		    MINENTROPYBITS - E->bitspending);
787		KASSERTMSG(E->samplespending <= MINSAMPLES,
788		    "E->samplespending=%u", E->samplespending);
789		samplesdiff = MIN(ec->ec_samplespending,
790		    MINSAMPLES - E->samplespending);
791
792		/*
793		 * This should make a difference unless we are already
794		 * saturated.
795		 */
796		KASSERTMSG((bitsdiff || samplesdiff ||
797			E->bitspending == MINENTROPYBITS ||
798			E->samplespending == MINSAMPLES),
799		    "bitsdiff=%u E->bitspending=%u ec->ec_bitspending=%u"
800		    "samplesdiff=%u E->samplespending=%u"
801		    " ec->ec_samplespending=%u"
802		    " minentropybits=%u minsamples=%u",
803		    bitsdiff, E->bitspending, ec->ec_bitspending,
804		    samplesdiff, E->samplespending, ec->ec_samplespending,
805		    (unsigned)MINENTROPYBITS, (unsigned)MINSAMPLES);
806
807		/* Add to the global, subtract from the local.  */
808		E->bitspending += bitsdiff;
809		KASSERTMSG(E->bitspending <= MINENTROPYBITS,
810		    "E->bitspending=%u", E->bitspending);
811		atomic_store_relaxed(&ec->ec_bitspending,
812		    ec->ec_bitspending - bitsdiff);
813
814		E->samplespending += samplesdiff;
815		KASSERTMSG(E->samplespending <= MINSAMPLES,
816		    "E->samplespending=%u", E->samplespending);
817		atomic_store_relaxed(&ec->ec_samplespending,
818		    ec->ec_samplespending - samplesdiff);
819
820		/* One or the other must have gone up from zero.  */
821		KASSERT(E->bitspending || E->samplespending);
822
823		if (E->bitsneeded <= E->bitspending ||
824		    E->samplesneeded <= E->samplespending) {
825			/*
826			 * Enough bits or at least samples between all
827			 * the per-CPU pools.  Leave a note for the
828			 * housekeeping thread to consolidate entropy
829			 * next time it wakes up -- and wake it up if
830			 * this is the first time, to speed things up.
831			 *
832			 * If we don't need any entropy, this doesn't
833			 * mean much, but it is the only time we ever
834			 * gather additional entropy in case the
835			 * accounting has been overly optimistic.  This
836			 * happens at most once a minute, so there's
837			 * negligible performance cost.
838			 */
839			E->consolidate = true;
840			if (E->epoch == (unsigned)-1)
841				cv_broadcast(&E->cv);
842			if (E->bitsneeded == 0)
843				entropy_discretionary_evcnt.ev_count++;
844		} else {
845			/* Can't get full entropy.  Keep gathering.  */
846			entropy_partial_evcnt.ev_count++;
847		}
848	}
849
850	entropy_cpu_put(&lock, ec);
851	mutex_exit(&E->lock);
852}
853
854/*
855 * entropy_enter_early(buf, len, nbits)
856 *
857 *	Do entropy bookkeeping globally, before we have established
858 *	per-CPU pools.  Enter directly into the global pool in the hope
859 *	that we enter enough before the first entropy_extract to thwart
860 *	iterative-guessing attacks; entropy_extract will warn if not.
861 */
862static void
863entropy_enter_early(const void *buf, size_t len, unsigned nbits)
864{
865	bool notify = false;
866	int s;
867
868	KASSERT(cold);
869
870	/*
871	 * We're early at boot before multithreading and multi-CPU
872	 * operation, and we don't have softints yet to defer
873	 * processing from interrupt context, so we have to enter the
874	 * samples directly into the global pool.  But interrupts may
875	 * be enabled, and we enter this path from interrupt context,
876	 * so block interrupts until we're done.
877	 */
878	s = splhigh();
879
880	/* Enter it into the pool.  */
881	entpool_enter(&E->pool, buf, len);
882
883	/*
884	 * Decide whether to notify reseed -- we will do so if either:
885	 * (a) we transition from partial entropy to full entropy, or
886	 * (b) we get a batch of full entropy all at once.
887	 * We don't count timing samples because we assume, while cold,
888	 * there's not likely to be much jitter yet.
889	 */
890	notify |= (E->bitsneeded && E->bitsneeded <= nbits);
891	notify |= (nbits >= MINENTROPYBITS);
892
893	/*
894	 * Subtract from the needed count and notify if appropriate.
895	 * We don't count samples here because entropy_timer might
896	 * still be returning zero at this point if there's no CPU
897	 * cycle counter.
898	 */
899	E->bitsneeded -= MIN(E->bitsneeded, nbits);
900	if (notify) {
901		entropy_notify();
902		entropy_immediate_evcnt.ev_count++;
903	}
904
905	splx(s);
906}
907
908/*
909 * entropy_enter(buf, len, nbits, count)
910 *
911 *	Enter len bytes of data from buf into the system's entropy
912 *	pool, stirring as necessary when the internal buffer fills up.
913 *	nbits is a lower bound on the number of bits of entropy in the
914 *	process that led to this sample.
915 */
916static void
917entropy_enter(const void *buf, size_t len, unsigned nbits, bool count)
918{
919	struct entropy_cpu_lock lock;
920	struct entropy_cpu *ec;
921	unsigned bitspending, samplespending;
922	int bound;
923
924	KASSERTMSG(!cpu_intr_p(),
925	    "use entropy_enter_intr from interrupt context");
926	KASSERTMSG(howmany(nbits, NBBY) <= len,
927	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
928
929	/*
930	 * If we're still cold, just use entropy_enter_early to put
931	 * samples directly into the global pool.
932	 */
933	if (__predict_false(cold)) {
934		entropy_enter_early(buf, len, nbits);
935		return;
936	}
937
938	/*
939	 * Bind ourselves to the current CPU so we don't switch CPUs
940	 * between entering data into the current CPU's pool (and
941	 * updating the pending count) and transferring it to the
942	 * global pool in entropy_account_cpu.
943	 */
944	bound = curlwp_bind();
945
946	/*
947	 * With the per-CPU state locked, enter into the per-CPU pool
948	 * and count up what we can add.
949	 *
950	 * We don't count samples while cold because entropy_timer
951	 * might still be returning zero if there's no CPU cycle
952	 * counter.
953	 */
954	ec = entropy_cpu_get(&lock);
955	entpool_enter(ec->ec_pool, buf, len);
956	bitspending = ec->ec_bitspending;
957	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
958	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
959	samplespending = ec->ec_samplespending;
960	if (__predict_true(count)) {
961		samplespending += MIN(MINSAMPLES - samplespending, 1);
962		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
963	}
964	entropy_cpu_put(&lock, ec);
965
966	/* Consolidate globally if appropriate based on what we added.  */
967	if (bitspending > 0 || samplespending >= MINSAMPLES)
968		entropy_account_cpu(ec);
969
970	curlwp_bindx(bound);
971}
972
973/*
974 * entropy_enter_intr(buf, len, nbits, count)
975 *
976 *	Enter up to len bytes of data from buf into the system's
977 *	entropy pool without stirring.  nbits is a lower bound on the
978 *	number of bits of entropy in the process that led to this
979 *	sample.  If the sample could be entered completely, assume
980 *	nbits of entropy pending; otherwise assume none, since we don't
981 *	know whether some parts of the sample are constant, for
982 *	instance.  Schedule a softint to stir the entropy pool if
983 *	needed.  Return true if used fully, false if truncated at all.
984 *
985 *	Using this in thread or softint context with no spin locks held
986 *	will work, but you might as well use entropy_enter in that
987 *	case.
988 */
989static bool
990entropy_enter_intr(const void *buf, size_t len, unsigned nbits, bool count)
991{
992	struct entropy_cpu *ec;
993	bool fullyused = false;
994	uint32_t bitspending, samplespending;
995	int s;
996
997	KASSERTMSG(howmany(nbits, NBBY) <= len,
998	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
999
1000	/*
1001	 * If we're still cold, just use entropy_enter_early to put
1002	 * samples directly into the global pool.
1003	 */
1004	if (__predict_false(cold)) {
1005		entropy_enter_early(buf, len, nbits);
1006		return true;
1007	}
1008
1009	/*
1010	 * In case we were called in thread or interrupt context with
1011	 * interrupts unblocked, block soft interrupts up to
1012	 * IPL_SOFTSERIAL.  This way logic that is safe in interrupt
1013	 * context or under a spin lock is also safe in less
1014	 * restrictive contexts.
1015	 */
1016	s = splsoftserial();
1017
1018	/*
1019	 * Acquire the per-CPU state.  If someone is in the middle of
1020	 * using it, drop the sample.  Otherwise, take the lock so that
1021	 * higher-priority interrupts will drop their samples.
1022	 */
1023	ec = percpu_getref(entropy_percpu);
1024	if (ec->ec_locked) {
1025		ec->ec_evcnt->intrdrop.ev_count++;
1026		goto out0;
1027	}
1028	ec->ec_locked = true;
1029	__insn_barrier();
1030
1031	/*
1032	 * Enter as much as we can into the per-CPU pool.  If it was
1033	 * truncated, schedule a softint to stir the pool and stop.
1034	 */
1035	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
1036		if (__predict_true(!cold))
1037			softint_schedule(entropy_sih);
1038		ec->ec_evcnt->intrtrunc.ev_count++;
1039		goto out1;
1040	}
1041	fullyused = true;
1042
1043	/*
1044	 * Count up what we can contribute.
1045	 *
1046	 * We don't count samples while cold because entropy_timer
1047	 * might still be returning zero if there's no CPU cycle
1048	 * counter.
1049	 */
1050	bitspending = ec->ec_bitspending;
1051	bitspending += MIN(MINENTROPYBITS - bitspending, nbits);
1052	atomic_store_relaxed(&ec->ec_bitspending, bitspending);
1053	if (__predict_true(count)) {
1054		samplespending = ec->ec_samplespending;
1055		samplespending += MIN(MINSAMPLES - samplespending, 1);
1056		atomic_store_relaxed(&ec->ec_samplespending, samplespending);
1057	}
1058
1059	/* Schedule a softint if we added anything and it matters.  */
1060	if (__predict_false(atomic_load_relaxed(&E->bitsneeded) ||
1061		atomic_load_relaxed(&entropy_depletion)) &&
1062	    (nbits != 0 || count) &&
1063	    __predict_true(!cold))
1064		softint_schedule(entropy_sih);
1065
1066out1:	/* Release the per-CPU state.  */
1067	KASSERT(ec->ec_locked);
1068	__insn_barrier();
1069	ec->ec_locked = false;
1070out0:	percpu_putref(entropy_percpu);
1071	splx(s);
1072
1073	return fullyused;
1074}
1075
1076/*
1077 * entropy_softintr(cookie)
1078 *
1079 *	Soft interrupt handler for entering entropy.  Takes care of
1080 *	stirring the local CPU's entropy pool if it filled up during
1081 *	hard interrupts, and promptly crediting entropy from the local
1082 *	CPU's entropy pool to the global entropy pool if needed.
1083 */
1084static void
1085entropy_softintr(void *cookie)
1086{
1087	struct entropy_cpu_lock lock;
1088	struct entropy_cpu *ec;
1089	unsigned bitspending, samplespending;
1090
1091	/*
1092	 * With the per-CPU state locked, stir the pool if necessary
1093	 * and determine if there's any pending entropy on this CPU to
1094	 * account globally.
1095	 */
1096	ec = entropy_cpu_get(&lock);
1097	ec->ec_evcnt->softint.ev_count++;
1098	entpool_stir(ec->ec_pool);
1099	bitspending = ec->ec_bitspending;
1100	samplespending = ec->ec_samplespending;
1101	entropy_cpu_put(&lock, ec);
1102
1103	/* Consolidate globally if appropriate based on what we added.  */
1104	if (bitspending > 0 || samplespending >= MINSAMPLES)
1105		entropy_account_cpu(ec);
1106}
1107
1108/*
1109 * entropy_thread(cookie)
1110 *
1111 *	Handle any asynchronous entropy housekeeping.
1112 */
1113static void
1114entropy_thread(void *cookie)
1115{
1116	bool consolidate;
1117
1118#ifndef _RUMPKERNEL		/* XXX rump starts threads before cold */
1119	KASSERT(!cold);
1120#endif
1121
1122	for (;;) {
1123		/*
1124		 * Wait until there's full entropy somewhere among the
1125		 * CPUs, as confirmed at most once per minute, or
1126		 * someone wants to consolidate.
1127		 */
1128		if (entropy_pending()) {
1129			consolidate = true;
1130		} else {
1131			mutex_enter(&E->lock);
1132			if (!E->consolidate)
1133				cv_timedwait(&E->cv, &E->lock, 60*hz);
1134			consolidate = E->consolidate;
1135			E->consolidate = false;
1136			mutex_exit(&E->lock);
1137		}
1138
1139		if (consolidate) {
1140			/* Do it.  */
1141			entropy_do_consolidate();
1142
1143			/* Mitigate abuse.  */
1144			kpause("entropy", false, hz, NULL);
1145		}
1146	}
1147}
1148
1149struct entropy_pending_count {
1150	uint32_t bitspending;
1151	uint32_t samplespending;
1152};
1153
1154/*
1155 * entropy_pending()
1156 *
1157 *	True if enough bits or samples are pending on other CPUs to
1158 *	warrant consolidation.
1159 */
1160static bool
1161entropy_pending(void)
1162{
1163	struct entropy_pending_count count = { 0, 0 }, *C = &count;
1164
1165	percpu_foreach(entropy_percpu, &entropy_pending_cpu, C);
1166	return C->bitspending >= MINENTROPYBITS ||
1167	    C->samplespending >= MINSAMPLES;
1168}
1169
1170static void
1171entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
1172{
1173	struct entropy_cpu *ec = ptr;
1174	struct entropy_pending_count *C = cookie;
1175	uint32_t cpu_bitspending;
1176	uint32_t cpu_samplespending;
1177
1178	cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);
1179	cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);
1180	C->bitspending += MIN(MINENTROPYBITS - C->bitspending,
1181	    cpu_bitspending);
1182	C->samplespending += MIN(MINSAMPLES - C->samplespending,
1183	    cpu_samplespending);
1184}
1185
1186/*
1187 * entropy_do_consolidate()
1188 *
1189 *	Issue a cross-call to gather entropy on all CPUs and advance
1190 *	the entropy epoch.
1191 */
1192static void
1193entropy_do_consolidate(void)
1194{
1195	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1196	static struct timeval lasttime; /* serialized by E->lock */
1197	struct entpool pool;
1198	uint8_t buf[ENTPOOL_CAPACITY];
1199	unsigned bitsdiff, samplesdiff;
1200	uint64_t ticket;
1201
1202	KASSERT(!cold);
1203	ASSERT_SLEEPABLE();
1204
1205	/* Gather entropy on all CPUs into a temporary pool.  */
1206	memset(&pool, 0, sizeof pool);
1207	ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
1208	xc_wait(ticket);
1209
1210	/* Acquire the lock to notify waiters.  */
1211	mutex_enter(&E->lock);
1212
1213	/* Count another consolidation.  */
1214	entropy_consolidate_evcnt.ev_count++;
1215
1216	/* Note when we last consolidated, i.e. now.  */
1217	E->timestamp = time_uptime;
1218
1219	/* Mix what we gathered into the global pool.  */
1220	entpool_extract(&pool, buf, sizeof buf);
1221	entpool_enter(&E->pool, buf, sizeof buf);
1222	explicit_memset(&pool, 0, sizeof pool);
1223
1224	/* Count the entropy that was gathered.  */
1225	bitsdiff = MIN(E->bitsneeded, E->bitspending);
1226	atomic_store_relaxed(&E->bitsneeded, E->bitsneeded - bitsdiff);
1227	E->bitspending -= bitsdiff;
1228	if (__predict_false(E->bitsneeded > 0) && bitsdiff != 0) {
1229		if ((boothowto & AB_DEBUG) != 0 &&
1230		    ratecheck(&lasttime, &interval)) {
1231			printf("WARNING:"
1232			    " consolidating less than full entropy\n");
1233		}
1234	}
1235
1236	samplesdiff = MIN(E->samplesneeded, E->samplespending);
1237	atomic_store_relaxed(&E->samplesneeded,
1238	    E->samplesneeded - samplesdiff);
1239	E->samplespending -= samplesdiff;
1240
1241	/* Advance the epoch and notify waiters.  */
1242	entropy_notify();
1243
1244	/* Release the lock.  */
1245	mutex_exit(&E->lock);
1246}
1247
1248/*
1249 * entropy_consolidate_xc(vpool, arg2)
1250 *
1251 *	Extract output from the local CPU's input pool and enter it
1252 *	into a temporary pool passed as vpool.
1253 */
1254static void
1255entropy_consolidate_xc(void *vpool, void *arg2 __unused)
1256{
1257	struct entpool *pool = vpool;
1258	struct entropy_cpu_lock lock;
1259	struct entropy_cpu *ec;
1260	uint8_t buf[ENTPOOL_CAPACITY];
1261	uint32_t extra[7];
1262	unsigned i = 0;
1263
1264	/* Grab CPU number and cycle counter to mix extra into the pool.  */
1265	extra[i++] = cpu_number();
1266	extra[i++] = entropy_timer();
1267
1268	/*
1269	 * With the per-CPU state locked, extract from the per-CPU pool
1270	 * and count it as no longer pending.
1271	 */
1272	ec = entropy_cpu_get(&lock);
1273	extra[i++] = entropy_timer();
1274	entpool_extract(ec->ec_pool, buf, sizeof buf);
1275	atomic_store_relaxed(&ec->ec_bitspending, 0);
1276	atomic_store_relaxed(&ec->ec_samplespending, 0);
1277	extra[i++] = entropy_timer();
1278	entropy_cpu_put(&lock, ec);
1279	extra[i++] = entropy_timer();
1280
1281	/*
1282	 * Copy over statistics, and enter the per-CPU extract and the
1283	 * extra timing into the temporary pool, under the global lock.
1284	 */
1285	mutex_enter(&E->lock);
1286	extra[i++] = entropy_timer();
1287	entpool_enter(pool, buf, sizeof buf);
1288	explicit_memset(buf, 0, sizeof buf);
1289	extra[i++] = entropy_timer();
1290	KASSERT(i == __arraycount(extra));
1291	entpool_enter(pool, extra, sizeof extra);
1292	explicit_memset(extra, 0, sizeof extra);
1293	mutex_exit(&E->lock);
1294}
1295
1296/*
1297 * entropy_notify()
1298 *
1299 *	Caller just contributed entropy to the global pool.  Advance
1300 *	the entropy epoch and notify waiters.
1301 *
1302 *	Caller must hold the global entropy lock.
1303 */
1304static void
1305entropy_notify(void)
1306{
1307	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1308	static struct timeval lasttime; /* serialized by E->lock */
1309	static bool ready = false, besteffort = false;
1310	unsigned epoch;
1311
1312	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1313
1314	/*
1315	 * If this is the first time, print a message to the console
1316	 * that we're ready so operators can compare it to the timing
1317	 * of other events.
1318	 *
1319	 * If we didn't get full entropy from reliable sources, report
1320	 * instead that we are running on fumes with best effort.  (If
1321	 * we ever do get full entropy after that, print the ready
1322	 * message once.)
1323	 */
1324	if (__predict_false(!ready)) {
1325		if (E->bitsneeded == 0) {
1326			printf("entropy: ready\n");
1327			ready = true;
1328		} else if (E->samplesneeded == 0 && !besteffort) {
1329			printf("entropy: best effort\n");
1330			besteffort = true;
1331		}
1332	}
1333
1334	/* Set the epoch; roll over from UINTMAX-1 to 1.  */
1335	if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
1336	    ratecheck(&lasttime, &interval)) {
1337		epoch = E->epoch + 1;
1338		if (epoch == 0 || epoch == (unsigned)-1)
1339			epoch = 1;
1340		atomic_store_relaxed(&E->epoch, epoch);
1341	}
1342	KASSERT(E->epoch != (unsigned)-1);
1343
1344	/* Notify waiters.  */
1345	if (__predict_true(!cold)) {
1346		cv_broadcast(&E->cv);
1347		selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
1348	}
1349
1350	/* Count another notification.  */
1351	entropy_notify_evcnt.ev_count++;
1352}
1353
1354/*
1355 * entropy_consolidate()
1356 *
1357 *	Trigger entropy consolidation and wait for it to complete.
1358 *
1359 *	This should be used sparingly, not periodically -- requiring
1360 *	conscious intervention by the operator or a clear policy
1361 *	decision.  Otherwise, the kernel will automatically consolidate
1362 *	when enough entropy has been gathered into per-CPU pools to
1363 *	transition to full entropy.
1364 */
1365void
1366entropy_consolidate(void)
1367{
1368	uint64_t ticket;
1369	int error;
1370
1371	KASSERT(!cold);
1372	ASSERT_SLEEPABLE();
1373
1374	mutex_enter(&E->lock);
1375	ticket = entropy_consolidate_evcnt.ev_count;
1376	E->consolidate = true;
1377	cv_broadcast(&E->cv);
1378	while (ticket == entropy_consolidate_evcnt.ev_count) {
1379		error = cv_wait_sig(&E->cv, &E->lock);
1380		if (error)
1381			break;
1382	}
1383	mutex_exit(&E->lock);
1384}
1385
1386/*
1387 * sysctl -w kern.entropy.consolidate=1
1388 *
1389 *	Trigger entropy consolidation and wait for it to complete.
1390 *	Writable only by superuser.  This, writing to /dev/random, and
1391 *	ioctl(RNDADDDATA) are the only ways for the system to
1392 *	consolidate entropy if the operator knows something the kernel
1393 *	doesn't about how unpredictable the pending entropy pools are.
1394 */
1395static int
1396sysctl_entropy_consolidate(SYSCTLFN_ARGS)
1397{
1398	struct sysctlnode node = *rnode;
1399	int arg = 0;
1400	int error;
1401
1402	node.sysctl_data = &arg;
1403	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1404	if (error || newp == NULL)
1405		return error;
1406	if (arg)
1407		entropy_consolidate();
1408
1409	return error;
1410}
1411
1412/*
1413 * sysctl -w kern.entropy.gather=1
1414 *
1415 *	Trigger gathering entropy from all on-demand sources, and wait
1416 *	for synchronous sources (but not asynchronous sources) to
1417 *	complete.  Writable only by superuser.
1418 */
1419static int
1420sysctl_entropy_gather(SYSCTLFN_ARGS)
1421{
1422	struct sysctlnode node = *rnode;
1423	int arg = 0;
1424	int error;
1425
1426	node.sysctl_data = &arg;
1427	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1428	if (error || newp == NULL)
1429		return error;
1430	if (arg) {
1431		mutex_enter(&E->lock);
1432		error = entropy_request(ENTROPY_CAPACITY,
1433		    ENTROPY_WAIT|ENTROPY_SIG);
1434		mutex_exit(&E->lock);
1435	}
1436
1437	return 0;
1438}
1439
1440/*
1441 * entropy_extract(buf, len, flags)
1442 *
1443 *	Extract len bytes from the global entropy pool into buf.
1444 *
1445 *	Caller MUST NOT expose these bytes directly -- must use them
1446 *	ONLY to seed a cryptographic pseudorandom number generator
1447 *	(`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'),
1448 *	and then erase them.  entropy_extract does not, on its own,
1449 *	provide backtracking resistance -- it must be combined with a
1450 *	PRNG/DRBG that does.
1451 *
1452 *	This may be used very early at boot, before even entropy_init
1453 *	has been called.
1454 *
1455 *	You generally shouldn't use this directly -- use cprng(9)
1456 *	instead.
1457 *
1458 *	Flags may have:
1459 *
1460 *		ENTROPY_WAIT	Wait for entropy if not available yet.
1461 *		ENTROPY_SIG	Allow interruption by a signal during wait.
1462 *		ENTROPY_HARDFAIL Either fill the buffer with full entropy,
1463 *				or fail without filling it at all.
1464 *
1465 *	Return zero on success, or error on failure:
1466 *
1467 *		EWOULDBLOCK	No entropy and ENTROPY_WAIT not set.
1468 *		EINTR/ERESTART	No entropy, ENTROPY_SIG set, and interrupted.
1469 *
1470 *	If ENTROPY_WAIT is set, allowed only in thread context.  If
1471 *	ENTROPY_WAIT is not set, allowed also in softint context -- may
1472 *	sleep on an adaptive lock up to IPL_SOFTSERIAL.  Forbidden in
1473 *	hard interrupt context.
1474 */
1475int
1476entropy_extract(void *buf, size_t len, int flags)
1477{
1478	static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
1479	static struct timeval lasttime; /* serialized by E->lock */
1480	bool printed = false;
1481	int s = -1/*XXXGCC*/, error;
1482
1483	if (ISSET(flags, ENTROPY_WAIT)) {
1484		ASSERT_SLEEPABLE();
1485		KASSERT(!cold);
1486	}
1487
1488	/* Refuse to operate in interrupt context.  */
1489	KASSERT(!cpu_intr_p());
1490
1491	/*
1492	 * If we're cold, we are only contending with interrupts on the
1493	 * current CPU, so block them.  Otherwise, we are _not_
1494	 * contending with interrupts on the current CPU, but we are
1495	 * contending with other threads, to exclude them with a mutex.
1496	 */
1497	if (__predict_false(cold))
1498		s = splhigh();
1499	else
1500		mutex_enter(&E->lock);
1501
1502	/* Wait until there is enough entropy in the system.  */
1503	error = 0;
1504	if (E->bitsneeded > 0 && E->samplesneeded == 0) {
1505		/*
1506		 * We don't have full entropy from reliable sources,
1507		 * but we gathered a plausible number of samples from
1508		 * other sources such as timers.  Try asking for more
1509		 * from any sources we can, but don't worry if it
1510		 * fails -- best effort.
1511		 */
1512		(void)entropy_request(ENTROPY_CAPACITY, flags);
1513	} else while (E->bitsneeded > 0 && E->samplesneeded > 0) {
1514		/* Ask for more, synchronously if possible.  */
1515		error = entropy_request(len, flags);
1516		if (error)
1517			break;
1518
1519		/* If we got enough, we're done.  */
1520		if (E->bitsneeded == 0 || E->samplesneeded == 0) {
1521			KASSERT(error == 0);
1522			break;
1523		}
1524
1525		/* If not waiting, stop here.  */
1526		if (!ISSET(flags, ENTROPY_WAIT)) {
1527			error = EWOULDBLOCK;
1528			break;
1529		}
1530
1531		/* Wait for some entropy to come in and try again.  */
1532		KASSERT(!cold);
1533		if (!printed) {
1534			printf("entropy: pid %d (%s) waiting for entropy(7)\n",
1535			    curproc->p_pid, curproc->p_comm);
1536			printed = true;
1537		}
1538
1539		if (ISSET(flags, ENTROPY_SIG)) {
1540			error = cv_timedwait_sig(&E->cv, &E->lock, hz);
1541			if (error && error != EWOULDBLOCK)
1542				break;
1543		} else {
1544			cv_timedwait(&E->cv, &E->lock, hz);
1545		}
1546	}
1547
1548	/*
1549	 * Count failure -- but fill the buffer nevertheless, unless
1550	 * the caller specified ENTROPY_HARDFAIL.
1551	 */
1552	if (error) {
1553		if (ISSET(flags, ENTROPY_HARDFAIL))
1554			goto out;
1555		entropy_extract_fail_evcnt.ev_count++;
1556	}
1557
1558	/*
1559	 * Report a warning if we haven't yet reached full entropy.
1560	 * This is the only case where we consider entropy to be
1561	 * `depleted' without kern.entropy.depletion enabled -- when we
1562	 * only have partial entropy, an adversary may be able to
1563	 * narrow the state of the pool down to a small number of
1564	 * possibilities; the output then enables them to confirm a
1565	 * guess, reducing its entropy from the adversary's perspective
1566	 * to zero.
1567	 *
1568	 * This should only happen if the operator has chosen to
1569	 * consolidate, either through sysctl kern.entropy.consolidate
1570	 * or by writing less than full entropy to /dev/random as root
1571	 * (which /dev/random promises will immediately affect
1572	 * subsequent output, for better or worse).
1573	 */
1574	if (E->bitsneeded > 0 && E->samplesneeded > 0) {
1575		if (__predict_false(E->epoch == (unsigned)-1) &&
1576		    ratecheck(&lasttime, &interval)) {
1577			printf("WARNING:"
1578			    " system needs entropy for security;"
1579			    " see entropy(7)\n");
1580		}
1581		atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
1582		atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
1583	}
1584
1585	/* Extract data from the pool, and `deplete' if we're doing that.  */
1586	entpool_extract(&E->pool, buf, len);
1587	if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
1588	    error == 0) {
1589		unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
1590		unsigned bitsneeded = E->bitsneeded;
1591		unsigned samplesneeded = E->samplesneeded;
1592
1593		bitsneeded += MIN(MINENTROPYBITS - bitsneeded, cost);
1594		samplesneeded += MIN(MINSAMPLES - samplesneeded, cost);
1595
1596		atomic_store_relaxed(&E->bitsneeded, bitsneeded);
1597		atomic_store_relaxed(&E->samplesneeded, samplesneeded);
1598		entropy_deplete_evcnt.ev_count++;
1599	}
1600
1601out:	/* Release the global lock and return the error.  */
1602	if (__predict_false(cold))
1603		splx(s);
1604	else
1605		mutex_exit(&E->lock);
1606	return error;
1607}
1608
1609/*
1610 * entropy_poll(events)
1611 *
1612 *	Return the subset of events ready, and if it is not all of
1613 *	events, record curlwp as waiting for entropy.
1614 */
1615int
1616entropy_poll(int events)
1617{
1618	int revents = 0;
1619
1620	KASSERT(!cold);
1621
1622	/* Always ready for writing.  */
1623	revents |= events & (POLLOUT|POLLWRNORM);
1624
1625	/* Narrow it down to reads.  */
1626	events &= POLLIN|POLLRDNORM;
1627	if (events == 0)
1628		return revents;
1629
1630	/*
1631	 * If we have reached full entropy and we're not depleting
1632	 * entropy, we are forever ready.
1633	 */
1634	if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0 ||
1635		atomic_load_relaxed(&E->samplesneeded) == 0) &&
1636	    __predict_true(!atomic_load_relaxed(&entropy_depletion)))
1637		return revents | events;
1638
1639	/*
1640	 * Otherwise, check whether we need entropy under the lock.  If
1641	 * we don't, we're ready; if we do, add ourselves to the queue.
1642	 */
1643	mutex_enter(&E->lock);
1644	if (E->bitsneeded == 0 || E->samplesneeded == 0)
1645		revents |= events;
1646	else
1647		selrecord(curlwp, &E->selq);
1648	mutex_exit(&E->lock);
1649
1650	return revents;
1651}
1652
1653/*
1654 * filt_entropy_read_detach(kn)
1655 *
1656 *	struct filterops::f_detach callback for entropy read events:
1657 *	remove kn from the list of waiters.
1658 */
1659static void
1660filt_entropy_read_detach(struct knote *kn)
1661{
1662
1663	KASSERT(!cold);
1664
1665	mutex_enter(&E->lock);
1666	selremove_knote(&E->selq, kn);
1667	mutex_exit(&E->lock);
1668}
1669
1670/*
1671 * filt_entropy_read_event(kn, hint)
1672 *
1673 *	struct filterops::f_event callback for entropy read events:
1674 *	poll for entropy.  Caller must hold the global entropy lock if
1675 *	hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
1676 */
1677static int
1678filt_entropy_read_event(struct knote *kn, long hint)
1679{
1680	int ret;
1681
1682	KASSERT(!cold);
1683
1684	/* Acquire the lock, if caller is outside entropy subsystem.  */
1685	if (hint == NOTE_SUBMIT)
1686		KASSERT(mutex_owned(&E->lock));
1687	else
1688		mutex_enter(&E->lock);
1689
1690	/*
1691	 * If we still need entropy, can't read anything; if not, can
1692	 * read arbitrarily much.
1693	 */
1694	if (E->bitsneeded != 0 && E->samplesneeded != 0) {
1695		ret = 0;
1696	} else {
1697		if (atomic_load_relaxed(&entropy_depletion))
1698			kn->kn_data = ENTROPY_CAPACITY; /* bytes */
1699		else
1700			kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
1701		ret = 1;
1702	}
1703
1704	/* Release the lock, if caller is outside entropy subsystem.  */
1705	if (hint == NOTE_SUBMIT)
1706		KASSERT(mutex_owned(&E->lock));
1707	else
1708		mutex_exit(&E->lock);
1709
1710	return ret;
1711}
1712
1713/* XXX Makes sense only for /dev/u?random.  */
1714static const struct filterops entropy_read_filtops = {
1715	.f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
1716	.f_attach = NULL,
1717	.f_detach = filt_entropy_read_detach,
1718	.f_event = filt_entropy_read_event,
1719};
1720
1721/*
1722 * entropy_kqfilter(kn)
1723 *
1724 *	Register kn to receive entropy event notifications.  May be
1725 *	EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
1726 */
1727int
1728entropy_kqfilter(struct knote *kn)
1729{
1730
1731	KASSERT(!cold);
1732
1733	switch (kn->kn_filter) {
1734	case EVFILT_READ:
1735		/* Enter into the global select queue.  */
1736		mutex_enter(&E->lock);
1737		kn->kn_fop = &entropy_read_filtops;
1738		selrecord_knote(&E->selq, kn);
1739		mutex_exit(&E->lock);
1740		return 0;
1741	case EVFILT_WRITE:
1742		/* Can always dump entropy into the system.  */
1743		kn->kn_fop = &seltrue_filtops;
1744		return 0;
1745	default:
1746		return EINVAL;
1747	}
1748}
1749
1750/*
1751 * rndsource_setcb(rs, get, getarg)
1752 *
1753 *	Set the request callback for the entropy source rs, if it can
1754 *	provide entropy on demand.  Must precede rnd_attach_source.
1755 */
1756void
1757rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
1758    void *getarg)
1759{
1760
1761	rs->get = get;
1762	rs->getarg = getarg;
1763}
1764
1765/*
1766 * rnd_attach_source(rs, name, type, flags)
1767 *
1768 *	Attach the entropy source rs.  Must be done after
1769 *	rndsource_setcb, if any, and before any calls to rnd_add_data.
1770 */
1771void
1772rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
1773    uint32_t flags)
1774{
1775	uint32_t extra[4];
1776	unsigned i = 0;
1777
1778	KASSERTMSG(name[0] != '\0', "rndsource must have nonempty name");
1779
1780	/* Grab cycle counter to mix extra into the pool.  */
1781	extra[i++] = entropy_timer();
1782
1783	/*
1784	 * Apply some standard flags:
1785	 *
1786	 * - We do not bother with network devices by default, for
1787	 *   hysterical raisins (perhaps: because it is often the case
1788	 *   that an adversary can influence network packet timings).
1789	 */
1790	switch (type) {
1791	case RND_TYPE_NET:
1792		flags |= RND_FLAG_NO_COLLECT;
1793		break;
1794	}
1795
1796	/* Sanity-check the callback if RND_FLAG_HASCB is set.  */
1797	KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
1798
1799	/* Initialize the random source.  */
1800	memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
1801	strlcpy(rs->name, name, sizeof(rs->name));
1802	memset(&rs->time_delta, 0, sizeof(rs->time_delta));
1803	memset(&rs->value_delta, 0, sizeof(rs->value_delta));
1804	rs->total = 0;
1805	rs->type = type;
1806	rs->flags = flags;
1807	if (entropy_percpu != NULL)
1808		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
1809	extra[i++] = entropy_timer();
1810
1811	/* Wire it into the global list of random sources.  */
1812	if (__predict_true(!cold))
1813		mutex_enter(&E->lock);
1814	LIST_INSERT_HEAD(&E->sources, rs, list);
1815	if (__predict_true(!cold))
1816		mutex_exit(&E->lock);
1817	extra[i++] = entropy_timer();
1818
1819	/* Request that it provide entropy ASAP, if we can.  */
1820	if (ISSET(flags, RND_FLAG_HASCB))
1821		(*rs->get)(ENTROPY_CAPACITY, rs->getarg);
1822	extra[i++] = entropy_timer();
1823
1824	/* Mix the extra into the pool.  */
1825	KASSERT(i == __arraycount(extra));
1826	entropy_enter(extra, sizeof extra, 0, /*count*/__predict_true(!cold));
1827	explicit_memset(extra, 0, sizeof extra);
1828}
1829
1830/*
1831 * rnd_detach_source(rs)
1832 *
1833 *	Detach the entropy source rs.  May sleep waiting for users to
1834 *	drain.  Further use is not allowed.
1835 */
1836void
1837rnd_detach_source(struct krndsource *rs)
1838{
1839
1840	/*
1841	 * If we're cold (shouldn't happen, but hey), just remove it
1842	 * from the list -- there's nothing allocated.
1843	 */
1844	if (__predict_false(cold) && entropy_percpu == NULL) {
1845		LIST_REMOVE(rs, list);
1846		return;
1847	}
1848
1849	/* We may have to wait for entropy_request.  */
1850	ASSERT_SLEEPABLE();
1851
1852	/* Wait until the source list is not in use, and remove it.  */
1853	mutex_enter(&E->lock);
1854	while (E->sourcelock)
1855		cv_wait(&E->sourcelock_cv, &E->lock);
1856	LIST_REMOVE(rs, list);
1857	mutex_exit(&E->lock);
1858
1859	/* Free the per-CPU data.  */
1860	percpu_free(rs->state, sizeof(struct rndsource_cpu));
1861}
1862
1863/*
1864 * rnd_lock_sources(flags)
1865 *
1866 *	Lock the list of entropy sources.  Caller must hold the global
1867 *	entropy lock.  If successful, no rndsource will go away until
1868 *	rnd_unlock_sources even while the caller releases the global
1869 *	entropy lock.
1870 *
1871 *	May be called very early at boot, before entropy_init.
1872 *
1873 *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1874 *	If flags & ENTROPY_SIG, allow interruption by signal.
1875 */
1876static int __attribute__((warn_unused_result))
1877rnd_lock_sources(int flags)
1878{
1879	int error;
1880
1881	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1882	KASSERT(!cpu_intr_p());
1883
1884	while (E->sourcelock) {
1885		KASSERT(!cold);
1886		if (!ISSET(flags, ENTROPY_WAIT))
1887			return EWOULDBLOCK;
1888		if (ISSET(flags, ENTROPY_SIG)) {
1889			error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
1890			if (error)
1891				return error;
1892		} else {
1893			cv_wait(&E->sourcelock_cv, &E->lock);
1894		}
1895	}
1896
1897	E->sourcelock = curlwp;
1898	return 0;
1899}
1900
1901/*
1902 * rnd_unlock_sources()
1903 *
1904 *	Unlock the list of sources after rnd_lock_sources.  Caller must
1905 *	hold the global entropy lock.
1906 *
1907 *	May be called very early at boot, before entropy_init.
1908 */
1909static void
1910rnd_unlock_sources(void)
1911{
1912
1913	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1914	KASSERT(!cpu_intr_p());
1915
1916	KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
1917	    curlwp, E->sourcelock);
1918	E->sourcelock = NULL;
1919	if (__predict_true(!cold))
1920		cv_signal(&E->sourcelock_cv);
1921}
1922
1923/*
1924 * rnd_sources_locked()
1925 *
1926 *	True if we hold the list of rndsources locked, for diagnostic
1927 *	assertions.
1928 *
1929 *	May be called very early at boot, before entropy_init.
1930 */
1931static bool __diagused
1932rnd_sources_locked(void)
1933{
1934
1935	return E->sourcelock == curlwp;
1936}
1937
1938/*
1939 * entropy_request(nbytes, flags)
1940 *
1941 *	Request nbytes bytes of entropy from all sources in the system.
1942 *	OK if we overdo it.  Caller must hold the global entropy lock;
1943 *	will release and re-acquire it.
1944 *
1945 *	May be called very early at boot, before entropy_init.
1946 *
1947 *	If flags & ENTROPY_WAIT, wait for concurrent access to finish.
1948 *	If flags & ENTROPY_SIG, allow interruption by signal.
1949 */
1950static int
1951entropy_request(size_t nbytes, int flags)
1952{
1953	struct krndsource *rs;
1954	int error;
1955
1956	KASSERT(__predict_false(cold) || mutex_owned(&E->lock));
1957	KASSERT(!cpu_intr_p());
1958	if ((flags & ENTROPY_WAIT) != 0 && __predict_false(!cold))
1959		ASSERT_SLEEPABLE();
1960
1961	/*
1962	 * Lock the list of entropy sources to block rnd_detach_source
1963	 * until we're done, and to serialize calls to the entropy
1964	 * callbacks as guaranteed to drivers.
1965	 */
1966	error = rnd_lock_sources(flags);
1967	if (error)
1968		return error;
1969	entropy_request_evcnt.ev_count++;
1970
1971	/* Clamp to the maximum reasonable request.  */
1972	nbytes = MIN(nbytes, ENTROPY_CAPACITY);
1973
1974	/* Walk the list of sources.  */
1975	LIST_FOREACH(rs, &E->sources, list) {
1976		/* Skip sources without callbacks.  */
1977		if (!ISSET(rs->flags, RND_FLAG_HASCB))
1978			continue;
1979
1980		/*
1981		 * Skip sources that are disabled altogether -- we
1982		 * would just ignore their samples anyway.
1983		 */
1984		if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
1985			continue;
1986
1987		/* Drop the lock while we call the callback.  */
1988		if (__predict_true(!cold))
1989			mutex_exit(&E->lock);
1990		(*rs->get)(nbytes, rs->getarg);
1991		if (__predict_true(!cold))
1992			mutex_enter(&E->lock);
1993	}
1994
1995	/* Request done; unlock the list of entropy sources.  */
1996	rnd_unlock_sources();
1997	return 0;
1998}
1999
2000static inline uint32_t
2001rnd_delta_estimate(rnd_delta_t *d, uint32_t v, int32_t delta)
2002{
2003	int32_t delta2, delta3;
2004
2005	/*
2006	 * Calculate the second and third order differentials
2007	 */
2008	delta2 = d->dx - delta;
2009	if (delta2 < 0)
2010		delta2 = -delta2; /* XXX arithmetic overflow */
2011
2012	delta3 = d->d2x - delta2;
2013	if (delta3 < 0)
2014		delta3 = -delta3; /* XXX arithmetic overflow */
2015
2016	d->x = v;
2017	d->dx = delta;
2018	d->d2x = delta2;
2019
2020	/*
2021	 * If any delta is 0, we got no entropy.  If all are non-zero, we
2022	 * might have something.
2023	 */
2024	if (delta == 0 || delta2 == 0 || delta3 == 0)
2025		return 0;
2026
2027	return 1;
2028}
2029
2030static inline uint32_t
2031rnd_dt_estimate(struct krndsource *rs, uint32_t t)
2032{
2033	int32_t delta;
2034	uint32_t ret;
2035	rnd_delta_t *d;
2036	struct rndsource_cpu *rc;
2037
2038	rc = percpu_getref(rs->state);
2039	d = &rc->rc_timedelta;
2040
2041	if (t < d->x) {
2042		delta = UINT32_MAX - d->x + t;
2043	} else {
2044		delta = d->x - t;
2045	}
2046
2047	if (delta < 0) {
2048		delta = -delta;	/* XXX arithmetic overflow */
2049	}
2050
2051	ret = rnd_delta_estimate(d, t, delta);
2052
2053	KASSERT(d->x == t);
2054	KASSERT(d->dx == delta);
2055	percpu_putref(rs->state);
2056	return ret;
2057}
2058
2059/*
2060 * rnd_add_uint32(rs, value)
2061 *
2062 *	Enter 32 bits of data from an entropy source into the pool.
2063 *
2064 *	May be called from any context or with spin locks held, but may
2065 *	drop data.
2066 *
2067 *	This is meant for cheaply taking samples from devices that
2068 *	aren't designed to be hardware random number generators.
2069 */
2070void
2071rnd_add_uint32(struct krndsource *rs, uint32_t value)
2072{
2073	bool intr_p = true;
2074
2075	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
2076}
2077
2078void
2079_rnd_add_uint32(struct krndsource *rs, uint32_t value)
2080{
2081	bool intr_p = true;
2082
2083	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
2084}
2085
2086void
2087_rnd_add_uint64(struct krndsource *rs, uint64_t value)
2088{
2089	bool intr_p = true;
2090
2091	rnd_add_data_internal(rs, &value, sizeof value, 0, intr_p);
2092}
2093
2094/*
2095 * rnd_add_data(rs, buf, len, entropybits)
2096 *
2097 *	Enter data from an entropy source into the pool, with a
2098 *	driver's estimate of how much entropy the physical source of
2099 *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
2100 *	estimate and treat it as zero.
2101 *
2102 *	rs MAY but SHOULD NOT be NULL.  If rs is NULL, MUST NOT be
2103 *	called from interrupt context or with spin locks held.
2104 *
2105 *	If rs is non-NULL, MAY but SHOULD NOT be called from interrupt
2106 *	context, in which case act like rnd_add_data_intr -- if the
2107 *	sample buffer is full, schedule a softint and drop any
2108 *	additional data on the floor.  (This may change later once we
2109 *	fix drivers that still call this from interrupt context to use
2110 *	rnd_add_data_intr instead.)  MUST NOT be called with spin locks
2111 *	held if not in hard interrupt context -- i.e., MUST NOT be
2112 *	called in thread context or softint context with spin locks
2113 *	held.
2114 */
2115void
2116rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
2117    uint32_t entropybits)
2118{
2119	bool intr_p = cpu_intr_p(); /* XXX make this unconditionally false */
2120
2121	/*
2122	 * Weird legacy exception that we should rip out and replace by
2123	 * creating new rndsources to attribute entropy to the callers:
2124	 * If there's no rndsource, just enter the data and time now.
2125	 */
2126	if (rs == NULL) {
2127		uint32_t extra;
2128
2129		KASSERT(!intr_p);
2130		KASSERTMSG(howmany(entropybits, NBBY) <= len,
2131		    "%s: impossible entropy rate:"
2132		    " %"PRIu32" bits in %"PRIu32"-byte string",
2133		    rs ? rs->name : "(anonymous)", entropybits, len);
2134		entropy_enter(buf, len, entropybits, /*count*/false);
2135		extra = entropy_timer();
2136		entropy_enter(&extra, sizeof extra, 0, /*count*/false);
2137		explicit_memset(&extra, 0, sizeof extra);
2138		return;
2139	}
2140
2141	rnd_add_data_internal(rs, buf, len, entropybits, intr_p);
2142}
2143
2144/*
2145 * rnd_add_data_intr(rs, buf, len, entropybits)
2146 *
2147 *	Try to enter data from an entropy source into the pool, with a
2148 *	driver's estimate of how much entropy the physical source of
2149 *	the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
2150 *	estimate and treat it as zero.  If the sample buffer is full,
2151 *	schedule a softint and drop any additional data on the floor.
2152 */
2153void
2154rnd_add_data_intr(struct krndsource *rs, const void *buf, uint32_t len,
2155    uint32_t entropybits)
2156{
2157	bool intr_p = true;
2158
2159	rnd_add_data_internal(rs, buf, len, entropybits, intr_p);
2160}
2161
2162/*
2163 * rnd_add_data_internal(rs, buf, len, entropybits, intr_p)
2164 *
2165 *	Internal subroutine to decide whether or not to enter data or
2166 *	timing for a particular rndsource, and if so, to enter it.
2167 *
2168 *	intr_p is true for callers from interrupt context or spin locks
2169 *	held, and false for callers from thread or soft interrupt
2170 *	context and no spin locks held.
2171 */
2172static void
2173rnd_add_data_internal(struct krndsource *rs, const void *buf, uint32_t len,
2174    uint32_t entropybits, bool intr_p)
2175{
2176	uint32_t flags;
2177
2178	KASSERTMSG(howmany(entropybits, NBBY) <= len,
2179	    "%s: impossible entropy rate:"
2180	    " %"PRIu32" bits in %"PRIu32"-byte string",
2181	    rs ? rs->name : "(anonymous)", entropybits, len);
2182
2183	/*
2184	 * Hold up the reset xcall before it zeroes the entropy counts
2185	 * on this CPU or globally.  Otherwise, we might leave some
2186	 * nonzero entropy attributed to an untrusted source in the
2187	 * event of a race with a change to flags.
2188	 */
2189	kpreempt_disable();
2190
2191	/* Load a snapshot of the flags.  Ioctl may change them under us.  */
2192	flags = atomic_load_relaxed(&rs->flags);
2193
2194	/*
2195	 * Skip if:
2196	 * - we're not collecting entropy, or
2197	 * - the operator doesn't want to collect entropy from this, or
2198	 * - neither data nor timings are being collected from this.
2199	 */
2200	if (!atomic_load_relaxed(&entropy_collection) ||
2201	    ISSET(flags, RND_FLAG_NO_COLLECT) ||
2202	    !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
2203		goto out;
2204
2205	/* If asked, ignore the estimate.  */
2206	if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
2207		entropybits = 0;
2208
2209	/* If we are collecting data, enter them.  */
2210	if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) {
2211		rnd_add_data_1(rs, buf, len, entropybits, /*count*/false,
2212		    RND_FLAG_COLLECT_VALUE, intr_p);
2213	}
2214
2215	/* If we are collecting timings, enter one.  */
2216	if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
2217		uint32_t extra;
2218		bool count;
2219
2220		/* Sample a timer.  */
2221		extra = entropy_timer();
2222
2223		/* If asked, do entropy estimation on the time.  */
2224		if ((flags & (RND_FLAG_ESTIMATE_TIME|RND_FLAG_NO_ESTIMATE)) ==
2225		    RND_FLAG_ESTIMATE_TIME && __predict_true(!cold))
2226			count = rnd_dt_estimate(rs, extra);
2227		else
2228			count = false;
2229
2230		rnd_add_data_1(rs, &extra, sizeof extra, 0, count,
2231		    RND_FLAG_COLLECT_TIME, intr_p);
2232	}
2233
2234out:	/* Allow concurrent changes to flags to finish.  */
2235	kpreempt_enable();
2236}
2237
2238static unsigned
2239add_sat(unsigned a, unsigned b)
2240{
2241	unsigned c = a + b;
2242
2243	return (c < a ? UINT_MAX : c);
2244}
2245
2246/*
2247 * rnd_add_data_1(rs, buf, len, entropybits, count, flag)
2248 *
2249 *	Internal subroutine to call either entropy_enter_intr, if we're
2250 *	in interrupt context, or entropy_enter if not, and to count the
2251 *	entropy in an rndsource.
2252 */
2253static void
2254rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
2255    uint32_t entropybits, bool count, uint32_t flag, bool intr_p)
2256{
2257	bool fullyused;
2258
2259	/*
2260	 * For the interrupt-like path, use entropy_enter_intr and take
2261	 * note of whether it consumed the full sample; otherwise, use
2262	 * entropy_enter, which always consumes the full sample.
2263	 */
2264	if (intr_p) {
2265		fullyused = entropy_enter_intr(buf, len, entropybits, count);
2266	} else {
2267		entropy_enter(buf, len, entropybits, count);
2268		fullyused = true;
2269	}
2270
2271	/*
2272	 * If we used the full sample, note how many bits were
2273	 * contributed from this source.
2274	 */
2275	if (fullyused) {
2276		if (__predict_false(cold)) {
2277			const int s = splhigh();
2278			rs->total = add_sat(rs->total, entropybits);
2279			switch (flag) {
2280			case RND_FLAG_COLLECT_TIME:
2281				rs->time_delta.insamples =
2282				    add_sat(rs->time_delta.insamples, 1);
2283				break;
2284			case RND_FLAG_COLLECT_VALUE:
2285				rs->value_delta.insamples =
2286				    add_sat(rs->value_delta.insamples, 1);
2287				break;
2288			}
2289			splx(s);
2290		} else {
2291			struct rndsource_cpu *rc = percpu_getref(rs->state);
2292
2293			atomic_store_relaxed(&rc->rc_entropybits,
2294			    add_sat(rc->rc_entropybits, entropybits));
2295			switch (flag) {
2296			case RND_FLAG_COLLECT_TIME:
2297				atomic_store_relaxed(&rc->rc_timesamples,
2298				    add_sat(rc->rc_timesamples, 1));
2299				break;
2300			case RND_FLAG_COLLECT_VALUE:
2301				atomic_store_relaxed(&rc->rc_datasamples,
2302				    add_sat(rc->rc_datasamples, 1));
2303				break;
2304			}
2305			percpu_putref(rs->state);
2306		}
2307	}
2308}
2309
2310/*
2311 * rnd_add_data_sync(rs, buf, len, entropybits)
2312 *
2313 *	Same as rnd_add_data.  Originally used in rndsource callbacks,
2314 *	to break an unnecessary cycle; no longer really needed.
2315 */
2316void
2317rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
2318    uint32_t entropybits)
2319{
2320
2321	rnd_add_data(rs, buf, len, entropybits);
2322}
2323
2324/*
2325 * rndsource_entropybits(rs)
2326 *
2327 *	Return approximately the number of bits of entropy that have
2328 *	been contributed via rs so far.  Approximate if other CPUs may
2329 *	be calling rnd_add_data concurrently.
2330 */
2331static unsigned
2332rndsource_entropybits(struct krndsource *rs)
2333{
2334	unsigned nbits = rs->total;
2335
2336	KASSERT(!cold);
2337	KASSERT(rnd_sources_locked());
2338	percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
2339	return nbits;
2340}
2341
2342static void
2343rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2344{
2345	struct rndsource_cpu *rc = ptr;
2346	unsigned *nbitsp = cookie;
2347	unsigned cpu_nbits;
2348
2349	cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
2350	*nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
2351}
2352
2353/*
2354 * rndsource_to_user(rs, urs)
2355 *
2356 *	Copy a description of rs out to urs for userland.
2357 */
2358static void
2359rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
2360{
2361
2362	KASSERT(!cold);
2363	KASSERT(rnd_sources_locked());
2364
2365	/* Avoid kernel memory disclosure.  */
2366	memset(urs, 0, sizeof(*urs));
2367
2368	CTASSERT(sizeof(urs->name) == sizeof(rs->name));
2369	strlcpy(urs->name, rs->name, sizeof(urs->name));
2370	urs->total = rndsource_entropybits(rs);
2371	urs->type = rs->type;
2372	urs->flags = atomic_load_relaxed(&rs->flags);
2373}
2374
2375/*
2376 * rndsource_to_user_est(rs, urse)
2377 *
2378 *	Copy a description of rs and estimation statistics out to urse
2379 *	for userland.
2380 */
2381static void
2382rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
2383{
2384
2385	KASSERT(!cold);
2386	KASSERT(rnd_sources_locked());
2387
2388	/* Avoid kernel memory disclosure.  */
2389	memset(urse, 0, sizeof(*urse));
2390
2391	/* Copy out the rndsource description.  */
2392	rndsource_to_user(rs, &urse->rt);
2393
2394	/* Gather the statistics.  */
2395	urse->dt_samples = rs->time_delta.insamples;
2396	urse->dt_total = 0;
2397	urse->dv_samples = rs->value_delta.insamples;
2398	urse->dv_total = urse->rt.total;
2399	percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
2400}
2401
2402static void
2403rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
2404{
2405	struct rndsource_cpu *rc = ptr;
2406	rndsource_est_t *urse = cookie;
2407
2408	urse->dt_samples = add_sat(urse->dt_samples,
2409	    atomic_load_relaxed(&rc->rc_timesamples));
2410	urse->dv_samples = add_sat(urse->dv_samples,
2411	    atomic_load_relaxed(&rc->rc_datasamples));
2412}
2413
2414/*
2415 * entropy_reset_xc(arg1, arg2)
2416 *
2417 *	Reset the current CPU's pending entropy to zero.
2418 */
2419static void
2420entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
2421{
2422	uint32_t extra = entropy_timer();
2423	struct entropy_cpu_lock lock;
2424	struct entropy_cpu *ec;
2425
2426	/*
2427	 * With the per-CPU state locked, zero the pending count and
2428	 * enter a cycle count for fun.
2429	 */
2430	ec = entropy_cpu_get(&lock);
2431	ec->ec_bitspending = 0;
2432	ec->ec_samplespending = 0;
2433	entpool_enter(ec->ec_pool, &extra, sizeof extra);
2434	entropy_cpu_put(&lock, ec);
2435}
2436
2437/*
2438 * entropy_ioctl(cmd, data)
2439 *
2440 *	Handle various /dev/random ioctl queries.
2441 */
2442int
2443entropy_ioctl(unsigned long cmd, void *data)
2444{
2445	struct krndsource *rs;
2446	bool privileged;
2447	int error;
2448
2449	KASSERT(!cold);
2450
2451	/* Verify user's authorization to perform the ioctl.  */
2452	switch (cmd) {
2453	case RNDGETENTCNT:
2454	case RNDGETPOOLSTAT:
2455	case RNDGETSRCNUM:
2456	case RNDGETSRCNAME:
2457	case RNDGETESTNUM:
2458	case RNDGETESTNAME:
2459		error = kauth_authorize_device(kauth_cred_get(),
2460		    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
2461		break;
2462	case RNDCTL:
2463		error = kauth_authorize_device(kauth_cred_get(),
2464		    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
2465		break;
2466	case RNDADDDATA:
2467		error = kauth_authorize_device(kauth_cred_get(),
2468		    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
2469		/* Ascertain whether the user's inputs should be counted.  */
2470		if (kauth_authorize_device(kauth_cred_get(),
2471			KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
2472			NULL, NULL, NULL, NULL) == 0)
2473			privileged = true;
2474		break;
2475	default: {
2476		/*
2477		 * XXX Hack to avoid changing module ABI so this can be
2478		 * pulled up.  Later, we can just remove the argument.
2479		 */
2480		static const struct fileops fops = {
2481			.fo_ioctl = rnd_system_ioctl,
2482		};
2483		struct file f = {
2484			.f_ops = &fops,
2485		};
2486		MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
2487		    enosys(), error);
2488#if defined(_LP64)
2489		if (error == ENOSYS)
2490			MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
2491			    enosys(), error);
2492#endif
2493		if (error == ENOSYS)
2494			error = ENOTTY;
2495		break;
2496	}
2497	}
2498
2499	/* If anything went wrong with authorization, stop here.  */
2500	if (error)
2501		return error;
2502
2503	/* Dispatch on the command.  */
2504	switch (cmd) {
2505	case RNDGETENTCNT: {	/* Get current entropy count in bits.  */
2506		uint32_t *countp = data;
2507
2508		mutex_enter(&E->lock);
2509		*countp = MINENTROPYBITS - E->bitsneeded;
2510		mutex_exit(&E->lock);
2511
2512		break;
2513	}
2514	case RNDGETPOOLSTAT: {	/* Get entropy pool statistics.  */
2515		rndpoolstat_t *pstat = data;
2516
2517		mutex_enter(&E->lock);
2518
2519		/* parameters */
2520		pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
2521		pstat->threshold = MINENTROPYBITS/NBBY; /* bytes */
2522		pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
2523
2524		/* state */
2525		pstat->added = 0; /* XXX total entropy_enter count */
2526		pstat->curentropy = MINENTROPYBITS - E->bitsneeded; /* bits */
2527		pstat->removed = 0; /* XXX total entropy_extract count */
2528		pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
2529
2530		/*
2531		 * This used to be bits of data fabricated in some
2532		 * sense; we'll take it to mean number of samples,
2533		 * excluding the bits of entropy from HWRNG or seed.
2534		 */
2535		pstat->generated = MINSAMPLES - E->samplesneeded;
2536		pstat->generated -= MIN(pstat->generated, pstat->curentropy);
2537
2538		mutex_exit(&E->lock);
2539		break;
2540	}
2541	case RNDGETSRCNUM: {	/* Get entropy sources by number.  */
2542		rndstat_t *stat = data;
2543		uint32_t start = 0, i = 0;
2544
2545		/* Skip if none requested; fail if too many requested.  */
2546		if (stat->count == 0)
2547			break;
2548		if (stat->count > RND_MAXSTATCOUNT)
2549			return EINVAL;
2550
2551		/*
2552		 * Under the lock, find the first one, copy out as many
2553		 * as requested, and report how many we copied out.
2554		 */
2555		mutex_enter(&E->lock);
2556		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2557		if (error) {
2558			mutex_exit(&E->lock);
2559			return error;
2560		}
2561		LIST_FOREACH(rs, &E->sources, list) {
2562			if (start++ == stat->start)
2563				break;
2564		}
2565		while (i < stat->count && rs != NULL) {
2566			mutex_exit(&E->lock);
2567			rndsource_to_user(rs, &stat->source[i++]);
2568			mutex_enter(&E->lock);
2569			rs = LIST_NEXT(rs, list);
2570		}
2571		KASSERT(i <= stat->count);
2572		stat->count = i;
2573		rnd_unlock_sources();
2574		mutex_exit(&E->lock);
2575		break;
2576	}
2577	case RNDGETESTNUM: {	/* Get sources and estimates by number.  */
2578		rndstat_est_t *estat = data;
2579		uint32_t start = 0, i = 0;
2580
2581		/* Skip if none requested; fail if too many requested.  */
2582		if (estat->count == 0)
2583			break;
2584		if (estat->count > RND_MAXSTATCOUNT)
2585			return EINVAL;
2586
2587		/*
2588		 * Under the lock, find the first one, copy out as many
2589		 * as requested, and report how many we copied out.
2590		 */
2591		mutex_enter(&E->lock);
2592		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2593		if (error) {
2594			mutex_exit(&E->lock);
2595			return error;
2596		}
2597		LIST_FOREACH(rs, &E->sources, list) {
2598			if (start++ == estat->start)
2599				break;
2600		}
2601		while (i < estat->count && rs != NULL) {
2602			mutex_exit(&E->lock);
2603			rndsource_to_user_est(rs, &estat->source[i++]);
2604			mutex_enter(&E->lock);
2605			rs = LIST_NEXT(rs, list);
2606		}
2607		KASSERT(i <= estat->count);
2608		estat->count = i;
2609		rnd_unlock_sources();
2610		mutex_exit(&E->lock);
2611		break;
2612	}
2613	case RNDGETSRCNAME: {	/* Get entropy sources by name.  */
2614		rndstat_name_t *nstat = data;
2615		const size_t n = sizeof(rs->name);
2616
2617		CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
2618
2619		/*
2620		 * Under the lock, search by name.  If found, copy it
2621		 * out; if not found, fail with ENOENT.
2622		 */
2623		mutex_enter(&E->lock);
2624		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2625		if (error) {
2626			mutex_exit(&E->lock);
2627			return error;
2628		}
2629		LIST_FOREACH(rs, &E->sources, list) {
2630			if (strncmp(rs->name, nstat->name, n) == 0)
2631				break;
2632		}
2633		if (rs != NULL) {
2634			mutex_exit(&E->lock);
2635			rndsource_to_user(rs, &nstat->source);
2636			mutex_enter(&E->lock);
2637		} else {
2638			error = ENOENT;
2639		}
2640		rnd_unlock_sources();
2641		mutex_exit(&E->lock);
2642		break;
2643	}
2644	case RNDGETESTNAME: {	/* Get sources and estimates by name.  */
2645		rndstat_est_name_t *enstat = data;
2646		const size_t n = sizeof(rs->name);
2647
2648		CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
2649
2650		/*
2651		 * Under the lock, search by name.  If found, copy it
2652		 * out; if not found, fail with ENOENT.
2653		 */
2654		mutex_enter(&E->lock);
2655		error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
2656		if (error) {
2657			mutex_exit(&E->lock);
2658			return error;
2659		}
2660		LIST_FOREACH(rs, &E->sources, list) {
2661			if (strncmp(rs->name, enstat->name, n) == 0)
2662				break;
2663		}
2664		if (rs != NULL) {
2665			mutex_exit(&E->lock);
2666			rndsource_to_user_est(rs, &enstat->source);
2667			mutex_enter(&E->lock);
2668		} else {
2669			error = ENOENT;
2670		}
2671		rnd_unlock_sources();
2672		mutex_exit(&E->lock);
2673		break;
2674	}
2675	case RNDCTL: {		/* Modify entropy source flags.  */
2676		rndctl_t *rndctl = data;
2677		const size_t n = sizeof(rs->name);
2678		uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2679		uint32_t flags;
2680		bool reset = false, request = false;
2681
2682		CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
2683
2684		/* Whitelist the flags that user can change.  */
2685		rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
2686
2687		/*
2688		 * For each matching rndsource, either by type if
2689		 * specified or by name if not, set the masked flags.
2690		 */
2691		mutex_enter(&E->lock);
2692		LIST_FOREACH(rs, &E->sources, list) {
2693			if (rndctl->type != 0xff) {
2694				if (rs->type != rndctl->type)
2695					continue;
2696			} else if (rndctl->name[0] != '\0') {
2697				if (strncmp(rs->name, rndctl->name, n) != 0)
2698					continue;
2699			}
2700			flags = rs->flags & ~rndctl->mask;
2701			flags |= rndctl->flags & rndctl->mask;
2702			if ((rs->flags & resetflags) == 0 &&
2703			    (flags & resetflags) != 0)
2704				reset = true;
2705			if ((rs->flags ^ flags) & resetflags)
2706				request = true;
2707			atomic_store_relaxed(&rs->flags, flags);
2708		}
2709		mutex_exit(&E->lock);
2710
2711		/*
2712		 * If we disabled estimation or collection, nix all the
2713		 * pending entropy and set needed to the maximum.
2714		 */
2715		if (reset) {
2716			xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
2717			mutex_enter(&E->lock);
2718			E->bitspending = 0;
2719			E->samplespending = 0;
2720			atomic_store_relaxed(&E->bitsneeded, MINENTROPYBITS);
2721			atomic_store_relaxed(&E->samplesneeded, MINSAMPLES);
2722			E->consolidate = false;
2723			mutex_exit(&E->lock);
2724		}
2725
2726		/*
2727		 * If we changed any of the estimation or collection
2728		 * flags, request new samples from everyone -- either
2729		 * to make up for what we just lost, or to get new
2730		 * samples from what we just added.
2731		 *
2732		 * Failing on signal, while waiting for another process
2733		 * to finish requesting entropy, is OK here even though
2734		 * we have committed side effects, because this ioctl
2735		 * command is idempotent, so repeating it is safe.
2736		 */
2737		if (request) {
2738			mutex_enter(&E->lock);
2739			error = entropy_request(ENTROPY_CAPACITY,
2740			    ENTROPY_WAIT|ENTROPY_SIG);
2741			mutex_exit(&E->lock);
2742		}
2743		break;
2744	}
2745	case RNDADDDATA: {	/* Enter seed into entropy pool.  */
2746		rnddata_t *rdata = data;
2747		unsigned entropybits = 0;
2748
2749		if (!atomic_load_relaxed(&entropy_collection))
2750			break;	/* thanks but no thanks */
2751		if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
2752			return EINVAL;
2753
2754		/*
2755		 * This ioctl serves as the userland alternative a
2756		 * bootloader-provided seed -- typically furnished by
2757		 * /etc/rc.d/random_seed.  We accept the user's entropy
2758		 * claim only if
2759		 *
2760		 * (a) the user is privileged, and
2761		 * (b) we have not entered a bootloader seed.
2762		 *
2763		 * under the assumption that the user may use this to
2764		 * load a seed from disk that we have already loaded
2765		 * from the bootloader, so we don't double-count it.
2766		 */
2767		if (privileged && rdata->entropy && rdata->len) {
2768			mutex_enter(&E->lock);
2769			if (!E->seeded) {
2770				entropybits = MIN(rdata->entropy,
2771				    MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
2772				E->seeded = true;
2773			}
2774			mutex_exit(&E->lock);
2775		}
2776
2777		/* Enter the data and consolidate entropy.  */
2778		rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
2779		    entropybits);
2780		entropy_consolidate();
2781		break;
2782	}
2783	default:
2784		error = ENOTTY;
2785	}
2786
2787	/* Return any error that may have come up.  */
2788	return error;
2789}
2790
2791/* Legacy entry points */
2792
2793void
2794rnd_seed(void *seed, size_t len)
2795{
2796
2797	if (len != sizeof(rndsave_t)) {
2798		printf("entropy: invalid seed length: %zu,"
2799		    " expected sizeof(rndsave_t) = %zu\n",
2800		    len, sizeof(rndsave_t));
2801		return;
2802	}
2803	entropy_seed(seed);
2804}
2805
2806void
2807rnd_init(void)
2808{
2809
2810	entropy_init();
2811}
2812
2813void
2814rnd_init_softint(void)
2815{
2816
2817	entropy_init_late();
2818	entropy_bootrequest();
2819}
2820
2821int
2822rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
2823{
2824
2825	return entropy_ioctl(cmd, data);
2826}
2827