/*-
 * Copyright (c) 2000-2013 Mark R V Murray
 * Copyright (c) 2013 Arthur Mesh
 * Copyright (c) 2004 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_random.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/vmparam.h>

#include <dev/random/randomdev.h>
#include <dev/random/randomdev_soft.h>
#include <dev/random/random_adaptors.h>
#include <dev/random/random_harvestq.h>
#include <dev/random/live_entropy_sources.h>
#include <dev/random/rwfile.h>

#define RANDOM_FIFO_MAX	1024	/* How many events to queue up */

/*
 * The harvest mutex protects the consistency of the harvest and empty
 * entropy fifos and other associated structures.
 */
struct mtx	harvest_mtx;

/* Lockable FIFO queue holding entropy buffers */
struct entropyfifo {
	int count;
	STAILQ_HEAD(harvestlist, harvest) head;
};

/* Empty entropy buffers */
static struct entropyfifo emptyfifo;

/* Harvested entropy */
static struct entropyfifo harvestfifo;

/* <0 to end the kthread, 0 to let it run, 1 to flush the harvest queues */
int random_kthread_control = 0;

static struct proc *random_kthread_proc;

#ifdef RANDOM_RWFILE
static const char *entropy_files[] = {
	"/entropy",
	NULL
};
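/*
 * Illustrative note, not something this file depends on: "/entropy" is
 * conventionally written at shutdown by the rc scripts, roughly
 *
 *	dd if=/dev/random of=/entropy bs=4096 count=1
 *
 * so that a page of carried-over entropy is available early in the next
 * boot.  The exact mechanism is an rc.d/loader detail and may differ
 * between releases.
 */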
#endif

/*
 * Deal with entropy cached externally if this is present.
 * Lots of policy may eventually arrive in this function.
 * Called after / is mounted.
 */
static void
random_harvestq_cache(void *arg __unused)
{
	uint8_t *keyfile, *data;
	size_t size, i;
#ifdef RANDOM_RWFILE
	const char **entropy_file;
	uint8_t *zbuf;
	int error;
#endif

	/* Get stuff that may have been preloaded by loader(8) */
	keyfile = preload_search_by_type("/boot/entropy");
	if (keyfile != NULL) {
		data = preload_fetch_addr(keyfile);
		size = preload_fetch_size(keyfile);
		if (data != NULL && size != 0) {
			for (i = 0; i < size; i += 16)
				random_harvestq_internal(get_cyclecount(), data + i, 16, 16, RANDOM_CACHED);
			printf("random: read %zu bytes from preloaded cache\n", size);
			bzero(data, size);
		} else
			printf("random: no preloaded entropy cache available\n");
	}

#ifdef RANDOM_RWFILE
	/*
	 * Read and attempt to overwrite the entropy cache files.
	 * If a file exists and can be both read and then overwritten,
	 * use its contents; otherwise ignore it, but report what
	 * happened.
	 */
	data = malloc(PAGE_SIZE, M_ENTROPY, M_WAITOK);
	zbuf = __DECONST(void *, zero_region);
	for (entropy_file = entropy_files; *entropy_file; entropy_file++) {
		error = randomdev_read_file(*entropy_file, data, PAGE_SIZE);
		if (error == 0) {
			printf("random: entropy cache '%s' provides %ld bytes\n", *entropy_file, (long)PAGE_SIZE);
			error = randomdev_write_file(*entropy_file, zbuf, PAGE_SIZE);
			if (error == 0) {
				printf("random: entropy cache '%s' contents used and successfully overwritten\n", *entropy_file);
				for (i = 0; i < PAGE_SIZE; i += 16)
					random_harvestq_internal(get_cyclecount(), data + i, 16, 16, RANDOM_CACHED);
			} else
				printf("random: entropy cache '%s' not overwritten and therefore not used; error = %d\n", *entropy_file, error);
		} else
			printf("random: entropy cache '%s' not present or unreadable; error = %d\n", *entropy_file, error);
	}
	bzero(data, PAGE_SIZE);
	free(data, M_ENTROPY);
#endif
}
EVENTHANDLER_DEFINE(mountroot, random_harvestq_cache, NULL, 0);

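/*
 * The harvest-queue kthread.  On each pass it moves everything on the
 * harvest fifo onto a thread-local queue, drops the harvest mutex while
 * handing each event to the entropy processor passed in via 'arg',
 * returns the used buffers to the empty fifo, gives the "live" entropy
 * sources one polling round, and then sleeps for roughly a tenth of a
 * second (or until woken).
 */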
static void
random_kthread(void *arg)
{
	STAILQ_HEAD(, harvest) local_queue;
	struct harvest *event = NULL;
	int local_count;
	event_proc_f entropy_processor = arg;

	STAILQ_INIT(&local_queue);
	local_count = 0;

	/* Process until told to stop */
	mtx_lock_spin(&harvest_mtx);
	for (; random_kthread_control >= 0;) {

		/*
		 * Grab all the entropy events.
		 * Drain entropy source records into a thread-local
		 * queue for processing while not holding the mutex.
		 */
		STAILQ_CONCAT(&local_queue, &harvestfifo.head);
		local_count += harvestfifo.count;
		harvestfifo.count = 0;

		/*
		 * Deal with events, if any.
		 * Then transfer the used events back into the empty fifo.
		 */
		if (!STAILQ_EMPTY(&local_queue)) {
			mtx_unlock_spin(&harvest_mtx);
			STAILQ_FOREACH(event, &local_queue, next)
				entropy_processor(event);
			mtx_lock_spin(&harvest_mtx);
			STAILQ_CONCAT(&emptyfifo.head, &local_queue);
			emptyfifo.count += local_count;
			local_count = 0;
		}

		KASSERT(local_count == 0, ("random_kthread: local_count %d",
		    local_count));

		/*
		 * Do only one round of the hardware sources for now.
		 * Later we'll need to make it rate-adaptive.
		 */
		mtx_unlock_spin(&harvest_mtx);
		live_entropy_sources_feed(1, entropy_processor);
		mtx_lock_spin(&harvest_mtx);

		/*
		 * If a queue flush was commanded, it has now happened,
		 * and we can mark this by resetting the command.
		 */
		if (random_kthread_control == 1)
			random_kthread_control = 0;

		/* Work done, so don't belabour the issue */
		msleep_spin_sbt(&random_kthread_control, &harvest_mtx,
		    "-", SBT_1S/10, 0, C_PREL(1));

	}
	mtx_unlock_spin(&harvest_mtx);

	random_set_wakeup_exit(&random_kthread_control);
	/* NOTREACHED */
}

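/*
 * Set up the harvest queue machinery: pre-allocate RANDOM_FIFO_MAX event
 * buffers onto the empty fifo, initialise the (initially empty) harvest
 * fifo and the harvest mutex, and start the kthread that will feed
 * harvested events to 'cb'.
 */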
void
random_harvestq_init(event_proc_f cb)
{
	int error, i;
	struct harvest *np;

	/* Initialise the harvest fifos */

	/* Contains the currently unused event structs. */
	STAILQ_INIT(&emptyfifo.head);
	for (i = 0; i < RANDOM_FIFO_MAX; i++) {
		np = malloc(sizeof(struct harvest), M_ENTROPY, M_WAITOK);
		STAILQ_INSERT_TAIL(&emptyfifo.head, np, next);
	}
	emptyfifo.count = RANDOM_FIFO_MAX;

	/* Will contain the queued-up events. */
	STAILQ_INIT(&harvestfifo.head);
	harvestfifo.count = 0;

	mtx_init(&harvest_mtx, "entropy harvest mutex", NULL, MTX_SPIN);

	/* Start the hash/reseed thread */
	error = kproc_create(random_kthread, cb,
	    &random_kthread_proc, RFHIGHPID, 0, "rand_harvestq"); /* RANDOM_CSPRNG_NAME */

	if (error != 0)
		panic("Cannot create entropy maintenance thread.");
}

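/*
 * Tear down the harvest queue: free every event buffer still sitting on
 * either fifo and destroy the harvest mutex.  The kthread is expected to
 * have been told to exit (random_kthread_control < 0) before this runs.
 */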
void
random_harvestq_deinit(void)
{
	struct harvest *np;

	/* Destroy the harvest fifos */
	while (!STAILQ_EMPTY(&emptyfifo.head)) {
		np = STAILQ_FIRST(&emptyfifo.head);
		STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
		free(np, M_ENTROPY);
	}
	emptyfifo.count = 0;
	while (!STAILQ_EMPTY(&harvestfifo.head)) {
		np = STAILQ_FIRST(&harvestfifo.head);
		STAILQ_REMOVE_HEAD(&harvestfifo.head, next);
		free(np, M_ENTROPY);
	}
	harvestfifo.count = 0;

	mtx_destroy(&harvest_mtx);
}

/*
 * Entropy harvesting routine.
 * This is supposed to be fast; do not do anything slow in here!
 *
 * It is also illegal (and morally reprehensible) to insert any
 * high-rate data here. "High-rate" is defined as a data source
 * that will usually cause lots of failures of the "Lockless read"
 * check a few lines below. This includes the "always-on" sources
 * like the Intel "rdrand" or the VIA Nehemiah "xstore" sources.
 */
void
random_harvestq_internal(u_int64_t somecounter, const void *entropy,
    u_int count, u_int bits, enum esource origin)
{
	struct harvest *event;

	KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE,
	    ("random_harvestq_internal: origin %d invalid\n", origin));

	/* Lockless read to avoid lock operations if fifo is full. */
	if (harvestfifo.count >= RANDOM_FIFO_MAX)
		return;

	mtx_lock_spin(&harvest_mtx);

	/*
	 * Don't overfill the harvest queue; this could steal all
	 * our memory.
	 */
	if (harvestfifo.count < RANDOM_FIFO_MAX) {
		event = STAILQ_FIRST(&emptyfifo.head);
		if (event != NULL) {
			/* Add the harvested data to the fifo */
			STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
			emptyfifo.count--;
			event->somecounter = somecounter;
			event->size = count;
			event->bits = bits;
			event->source = origin;

			/* XXXX Come back and make this dynamic! */
			count = MIN(count, HARVESTSIZE);
			memcpy(event->entropy, entropy, count);

			STAILQ_INSERT_TAIL(&harvestfifo.head,
			    event, next);
			harvestfifo.count++;
		}
	}

	mtx_unlock_spin(&harvest_mtx);
}
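
/*
 * Illustrative usage (a sketch, not a documented interface): a low-rate
 * event could be queued roughly like this, with the cycle counter used
 * both as the timestamp and as the harvested data:
 *
 *	uint32_t jitter;
 *
 *	jitter = (uint32_t)get_cyclecount();
 *	random_harvestq_internal(get_cyclecount(), &jitter,
 *	    sizeof(jitter), 1, RANDOM_INTERRUPT);
 *
 * RANDOM_INTERRUPT is assumed here to be one of the enum esource values;
 * in practice callers normally use the public random_harvest() wrapper
 * rather than calling this routine directly.
 */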