1255071Smarkm/*-
2284959Smarkm * Copyright (c) 2000-2015 Mark R V Murray
3255071Smarkm * Copyright (c) 2013 Arthur Mesh
4255071Smarkm * Copyright (c) 2004 Robert N. M. Watson
5255071Smarkm * All rights reserved.
6255071Smarkm *
7255071Smarkm * Redistribution and use in source and binary forms, with or without
8255071Smarkm * modification, are permitted provided that the following conditions
9255071Smarkm * are met:
10255071Smarkm * 1. Redistributions of source code must retain the above copyright
11255071Smarkm *    notice, this list of conditions and the following disclaimer
12255071Smarkm *    in this position and unchanged.
13255071Smarkm * 2. Redistributions in binary form must reproduce the above copyright
14255071Smarkm *    notice, this list of conditions and the following disclaimer in the
15255071Smarkm *    documentation and/or other materials provided with the distribution.
16255071Smarkm *
17255071Smarkm * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18255071Smarkm * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19255071Smarkm * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20255071Smarkm * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21255071Smarkm * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22255071Smarkm * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23255071Smarkm * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24255071Smarkm * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25255071Smarkm * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26255071Smarkm * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27255071Smarkm *
28255071Smarkm */
29255071Smarkm
30255071Smarkm#include <sys/cdefs.h>
31255071Smarkm__FBSDID("$FreeBSD: stable/11/sys/dev/random/random_harvestq.c 346725 2019-04-26 01:58:36Z mw $");
32255071Smarkm
33255071Smarkm#include <sys/param.h>
34255071Smarkm#include <sys/systm.h>
35284959Smarkm#include <sys/conf.h>
36256377Smarkm#include <sys/eventhandler.h>
37284959Smarkm#include <sys/hash.h>
38255071Smarkm#include <sys/kernel.h>
39255071Smarkm#include <sys/kthread.h>
40256377Smarkm#include <sys/linker.h>
41255071Smarkm#include <sys/lock.h>
42255071Smarkm#include <sys/malloc.h>
43284959Smarkm#include <sys/module.h>
44255071Smarkm#include <sys/mutex.h>
45255071Smarkm#include <sys/random.h>
46273872Smarkm#include <sys/sbuf.h>
47255071Smarkm#include <sys/sysctl.h>
48255071Smarkm#include <sys/unistd.h>
49255071Smarkm
50286839Smarkm#if defined(RANDOM_LOADABLE)
51286839Smarkm#include <sys/lock.h>
52286839Smarkm#include <sys/sx.h>
53286839Smarkm#endif
54286839Smarkm
55286839Smarkm#include <machine/atomic.h>
56256377Smarkm#include <machine/cpu.h>
57256377Smarkm
58345981Smarkm#include <crypto/rijndael/rijndael-api-fst.h>
59345981Smarkm#include <crypto/sha2/sha256.h>
60345981Smarkm
61345981Smarkm#include <dev/random/hash.h>
62256377Smarkm#include <dev/random/randomdev.h>
63256377Smarkm#include <dev/random/random_harvestq.h>
64255071Smarkm
static void random_kthread(void);
static void random_sources_feed(void);

/* Pending read demand; bumped by read_rate_increment(), drained (read-and-cleared) by random_sources_feed(). */
static u_int read_rate;

/* List for the dynamic sysctls */
static struct sysctl_ctx_list random_clist;

/*
 * How many events to queue up. We create this many items in
 * an 'empty' queue, then transfer them to the 'harvest' queue with
 * supplied junk. When used, they are transferred back to the
 * 'empty' queue.
 */
#define	RANDOM_RING_MAX		1024
#define	RANDOM_ACCUM_MAX	8

/* 1 to let the kernel thread run, 0 to terminate, -1 to mark completion */
volatile int random_kthread_control;
84255071Smarkm
/*
 * Put all the harvest queue context stuff in one place.
 * This makes it a bit easier to lock and protect.
 */
static struct harvest_context {
	/* The harvest mutex protects all of harvest_context and
	 * the related data.
	 */
	struct mtx hc_mtx;
	/* Round-robin destination cache, one counter per source. */
	u_int hc_destination[ENTROPYSOURCE];
	/* The context of the kernel thread processing harvested entropy */
	struct proc *hc_kthread_proc;
	/* Allow the sysadmin to select the broad category of
	 * entropy types to harvest.
	 */
	u_int hc_source_mask;
	/*
	 * Lockless ring buffer holding entropy events
	 * If ring.in == ring.out,
	 *     the buffer is empty.
	 * If ring.in != ring.out,
	 *     the buffer contains harvested entropy.
	 * If (ring.in + 1) == ring.out (mod RANDOM_RING_MAX),
	 *     the buffer is full.
	 *
	 * NOTE: ring.in points to the last added element,
	 * and ring.out points to the last consumed element.
	 *
	 * The ring.in variable needs locking as there are multiple
	 * sources to the ring. Only the sources may change ring.in,
	 * but the consumer may examine it.
	 *
	 * The ring.out variable does not need locking as there is
	 * only one consumer. Only the consumer may change ring.out,
	 * but the sources may examine it.
	 */
	struct entropy_ring {
		struct harvest_event ring[RANDOM_RING_MAX];
		volatile u_int in;
		volatile u_int out;
	} hc_entropy_ring;
	/* Small XOR accumulator for high-rate input; slots are mixed in
	 * by random_harvest_fast() and flushed by random_kthread().
	 */
	struct fast_entropy_accumulator {
		volatile u_int pos;
		uint32_t buf[RANDOM_ACCUM_MAX];
	} hc_entropy_fast_accumulator;
} harvest_context;
132255071Smarkm
/* Descriptor for the harvest kernel process; handed to kproc_start() via SYSINIT. */
static struct kproc_desc random_proc_kp = {
	"rand_harvestq",
	random_kthread,
	&harvest_context.hc_kthread_proc,
};
138255071Smarkm
/* Pass the given event straight through to Fortuna/Yarrow/Whatever.
 * With RANDOM_LOADABLE the algorithm context may be absent (module not
 * loaded), so the pointer is tested under the configuration lock.
 */
static __inline void
random_harvestq_fast_process_event(struct harvest_event *event)
{
#if defined(RANDOM_LOADABLE)
	RANDOM_CONFIG_S_LOCK();
	if (p_random_alg_context)
#endif
	p_random_alg_context->ra_event_processor(event);
#if defined(RANDOM_LOADABLE)
	RANDOM_CONFIG_S_UNLOCK();
#endif
}
152273872Smarkm
/*
 * Harvest kernel thread: drains the entropy event ring into the
 * processing algorithm, feeds from live sources, and flushes the fast
 * accumulator, sleeping ~100ms (SBT_1S/10) between rounds.
 */
static void
random_kthread(void)
{
        u_int maxloop, ring_out, i;

	/*
	 * Locking is not needed as this is the only place we modify ring.out, and
	 * we only examine ring.in without changing it. Both of these are volatile,
	 * and this is a unique thread.
	 */
	for (random_kthread_control = 1; random_kthread_control;) {
		/* Deal with events, if any. Restrict the number we do in one go. */
		maxloop = RANDOM_RING_MAX;
		while (harvest_context.hc_entropy_ring.out != harvest_context.hc_entropy_ring.in) {
			ring_out = (harvest_context.hc_entropy_ring.out + 1)%RANDOM_RING_MAX;
			random_harvestq_fast_process_event(harvest_context.hc_entropy_ring.ring + ring_out);
			/* Advance 'out' only after the event has been consumed. */
			harvest_context.hc_entropy_ring.out = ring_out;
			if (!--maxloop)
				break;
		}
		random_sources_feed();
		/* XXX: FIX!! Increase the high-performance data rate? Need some measurements first. */
		for (i = 0; i < RANDOM_ACCUM_MAX; i++) {
			if (harvest_context.hc_entropy_fast_accumulator.buf[i]) {
				/* Claim 4 bits of entropy per accumulator word. */
				random_harvest_direct(harvest_context.hc_entropy_fast_accumulator.buf + i, sizeof(harvest_context.hc_entropy_fast_accumulator.buf[0]), 4, RANDOM_UMA);
				harvest_context.hc_entropy_fast_accumulator.buf[i] = 0;
			}
		}
		/* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */
		tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-", SBT_1S/10, 0, C_PREL(1));
	}
	/* Signal random_harvestq_deinit() that this thread has finished. */
	random_kthread_control = -1;
	wakeup(&harvest_context.hc_kthread_proc);
	kproc_exit(0);
	/* NOTREACHED */
}
/* This happens well after SI_SUB_RANDOM */
SYSINIT(random_device_h_proc, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, kproc_start,
    &random_proc_kp);
192255071Smarkm
/*
 * Run through all fast sources reading entropy for the given
 * number of rounds, which should be a multiple of the number
 * of entropy accumulation pools in use; 2 for Yarrow and 32
 * for Fortuna.
 */
static void
random_sources_feed(void)
{
	uint32_t entropy[HARVESTSIZE];
	struct random_sources *rrs;
	u_int i, n, local_read_rate;

	/*
	 * Step over all of live entropy sources, and feed their output
	 * to the system-wide RNG.
	 */
#if defined(RANDOM_LOADABLE)
	RANDOM_CONFIG_S_LOCK();
	if (p_random_alg_context) {
	/* It's an indenting error. Yeah, Yeah. */
#endif
	/* Snapshot and reset the accumulated read demand. */
	local_read_rate = atomic_readandclear_32(&read_rate);
	/* Perform at least one read per round */
	local_read_rate = MAX(local_read_rate, 1);
	/* But not exceeding RANDOM_KEYSIZE_WORDS */
	local_read_rate = MIN(local_read_rate, RANDOM_KEYSIZE_WORDS);
	LIST_FOREACH(rrs, &source_list, rrs_entries) {
		for (i = 0; i < p_random_alg_context->ra_poolcount*local_read_rate; i++) {
			n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
			KASSERT((n <= sizeof(entropy)), ("%s: rs_read returned too much data (%u > %zu)", __func__, n, sizeof(entropy)));
			/* It would appear that in some circumstances (e.g. virtualisation),
			 * the underlying hardware entropy source might not always return
			 * random numbers. Accept this but make a noise. If too much happens,
			 * can that source be trusted?
			 */
			if (n == 0) {
				printf("%s: rs_read for hardware device '%s' returned no entropy.\n", __func__, rrs->rrs_source->rs_ident);
				continue;
			}
			/* Credit only half of the bits read as entropy. */
			random_harvest_direct(entropy, n, (n*8)/2, rrs->rrs_source->rs_source);
		}
	}
	/* Scrub the stack buffer; it held raw entropy. */
	explicit_bzero(entropy, sizeof(entropy));
#if defined(RANDOM_LOADABLE)
	}
	RANDOM_CONFIG_S_UNLOCK();
#endif
}
242286839Smarkm
/*
 * Record 'chunk' units of read demand; random_sources_feed() consumes
 * (and clears) the running total to scale how much it pulls from live sources.
 */
void
read_rate_increment(u_int chunk)
{

	atomic_add_32(&read_rate, chunk);
}
249286839Smarkm
/* ARGSUSED */
/* Generate random_check_uint_harvestmask(), the range-checking sysctl
 * handler used for kern.random.harvest.mask in random_harvestq_init(). */
RANDOM_CHECK_UINT(harvestmask, 0, RANDOM_HARVEST_EVERYTHING_MASK);
252273872Smarkm
253273872Smarkm/* ARGSUSED */
254273872Smarkmstatic int
255273872Smarkmrandom_print_harvestmask(SYSCTL_HANDLER_ARGS)
256273872Smarkm{
257273872Smarkm	struct sbuf sbuf;
258255071Smarkm	int error, i;
259255071Smarkm
260273872Smarkm	error = sysctl_wire_old_buffer(req, 0);
261273872Smarkm	if (error == 0) {
262273872Smarkm		sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
263284959Smarkm		for (i = RANDOM_ENVIRONMENTAL_END; i >= 0; i--)
264284959Smarkm			sbuf_cat(&sbuf, (harvest_context.hc_source_mask & (1 << i)) ? "1" : "0");
265273872Smarkm		error = sbuf_finish(&sbuf);
266273872Smarkm		sbuf_delete(&sbuf);
267273872Smarkm	}
268273872Smarkm	return (error);
269273872Smarkm}
270273872Smarkm
/*
 * Printable names for the harvest sources, indexed by
 * enum random_entropy_source; the order must match that enum.
 * Entries after "UMA" (RANDOM_ENVIRONMENTAL_END) are the "pure"
 * hardware sources.
 */
static const char *(random_source_descr[]) = {
	"CACHED",
	"ATTACH",
	"KEYBOARD",
	"MOUSE",
	"NET_TUN",
	"NET_ETHER",
	"NET_NG",
	"INTERRUPT",
	"SWI",
	"FS_ATIME",
	"UMA", /* ENVIRONMENTAL_END */
	"PURE_OCTEON",
	"PURE_SAFE",
	"PURE_GLXSB",
	"PURE_UBSEC",
	"PURE_HIFN",
	"PURE_RDRAND",
	"PURE_NEHEMIAH",
	"PURE_RNDTEST",
	[RANDOM_PURE_TPM] = "PURE_TPM",
	/* "ENTROPYSOURCE" */
};
294273872Smarkm
295273872Smarkm/* ARGSUSED */
296273872Smarkmstatic int
297273872Smarkmrandom_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS)
298273872Smarkm{
299273872Smarkm	struct sbuf sbuf;
300273872Smarkm	int error, i;
301273872Smarkm
302273872Smarkm	error = sysctl_wire_old_buffer(req, 0);
303273872Smarkm	if (error == 0) {
304273872Smarkm		sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
305284959Smarkm		for (i = RANDOM_ENVIRONMENTAL_END; i >= 0; i--) {
306284959Smarkm			sbuf_cat(&sbuf, (i == RANDOM_ENVIRONMENTAL_END) ? "" : ",");
307284959Smarkm			sbuf_cat(&sbuf, !(harvest_context.hc_source_mask & (1 << i)) ? "[" : "");
308284959Smarkm			sbuf_cat(&sbuf, random_source_descr[i]);
309284959Smarkm			sbuf_cat(&sbuf, !(harvest_context.hc_source_mask & (1 << i)) ? "]" : "");
310273872Smarkm		}
311273872Smarkm		error = sbuf_finish(&sbuf);
312273872Smarkm		sbuf_delete(&sbuf);
313255071Smarkm	}
314273872Smarkm	return (error);
315273872Smarkm}
316256377Smarkm
/* ARGSUSED */
/*
 * Set up the harvest machinery: dynamic sysctls under kern.random.harvest,
 * the default (everything-on) source mask, the harvest lock, and an empty
 * event ring.
 */
static void
random_harvestq_init(void *unused __unused)
{
	struct sysctl_oid *random_sys_o;

	random_sys_o = SYSCTL_ADD_NODE(&random_clist,
	    SYSCTL_STATIC_CHILDREN(_kern_random),
	    OID_AUTO, "harvest", CTLFLAG_RW, 0,
	    "Entropy Device Parameters");
	/* Harvest from every source by default. */
	harvest_context.hc_source_mask = RANDOM_HARVEST_EVERYTHING_MASK;
	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_o),
	    OID_AUTO, "mask", CTLTYPE_UINT | CTLFLAG_RW,
	    &harvest_context.hc_source_mask, 0,
	    random_check_uint_harvestmask, "IU",
	    "Entropy harvesting mask");
	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_o),
	    OID_AUTO, "mask_bin", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, random_print_harvestmask, "A", "Entropy harvesting mask (printable)");
	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_o),
	    OID_AUTO, "mask_symbolic", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, random_print_harvestmask_symbolic, "A", "Entropy harvesting mask (symbolic)");
	RANDOM_HARVEST_INIT_LOCK();
	/* in == out means the ring starts empty. */
	harvest_context.hc_entropy_ring.in = harvest_context.hc_entropy_ring.out = 0;
}
SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_SECOND, random_harvestq_init, NULL);
346273872Smarkm
/*
 * This is used to prime the RNG by grabbing any early random stuff
 * known to the kernel, and inserting it directly into the hashing
 * module, e.g. Fortuna or Yarrow.
 */
/* ARGSUSED */
static void
random_harvestq_prime(void *unused __unused)
{
	struct harvest_event event;
	size_t count, size, i;
	uint8_t *keyfile, *data;

	/*
	 * Get entropy that may have been preloaded by loader(8)
	 * and use it to pre-charge the entropy harvest queue.
	 */
	keyfile = preload_search_by_type(RANDOM_HARVESTQ_BOOT_ENTROPY_FILE);
	if (keyfile != NULL) {
		data = preload_fetch_addr(keyfile);
		size = preload_fetch_size(keyfile);
		/* Trim the size. If the admin has a file with a funny size, we lose some. Tough. */
		size -= (size % sizeof(event.he_entropy));
		if (data != NULL && size != 0) {
			/* Feed the file one full event's worth at a time. */
			for (i = 0; i < size; i += sizeof(event.he_entropy)) {
				count = sizeof(event.he_entropy);
				event.he_somecounter = (uint32_t)get_cyclecount();
				event.he_size = count;
				event.he_bits = count/4; /* Underestimate the size for Yarrow */
				event.he_source = RANDOM_CACHED;
				event.he_destination = harvest_context.hc_destination[0]++;
				memcpy(event.he_entropy, data + i, sizeof(event.he_entropy));
				random_harvestq_fast_process_event(&event);
				/* Scrub the stack copy of the entropy. */
				explicit_bzero(&event, sizeof(event));
			}
			/* Scrub the preloaded data so it cannot be reused. */
			explicit_bzero(data, size);
			if (bootverbose)
				printf("random: read %zu bytes from preloaded cache\n", size);
		} else
			if (bootverbose)
				printf("random: no preloaded entropy cache\n");
	}
}
SYSINIT(random_device_prime, SI_SUB_RANDOM, SI_ORDER_FOURTH, random_harvestq_prime, NULL);
391255071Smarkm
/* ARGSUSED */
static void
random_harvestq_deinit(void *unused __unused)
{

	/* Command the hash/reseed thread to end and wait for it to finish */
	random_kthread_control = 0;
	/* random_kthread() sets the control to -1 just before exiting. */
	while (random_kthread_control >= 0)
		tsleep(&harvest_context.hc_kthread_proc, 0, "harvqterm", hz/5);
	sysctl_ctx_free(&random_clist);
}
SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_SECOND, random_harvestq_deinit, NULL);
404255071Smarkm
/*-
 * Entropy harvesting queue routine.
 *
 * This is supposed to be fast; do not do anything slow in here!
 * It is also illegal (and morally reprehensible) to insert any
 * high-rate data here. "High-rate" is defined as a data source
 * that will usually cause lots of failures of the "Lockless read"
 * check a few lines below. This includes the "always-on" sources
 * like the Intel "rdrand" or the VIA Nehamiah "xstore" sources.
 */
/* XXXRW: get_cyclecount() is cheap on most modern hardware, where cycle
 * counters are built in, but on older hardware it will do a real time clock
 * read which can be quite expensive.
 */
void
random_harvest_queue(const void *entropy, u_int size, u_int bits, enum random_entropy_source origin)
{
	struct harvest_event *event;
	u_int ring_in;

	KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin));
	/* Drop the event if this source class is masked off. */
	if (!(harvest_context.hc_source_mask & (1 << origin)))
		return;
	RANDOM_HARVEST_LOCK();
	ring_in = (harvest_context.hc_entropy_ring.in + 1)%RANDOM_RING_MAX;
	if (ring_in != harvest_context.hc_entropy_ring.out) {
		/* The ring is not full */
		event = harvest_context.hc_entropy_ring.ring + ring_in;
		event->he_somecounter = (uint32_t)get_cyclecount();
		event->he_source = origin;
		event->he_destination = harvest_context.hc_destination[origin]++;
		event->he_bits = bits;
		if (size <= sizeof(event->he_entropy)) {
			event->he_size = size;
			memcpy(event->he_entropy, entropy, size);
		}
		else {
			/* Big event, so squash it */
			event->he_size = sizeof(event->he_entropy[0]);
			event->he_entropy[0] = jenkins_hash(entropy, size, (uint32_t)(uintptr_t)event);
		}
		/* Publish the new element only after it is fully written. */
		harvest_context.hc_entropy_ring.in = ring_in;
	}
	/* NOTE: if the ring was full, the event is silently discarded. */
	RANDOM_HARVEST_UNLOCK();
}
450284959Smarkm
451284959Smarkm/*-
452284959Smarkm * Entropy harvesting fast routine.
453284959Smarkm *
454284959Smarkm * This is supposed to be very fast; do not do anything slow in here!
455284959Smarkm * This is the right place for high-rate harvested data.
456284959Smarkm */
457284959Smarkmvoid
458285422Smarkmrandom_harvest_fast(const void *entropy, u_int size, u_int bits, enum random_entropy_source origin)
459284959Smarkm{
460284959Smarkm	u_int pos;
461284959Smarkm
462284959Smarkm	KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin));
463284959Smarkm	/* XXX: FIX!! The above KASSERT is BS. Right now we ignore most structure and just accumulate the supplied data */
464284959Smarkm	if (!(harvest_context.hc_source_mask & (1 << origin)))
465284959Smarkm		return;
466284959Smarkm	pos = harvest_context.hc_entropy_fast_accumulator.pos;
467285422Smarkm	harvest_context.hc_entropy_fast_accumulator.buf[pos] ^= jenkins_hash(entropy, size, (uint32_t)get_cyclecount());
468284959Smarkm	harvest_context.hc_entropy_fast_accumulator.pos = (pos + 1)%RANDOM_ACCUM_MAX;
469284959Smarkm}
470284959Smarkm
471284959Smarkm/*-
472284959Smarkm * Entropy harvesting direct routine.
473284959Smarkm *
474284959Smarkm * This is not supposed to be fast, but will only be used during
475284959Smarkm * (e.g.) booting when initial entropy is being gathered.
476284959Smarkm */
477284959Smarkmvoid
478285422Smarkmrandom_harvest_direct(const void *entropy, u_int size, u_int bits, enum random_entropy_source origin)
479284959Smarkm{
480284959Smarkm	struct harvest_event event;
481284959Smarkm
482284959Smarkm	KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin));
483284959Smarkm	if (!(harvest_context.hc_source_mask & (1 << origin)))
484284959Smarkm		return;
485285422Smarkm	size = MIN(size, sizeof(event.he_entropy));
486284959Smarkm	event.he_somecounter = (uint32_t)get_cyclecount();
487285422Smarkm	event.he_size = size;
488284959Smarkm	event.he_bits = bits;
489284959Smarkm	event.he_source = origin;
490284959Smarkm	event.he_destination = harvest_context.hc_destination[origin]++;
491285422Smarkm	memcpy(event.he_entropy, entropy, size);
492284959Smarkm	random_harvestq_fast_process_event(&event);
493284959Smarkm	explicit_bzero(&event, sizeof(event));
494284959Smarkm}
495286839Smarkm
/* Advertise this component's interface version to the module system. */
MODULE_VERSION(random_harvestq, 1);
497