/*-
 * Copyright (c) 2000 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/random/yarrow.c 73379 2001-03-03 14:35:01Z markm $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/mutex.h>
#include <sys/selinfo.h>
#include <sys/random.h>
#include <sys/types.h>
#include <sys/unistd.h>

#include <machine/atomic.h>
#include <machine/cpu.h>

#include <crypto/blowfish/blowfish.h>

#include <dev/random/hash.h>
#include <dev/random/yarrow.h>

/* #define DEBUG */

static void generator_gate(void);
static void reseed(int);
static void random_harvest_internal(u_int64_t, void *, u_int, u_int, u_int, enum esource);

static void random_kthread(void *);

/* Structure holding the entropy state */
struct random_state random_state;

/* These are used to queue harvested packets of entropy. The entropy
 * buffer size is pretty arbitrary.
 */
struct harvest {
	u_int64_t somecounter;		/* fast counter for clock jitter */
	u_char entropy[HARVESTSIZE];	/* the harvested entropy */
	u_int size, bits, frac;		/* stats about the entropy */
	enum esource source;		/* origin of the entropy */
};

/* Ring buffer holding harvested entropy */
static struct harvestring {
	volatile int	head;
	volatile int	tail;
	struct harvest	data[HARVEST_RING_SIZE];
} harvestring;
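/* Harvesters advance only 'head' and the kthread advances only 'tail';
 * the index arithmetic below uses HARVEST_RING_MASK, which assumes
 * HARVEST_RING_SIZE is a power of two.
 */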

/* The reseed thread mutex */
static struct mtx random_reseed_mtx;

/* <0 to end the kthread, 0 to let it run */
static int random_kthread_control = 0;

static struct proc *random_kthread_proc;

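/* The kthread drains the harvest ring, hashing each event into one of the
 * two pools (alternating fast/slow) and bumping that source's entropy
 * estimate, and triggers a reseed when the estimates cross the thresholds.
 */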
static void
random_kthread(void *arg /* NOTUSED */)
{
	int pl, src, overthreshhold[2], newtail;
	struct harvest *event;
	struct source *source;

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("OWNERSHIP Giant == %d sched_lock == %d\n",
		mtx_owned(&Giant), mtx_owned(&sched_lock));
	mtx_unlock(&Giant);
#endif

	for (pl = 0; pl < 2; pl++)
		yarrow_hash_init(&random_state.pool[pl].hash, NULL, 0);

	for (;;) {

		if (harvestring.tail == harvestring.head)
			tsleep(&harvestring, PUSER, "rndslp", hz/10);

		else {

			/* Suck the harvested entropy out of the queue and hash
			 * it into the appropriate pool.
			 */

			newtail = (harvestring.tail + 1) & HARVEST_RING_MASK;
			event = &harvestring.data[harvestring.tail];

			/* Bump the ring counter. This action is assumed
			 * to be atomic.
			 */
			harvestring.tail = newtail;

			pl = random_state.which = !random_state.which;

			source = &random_state.pool[pl].source[event->source];
			yarrow_hash_iterate(&random_state.pool[pl].hash,
				event->entropy, sizeof(event->entropy));
			yarrow_hash_iterate(&random_state.pool[pl].hash,
				&event->somecounter, sizeof(event->somecounter));
			source->frac += event->frac;
			source->bits += event->bits + source->frac/1024;
			source->frac %= 1024;
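			/* 'frac' accumulates fractional bits in units of
			 * 1/1024 of a bit; whole bits carry into 'bits'.
			 */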

			/* Count the over-threshold sources in each pool */
			for (pl = 0; pl < 2; pl++) {
				overthreshhold[pl] = 0;
				for (src = 0; src < ENTROPYSOURCE; src++) {
					if (random_state.pool[pl].source[src].bits
						> random_state.pool[pl].thresh)
						overthreshhold[pl]++;
				}
			}

			/* If any fast source is over threshold, reseed */
			if (overthreshhold[FAST])
				reseed(FAST);

			/* If enough slow sources are over threshold, reseed */
			if (overthreshhold[SLOW] >= random_state.slowoverthresh)
				reseed(SLOW);

		}

		/* Is the thread scheduled for a shutdown? */
		if (random_kthread_control != 0) {
#ifdef DEBUG
			mtx_lock(&Giant);
			printf("Random kthread setting terminate\n");
			mtx_unlock(&Giant);
#endif
			random_set_wakeup_exit(&random_kthread_control);
			/* NOTREACHED */
			break;
		}

	}

}

int
random_init(void)
{
	int error;

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Random initialise\n");
	mtx_unlock(&Giant);
#endif

	/* This can be turned off by the very paranoid;
	 * a reseed will turn it back on.
	 */
	random_state.seeded = 1;

	/* Yarrow parameters. Do not adjust these unless you
	 * have a very good clue about what they do!
	 */
	random_state.gengateinterval = 10;
	random_state.bins = 10;
	random_state.pool[0].thresh = 100;
	random_state.pool[1].thresh = 160;
	random_state.slowoverthresh = 2;
	random_state.which = FAST;
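	/* Roughly: gengateinterval is the number of output blocks between
	 * generator gates; bins bounds the iterations of the reseed
	 * "stretching" loop (capped at TIMEBIN); thresh is the per-pool
	 * entropy threshold in bits; slowoverthresh is how many slow-pool
	 * sources must cross their threshold to trigger a slow reseed.
	 */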

	mtx_init(&random_reseed_mtx, "random reseed", MTX_DEF);

	harvestring.head = 0;
	harvestring.tail = 0;

	/* Start the hash/reseed thread */
	error = kthread_create(random_kthread, NULL,
		&random_kthread_proc, RFHIGHPID, "random");
	if (error != 0)
		return error;

	/* Register the randomness harvesting routine */
	random_init_harvester(random_harvest_internal, read_random_real);

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Random initialise finish\n");
	mtx_unlock(&Giant);
#endif

	return 0;
}

void
random_deinit(void)
{
#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Random deinitialise\n");
	mtx_unlock(&Giant);
#endif

	/* Deregister the randomness harvesting routine */
	random_deinit_harvester();

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Random deinitialise waiting for thread to terminate\n");
	mtx_unlock(&Giant);
#endif

	/* Command the hash/reseed thread to end and wait for it to finish */
	random_kthread_control = -1;
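	/* The kthread is expected to notice the flag, call
	 * random_set_wakeup_exit() on &random_kthread_control and exit,
	 * which ends the sleep below.
	 */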
	tsleep((void *)&random_kthread_control, PUSER, "rndend", 0);

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Random deinitialise removing mutexes\n");
	mtx_unlock(&Giant);
#endif

	mtx_destroy(&random_reseed_mtx);

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Random deinitialise finish\n");
	mtx_unlock(&Giant);
#endif
}

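/* Reseed the generator from one or both entropy pools. The numbered steps
 * below mirror the reseed procedure of the Yarrow design: a fast reseed
 * draws on the fast pool only, a slow reseed folds in the slow pool too.
 */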
static void
reseed(int fastslow)
{
	/* Interrupt-context stack is a limited resource; make large
	 * structures static.
	 */
	static u_char v[TIMEBIN][KEYSIZE];	/* v[i] */
	static struct yarrowhash context;
	u_char hash[KEYSIZE];			/* h' */
	u_char temp[KEYSIZE];
	int i, j;

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Reseed type %d\n", fastslow);
	mtx_unlock(&Giant);
#endif

	/* The reseed task must not be jumped on */
	mtx_lock(&random_reseed_mtx);

	/* 1. Hash the accumulated entropy into v[0] */

	yarrow_hash_init(&context, NULL, 0);
	/* Feed the slow pool hash in if slow */
	if (fastslow == SLOW)
		yarrow_hash_iterate(&context,
			&random_state.pool[SLOW].hash, sizeof(struct yarrowhash));

	yarrow_hash_iterate(&context,
		&random_state.pool[FAST].hash, sizeof(struct yarrowhash));

	/* 2. Compute hash values for all v. _Supposed_ to be computationally
	 *    intensive.
	 */

	if (random_state.bins > TIMEBIN)
		random_state.bins = TIMEBIN;
	for (i = 1; i < random_state.bins; i++) {
		yarrow_hash_init(&context, NULL, 0);
		/* v[i] #= h(v[i-1]) */
		yarrow_hash_iterate(&context, v[i - 1], KEYSIZE);
		/* v[i] #= h(v[0]) */
		yarrow_hash_iterate(&context, v[0], KEYSIZE);
		/* v[i] #= h(i) */
		yarrow_hash_iterate(&context, &i, sizeof(int));
		/* Return the hashval */
		yarrow_hash_finish(&context, v[i]);
	}

	/* 3. Compute a new key; h' is the identity function here;
	 *    it is not being ignored!
	 */

	yarrow_hash_init(&context, NULL, 0);
	yarrow_hash_iterate(&context, &random_state.key, KEYSIZE);
	for (i = 1; i < random_state.bins; i++)
		yarrow_hash_iterate(&context, &v[i], KEYSIZE);
	yarrow_hash_finish(&context, temp);
	yarrow_encrypt_init(&random_state.key, temp, KEYSIZE);

	/* 4. Recompute the counter */

	random_state.counter = 0;
	yarrow_encrypt(&random_state.key, &random_state.counter, temp,
		sizeof(random_state.counter));
	memcpy(&random_state.counter, temp, sizeof(random_state.counter));

	/* 5. Reset entropy estimate accumulators to zero */

	for (i = 0; i <= fastslow; i++) {
		for (j = 0; j < ENTROPYSOURCE; j++) {
			if (random_state.pool[i].source[j].bits >
				random_state.pool[i].thresh) {
				random_state.pool[i].source[j].bits = 0;
				random_state.pool[i].source[j].frac = 0;
			}
		}
	}

	/* 6. Wipe memory of intermediate values */

	memset((void *)v, 0, sizeof(v));
	memset((void *)temp, 0, sizeof(temp));
	memset((void *)hash, 0, sizeof(hash));

	/* 7. Dump to seed file */
	/* XXX Not done here yet */

	/* Release the reseed mutex */
	mtx_unlock(&random_reseed_mtx);

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Reseed finish\n");
	mtx_unlock(&Giant);
#endif

	if (!random_state.seeded) {
		random_state.seeded = 1;
		selwakeup(&random_state.rsel);
		wakeup(&random_state);
	}

}

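/* Generate output by encrypting a block counter under the current key
 * (counter mode). Requests of at least one block are served in whole
 * counter-sized blocks; shorter requests are served from 'genval', which
 * caches the unread remainder of the last block. The generator is gated
 * after every 'gengateinterval' output blocks.
 */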
u_int
read_random_real(void *buf, u_int count)
{
	static u_int64_t genval;
	static int cur = 0;
	static int gate = 1;
	u_int i;
	u_int retval;

	/* The reseed task must not be jumped on */
	mtx_lock(&random_reseed_mtx);

	if (gate) {
		generator_gate();
		random_state.outputblocks = 0;
		gate = 0;
	}
	if (count >= sizeof(random_state.counter)) {
		retval = 0;
		for (i = 0; i < count; i += sizeof(random_state.counter)) {
			random_state.counter++;
			yarrow_encrypt(&random_state.key, &random_state.counter,
				&genval, sizeof(random_state.counter));
			memcpy((char *)buf + i, &genval,
				sizeof(random_state.counter));
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval += sizeof(random_state.counter);
		}
	}
	else {
		if (!cur) {
			random_state.counter++;
			yarrow_encrypt(&random_state.key, &random_state.counter,
				&genval, sizeof(random_state.counter));
			memcpy(buf, &genval, count);
			cur = sizeof(random_state.counter) - count;
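			/* 'cur' bytes of this block remain unread in
			 * 'genval'; subsequent short reads consume them in
			 * the branch below.
			 */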
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval = count;
		}
		else {
			retval = cur < count ? cur : count;
			memcpy(buf,
				(char *)&genval +
					(sizeof(random_state.counter) - cur),
				retval);
			cur -= retval;
		}
	}
	mtx_unlock(&random_reseed_mtx);
	return retval;
}

void
write_random(void *buf, u_int count)
{
	u_int i;

	/* Break the input up into HARVESTSIZE chunks. The writer has too
	 * much control here, so "estimate" the entropy as zero.
	 */
	for (i = 0; i < count; i += HARVESTSIZE) {
		random_harvest_internal(get_cyclecount(), (char *)buf + i,
			HARVESTSIZE, 0, 0, RANDOM_WRITE);
	}

	/* If the loop overshot the end of the input, step back to the
	 * start of the last (partial) chunk.
	 */
	if (i > count)
		i -= HARVESTSIZE;

	/* Get the last bytes even if the input length is not
	 * a multiple of HARVESTSIZE.
	 */
	count %= HARVESTSIZE;
	if (count) {
		random_harvest_internal(get_cyclecount(), (char *)buf + i,
			count, 0, 0, RANDOM_WRITE);
	}
}

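/* Generator gate: re-key the output cipher from its own output, so that a
 * later compromise of the key does not expose blocks generated before the
 * gate. Called at start-up and every 'gengateinterval' output blocks.
 */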
static void
generator_gate(void)
{
	int i;
	u_char temp[KEYSIZE];

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Generator gate\n");
	mtx_unlock(&Giant);
#endif

	for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) {
		random_state.counter++;
		yarrow_encrypt(&random_state.key, &random_state.counter,
			&(temp[i]), sizeof(random_state.counter));
	}

	yarrow_encrypt_init(&random_state.key, temp, KEYSIZE);
	memset((void *)temp, 0, KEYSIZE);

#ifdef DEBUG
	mtx_lock(&Giant);
	printf("Generator gate finish\n");
	mtx_unlock(&Giant);
#endif
}

/* Entropy harvesting routine. This is supposed to be fast; do
 * not do anything slow in here!
 */

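/* Events are queued at 'head' for the kthread to consume; if the ring is
 * full (the incremented head would catch the tail) the event is silently
 * dropped.
 */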
static void
random_harvest_internal(u_int64_t somecounter, void *entropy, u_int count,
	u_int bits, u_int frac, enum esource origin)
{
	struct harvest *harvest;
	int newhead;

	newhead = (harvestring.head + 1) & HARVEST_RING_MASK;

	if (newhead != harvestring.tail) {

		/* Add the harvested data to the ring buffer */

		harvest = &harvestring.data[harvestring.head];

		/* Stuff the harvested data into the ring */
		harvest->somecounter = somecounter;
		count = count > HARVESTSIZE ? HARVESTSIZE : count;
		memcpy(harvest->entropy, entropy, count);
		harvest->size = count;
		harvest->bits = bits;
		harvest->frac = frac;
		harvest->source = origin < ENTROPYSOURCE ? origin : 0;

		/* Bump the ring counter. This action is assumed
		 * to be atomic.
		 */
		harvestring.head = newhead;

	}

}

/* Helper routine to perform explicit reseeds */
void
random_reseed(void)
{
	reseed(FAST);
}