yarrow.c revision 67365
/*-
 * Copyright (c) 2000 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/random/yarrow.c 67365 2000-10-20 07:58:15Z jhb $
 */

/* NOTE NOTE NOTE - This is not finished! It will supply numbers, but
 *                  it is not yet cryptographically secure!!
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/select.h>
#include <sys/random.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/unistd.h>
#include <crypto/blowfish/blowfish.h>

#include <dev/random/hash.h>
#include <dev/random/yarrow.h>

/* #define DEBUG */
/* #define DEBUG1 */	/* Very noisy - prints plenty of harvesting stats */

static void generator_gate(void);
static void reseed(int);
static void random_harvest_internal(struct timespec *, void *, u_int, u_int, u_int, enum esource);

static void random_kthread(void *);

/* Structure holding the entropy state */
struct random_state random_state;

/* Queue holding harvested entropy */
TAILQ_HEAD(harvestqueue, harvest) harvestqueue,
	initqueue = TAILQ_HEAD_INITIALIZER(harvestqueue);

/* These are used to queue harvested packets of entropy. The entropy
 * buffer size is pretty arbitrary.
 */
struct harvest {
	struct timespec time;		/* nanotime for clock jitter */
	u_char entropy[HARVESTSIZE];	/* the harvested entropy */
	u_int size, bits, frac;		/* stats about the entropy */
	enum esource source;		/* source of the entropy */
	u_int pool;			/* which pool this goes into */
	TAILQ_ENTRY(harvest) harvest;	/* link to next */
};

/* The reseed thread mutex */
static struct mtx random_reseed_mtx;

/* The entropy harvest mutex, as well as the mutex associated
 * with the msleep() call during deinit
 */
static struct mtx random_harvest_mtx;

/* <0 to end the kthread, 0 to let it run */
static int random_kthread_control = 0;

static struct proc *random_kthread_proc;

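/* The harvest thread. It drains the harvest queue, hashing each event into
 * the fast and slow pools, and reseeds whenever enough per-source entropy
 * estimates in a pool have crossed that pool's threshold.
 */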
static void
random_kthread(void *arg /* NOTUSED */)
{
	int pl, src, overthreshhold[2];
	struct harvest *event;
	struct source *source;
#ifdef DEBUG1
	int queuecount;
#endif

#ifdef DEBUG
	printf("At %s, line %d: mtx_owned(&Giant) == %d, mtx_owned(&sched_lock) == %d\n", __FILE__, __LINE__, mtx_owned(&Giant), mtx_owned(&sched_lock));
#endif

	for (pl = 0; pl < 2; pl++)
		yarrow_hash_init(&random_state.pool[pl].hash, NULL, 0);

	for (;;) {

		if (TAILQ_EMPTY(&harvestqueue)) {

			/* Sleep for a second to give the system a chance */
			mtx_enter(&Giant, MTX_DEF);
			tsleep(&harvestqueue, PUSER, "rndslp", hz);
			mtx_exit(&Giant, MTX_DEF);

		}
		else {

			/* Suck the harvested entropy out of the queue and hash
			 * it into the fast and slow pools.
			 */
#ifdef DEBUG1
			queuecount = 0;
#endif
			while (!TAILQ_EMPTY(&harvestqueue)) {
#ifdef DEBUG1
				queuecount++;
#endif
				mtx_enter(&random_harvest_mtx, MTX_DEF);

				event = TAILQ_FIRST(&harvestqueue);
				TAILQ_REMOVE(&harvestqueue, event, harvest);

				mtx_exit(&random_harvest_mtx, MTX_DEF);

				source = &random_state.pool[event->pool].source[event->source];
				yarrow_hash_iterate(&random_state.pool[event->pool].hash,
					event->entropy, sizeof(event->entropy));
				yarrow_hash_iterate(&random_state.pool[event->pool].hash,
					&event->time, sizeof(event->time));
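				/* Credit the source: frac accumulates
				 * fractional bits in units of 1/1024 bit and
				 * is folded into bits once a whole bit has
				 * been collected.
				 */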
				source->frac += event->frac;
				source->bits += event->bits + source->frac/1024;
				source->frac %= 1024;
				free(event, M_TEMP);

			}
#ifdef DEBUG1
			printf("Harvested %d events\n", queuecount);
#endif

			/* Count the over-threshold sources in each pool */
			for (pl = 0; pl < 2; pl++) {
				overthreshhold[pl] = 0;
				for (src = 0; src < ENTROPYSOURCE; src++) {
					if (random_state.pool[pl].source[src].bits
						> random_state.pool[pl].thresh)
						overthreshhold[pl]++;
				}
			}

			/* if any fast source over threshhold, reseed */
			if (overthreshhold[FAST])
				reseed(FAST);

			/* if enough slow sources are over threshhold, reseed */
			if (overthreshhold[SLOW] >= random_state.slowoverthresh)
				reseed(SLOW);

		}

		/* Is the thread scheduled for a shutdown? */
		if (random_kthread_control != 0) {
			if (!TAILQ_EMPTY(&harvestqueue)) {
#ifdef DEBUG
				printf("Random cleaning extraneous events\n");
#endif
				mtx_enter(&random_harvest_mtx, MTX_DEF);
				/* Take entries from the head of the queue
				 * rather than using TAILQ_FOREACH, since each
				 * entry is freed as it is removed.
				 */
				while ((event = TAILQ_FIRST(&harvestqueue)) != NULL) {
					TAILQ_REMOVE(&harvestqueue, event, harvest);
					free(event, M_TEMP);
				}
				mtx_exit(&random_harvest_mtx, MTX_DEF);
			}
#ifdef DEBUG
			printf("Random kthread setting terminate\n");
#endif
			random_set_wakeup_exit(&random_kthread_control);
			/* NOTREACHED */
			break;
		}

	}

}

int
random_init(void)
{
	int error;

#ifdef DEBUG
	printf("Random initialise\n");
#endif

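	/* Default Yarrow parameters: the number of output blocks between
	 * generator gates, the number of time bins used during a reseed,
	 * the per-pool entropy thresholds (in bits), and how many slow-pool
	 * sources must exceed their threshold before a slow reseed.
	 */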
	random_state.gengateinterval = 10;
	random_state.bins = 10;
	random_state.pool[0].thresh = 100;
	random_state.pool[1].thresh = 160;
	random_state.slowoverthresh = 2;
	random_state.which = FAST;

	harvestqueue = initqueue;

	/* Initialise the mutexes */
	mtx_init(&random_reseed_mtx, "random reseed", MTX_DEF);
	mtx_init(&random_harvest_mtx, "random harvest", MTX_DEF);

	/* Start the hash/reseed thread */
	error = kthread_create(random_kthread, NULL,
		&random_kthread_proc, RFHIGHPID, "random");
	if (error != 0)
		return error;

	/* Register the randomness harvesting routine */
	random_init_harvester(random_harvest_internal, read_random_real);

#ifdef DEBUG
	printf("Random initialise finish\n");
#endif

	return 0;
}

void
random_deinit(void)
{
#ifdef DEBUG
	printf("Random deinitialise\n");
#endif

	/* Deregister the randomness harvesting routine */
	random_deinit_harvester();

#ifdef DEBUG
	printf("Random deinitialise waiting for thread to terminate\n");
#endif

	/* Command the hash/reseed thread to end and wait for it to finish */
	mtx_enter(&random_harvest_mtx, MTX_DEF);
	random_kthread_control = -1;
	msleep((void *)&random_kthread_control, &random_harvest_mtx, PUSER,
		"rndend", 0);
	mtx_exit(&random_harvest_mtx, MTX_DEF);

#ifdef DEBUG
	printf("Random deinitialise removing mutexes\n");
#endif

	/* Remove the mutexes */
	mtx_destroy(&random_reseed_mtx);
	mtx_destroy(&random_harvest_mtx);

#ifdef DEBUG
	printf("Random deinitialise finish\n");
#endif
}

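/* Reseed the generator from the fast pool, or from both pools for a slow
 * reseed. Following the Yarrow reseed procedure, the pool hashes are folded
 * through a chain of time bins, a new cipher key is derived from the result,
 * and the output counter and the entropy estimates are reset.
 */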
static void
reseed(int fastslow)
{
	/* Interrupt-context stack is a limited resource; make large
	 * structures static.
	 */
	static u_char v[TIMEBIN][KEYSIZE];	/* v[i] */
	static struct yarrowhash context;
	u_char hash[KEYSIZE];			/* h' */
	u_char temp[KEYSIZE];
	int i, j;

#ifdef DEBUG
	printf("Reseed type %d\n", fastslow);
#endif

	/* The reseed task must not be jumped on */
	mtx_enter(&random_reseed_mtx, MTX_DEF);

	/* 1. Hash the accumulated entropy into v[0] */

	yarrow_hash_init(&context, NULL, 0);
	/* Feed the slow pool hash in if slow */
	if (fastslow == SLOW)
		yarrow_hash_iterate(&context,
			&random_state.pool[SLOW].hash, sizeof(struct yarrowhash));

	yarrow_hash_iterate(&context,
		&random_state.pool[FAST].hash, sizeof(struct yarrowhash));

	/* 2. Compute hash values for all v. _Supposed_ to be computationally
	 *    intensive.
	 */

	if (random_state.bins > TIMEBIN)
		random_state.bins = TIMEBIN;
	for (i = 1; i < random_state.bins; i++) {
		yarrow_hash_init(&context, NULL, 0);
		/* v[i] #= h(v[i-1]) */
		yarrow_hash_iterate(&context, v[i - 1], KEYSIZE);
		/* v[i] #= h(v[0]) */
		yarrow_hash_iterate(&context, v[0], KEYSIZE);
		/* v[i] #= h(i) */
		yarrow_hash_iterate(&context, &i, sizeof(int));
		/* Return the hashval */
		yarrow_hash_finish(&context, v[i]);
	}

	/* 3. Compute a new key; h' is the identity function here;
	 *    it is not being ignored!
	 */

	yarrow_hash_init(&context, NULL, 0);
	yarrow_hash_iterate(&context, &random_state.key, KEYSIZE);
	for (i = 1; i < random_state.bins; i++)
		yarrow_hash_iterate(&context, &v[i], KEYSIZE);
	yarrow_hash_finish(&context, temp);
	yarrow_encrypt_init(&random_state.key, temp, KEYSIZE);

	/* 4. Recompute the counter */

	random_state.counter = 0;
	yarrow_encrypt(&random_state.key, &random_state.counter, temp,
		sizeof(random_state.counter));
	memcpy(&random_state.counter, temp, sizeof(random_state.counter));

	/* 5. Reset entropy estimate accumulators to zero */

	for (i = 0; i <= fastslow; i++) {
		for (j = 0; j < ENTROPYSOURCE; j++) {
			if (random_state.pool[i].source[j].bits >
				random_state.pool[i].thresh) {
				random_state.pool[i].source[j].bits = 0;
				random_state.pool[i].source[j].frac = 0;
			}
		}
	}

	/* 6. Wipe memory of intermediate values */

	memset((void *)v, 0, sizeof(v));
	memset((void *)temp, 0, sizeof(temp));
	memset((void *)hash, 0, sizeof(hash));

	/* 7. Dump to seed file */
	/* XXX Not done here yet */

	/* Release the reseed mutex */
	mtx_exit(&random_reseed_mtx, MTX_DEF);

#ifdef DEBUG
	printf("Reseed finish\n");
#endif

	if (!random_state.seeded) {
		random_state.seeded = 1;
		selwakeup(&random_state.rsel);
		wakeup(&random_state);
	}

}

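/* Produce random bytes for the caller. Output is generated one cipher block
 * at a time by encrypting an incrementing counter; the generator is gated
 * (re-keyed) every gengateinterval blocks. Requests smaller than one block
 * are served from a cached block (genval/cur).
 */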
u_int
read_random_real(void *buf, u_int count)
{
	static u_int64_t genval;
	static int cur = 0;
	static int gate = 1;
	u_int i;
	u_int retval;

	/* The reseed task must not be jumped on */
	mtx_enter(&random_reseed_mtx, MTX_DEF);

	if (gate) {
		generator_gate();
		random_state.outputblocks = 0;
		gate = 0;
	}
	if (count >= sizeof(random_state.counter)) {
		retval = 0;
		for (i = 0; i < count; i += sizeof(random_state.counter)) {
			random_state.counter++;
			yarrow_encrypt(&random_state.key, &random_state.counter,
				&genval, sizeof(random_state.counter));
			memcpy((char *)buf + i, &genval,
				sizeof(random_state.counter));
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval += sizeof(random_state.counter);
		}
	}
	else {
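		/* Sub-block request: generate a fresh block if nothing is
		 * cached, otherwise hand out the remaining bytes of the
		 * previously generated block.
		 */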
		if (!cur) {
			random_state.counter++;
			yarrow_encrypt(&random_state.key, &random_state.counter,
				&genval, sizeof(random_state.counter));
			memcpy(buf, &genval, count);
			cur = sizeof(random_state.counter) - count;
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval = count;
		}
		else {
			retval = cur < count ? cur : count;
			memcpy(buf,
				(char *)&genval +
					(sizeof(random_state.counter) - cur),
				retval);
			cur -= retval;
		}
	}
	mtx_exit(&random_reseed_mtx, MTX_DEF);
	return retval;
}

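/* Data passed to write_random is treated as an entropy harvest: it is fed
 * into the pools in HARVESTSIZE chunks with zero entropy credit, and an
 * explicit fast reseed is forced afterwards.
 */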
void
write_random(void *buf, u_int count)
{
	u_int i;
	struct timespec timebuf;

	/* arbitrarily break the input up into HARVESTSIZE chunks */
	for (i = 0; i < count; i += HARVESTSIZE) {
		nanotime(&timebuf);
		random_harvest_internal(&timebuf, (char *)buf + i, HARVESTSIZE, 0, 0,
			RANDOM_WRITE);
	}

	/* If the loop overshot the end of the buffer, step back to the
	 * start of the last chunk.
	 */
	if (i > count)
		i -= HARVESTSIZE;

	/* Get the last bytes even if the input length is not a multiple of HARVESTSIZE */
	count %= HARVESTSIZE;
	if (count) {
		nanotime(&timebuf);
		random_harvest_internal(&timebuf, (char *)buf + i, count, 0, 0,
			RANDOM_WRITE);
	}

	/* Explicit reseed */
	reseed(FAST);
}

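/* Generator gate: generate KEYSIZE bytes of output and use them to re-key
 * the cipher, so that earlier output cannot be reconstructed from a later
 * compromise of the key.
 */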
static void
generator_gate(void)
{
	int i;
	u_char temp[KEYSIZE];

#ifdef DEBUG
	printf("Generator gate\n");
#endif

	for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) {
		random_state.counter++;
		yarrow_encrypt(&random_state.key, &random_state.counter,
			&(temp[i]), sizeof(random_state.counter));
	}

	yarrow_encrypt_init(&random_state.key, temp, KEYSIZE);
	memset((void *)temp, 0, KEYSIZE);

#ifdef DEBUG
	printf("Generator gate finish\n");
#endif
}

/* Entropy harvesting routine. This is supposed to be fast; do
 * not do anything slow in here!
 */

static void
random_harvest_internal(struct timespec *timep, void *entropy, u_int count,
	u_int bits, u_int frac, enum esource origin)
{
	struct harvest *event;

#if 0
#ifdef DEBUG
	printf("Random harvest\n");
#endif
#endif
	event = malloc(sizeof(struct harvest), M_TEMP, M_NOWAIT);

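	/* If the event cannot be allocated (M_NOWAIT), or the source is not
	 * recognised, the harvest is silently dropped.
	 */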
	if (origin < ENTROPYSOURCE && event != NULL) {

		/* nanotime provides clock jitter */
		event->time = *timep;

		/* the harvested entropy */
		count = count > sizeof(event->entropy)
			? sizeof(event->entropy)
			: count;
		memcpy(event->entropy, entropy, count);

		event->size = count;
		event->bits = bits;
		event->frac = frac;
		event->source = origin;

		/* protect the queue from simultaneous updates */
		mtx_enter(&random_harvest_mtx, MTX_DEF);

		/* toggle the pool for next insertion */
		event->pool = random_state.which;
		random_state.which = !random_state.which;

		TAILQ_INSERT_TAIL(&harvestqueue, event, harvest);

		mtx_exit(&random_harvest_mtx, MTX_DEF);
	}
}