/*-
 * Copyright (c) 2000 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/random/yarrow.c 63771 2000-07-23 11:08:16Z markm $
 */

/* NOTE NOTE NOTE - This is not finished! It will supply numbers, but
 *                  it is not yet cryptographically secure!!
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/linker.h>
#include <sys/libkern.h>
#include <sys/mbuf.h>
#include <sys/random.h>
#include <sys/time.h>
#include <sys/types.h>
#include <crypto/blowfish/blowfish.h>

#include <dev/randomdev/yarrow.h>

/* #define DEBUG */

static void generator_gate(void);
static void reseed(int);
static void random_harvest_internal(struct timespec *, u_int64_t, u_int, u_int, enum esource);

/* Structure holding the entropy state */
struct random_state random_state;

/* When enough entropy has been harvested, asynchronously "stir" it in.
 * The regate task is run at splsofttq()
 */
static struct task regate_task[2];

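/* Per-pool argument blocks for the regate task: each entry carries only the
 * index of the pool (FAST or SLOW) that should be reseeded when the
 * corresponding deferred regate() call runs.
 */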
struct context {
	u_int pool;
} context[2] =	{
			{ 0 },
			{ 1 }
		};

static void
regate(void *context, int pending)
{
#ifdef DEBUG
	printf("Regate task\n");
#endif
	reseed(((struct context *)context)->pool);
}

void
random_init(void)
{
#ifdef DEBUG
	printf("Random init\n");
#endif
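	/* Default tuning, as used by the code below: gengateinterval is the
	 * number of output blocks between generator gates, bins bounds how
	 * many v[i] values are computed at reseed time, and each pool's
	 * thresh is the estimated entropy (in bits) that must accumulate
	 * before a reseed from that pool is queued; the 100/160-bit values
	 * appear to follow the Yarrow-160 suggestions for the fast and slow
	 * pools. slowoverthresh is stored but not yet consulted here (see
	 * the XXX note in random_harvest_internal()).
	 */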
	random_state.gengateinterval = 10;
	random_state.bins = 10;
	random_state.pool[0].thresh = 100;
	random_state.pool[1].thresh = 160;
	random_state.slowoverthresh = 2;
	random_state.which = FAST;
	TASK_INIT(&regate_task[FAST], FAST, &regate, (void *)&context[FAST]);
	TASK_INIT(&regate_task[SLOW], SLOW, &regate, (void *)&context[SLOW]);
	random_init_harvester(random_harvest_internal);
}

void
random_deinit(void)
{
#ifdef DEBUG
	printf("Random deinit\n");
#endif
	random_deinit_harvester();
}

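/* Reseed the generator. The numbered steps below loosely follow the Yarrow
 * reseed procedure: hash the harvested entropy buckets into v[0], derive a
 * chain of values v[1]..v[bins-1] from it, fold the old key and all the
 * v[i] into a new cipher key, reset the output counter, and clear the
 * entropy estimates. Note that Blowfish-CBC encryption stands in for the
 * hash function throughout, which is presumably part of why the file is
 * marked as not yet cryptographically secure.
 */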
static void
reseed(int fastslow)
{
	/* Interrupt-context stack is a limited resource; make the large
	 * structures static. XXX Revisit - these need to move into the
	 * large random_state structure.
	 */
	static unsigned char v[TIMEBIN][KEYSIZE];	/* v[i] */
	unsigned char hash[KEYSIZE];			/* h' */
	static BF_KEY hashkey;
	unsigned char ivec[8];
	unsigned char temp[KEYSIZE];
	struct entropy *bucket;
	int i, j;

#ifdef DEBUG
	printf("Reseed type %d\n", fastslow);
#endif

	/* 1. Hash the accumulated entropy into v[0] */

	bzero((void *)&v[0], KEYSIZE);
	bzero((void *)ivec, sizeof(ivec));	/* start CBC chaining from a zero IV */
	if (fastslow == SLOW) {
		/* Feed a hash of the slow pool into the fast pool */
		for (i = 0; i < ENTROPYSOURCE; i++) {
			for (j = 0; j < ENTROPYBIN; j++) {
				bucket = &random_state.pool[SLOW].source[i].entropy[j];
				if (bucket->nanotime.tv_sec || bucket->nanotime.tv_nsec) {
					BF_set_key(&hashkey, sizeof(struct entropy),
						(void *)bucket);
					BF_cbc_encrypt(v[0], temp, KEYSIZE, &hashkey, ivec,
						BF_ENCRYPT);
					memcpy(&v[0], temp, KEYSIZE);
					bucket->nanotime.tv_sec = 0;
					bucket->nanotime.tv_nsec = 0;
				}
			}
		}
	}

	for (i = 0; i < ENTROPYSOURCE; i++) {
		for (j = 0; j < ENTROPYBIN; j++) {
			bucket = &random_state.pool[FAST].source[i].entropy[j];
			if (bucket->nanotime.tv_sec || bucket->nanotime.tv_nsec) {
				BF_set_key(&hashkey, sizeof(struct entropy), (void *)bucket);
				BF_cbc_encrypt(v[0], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
				memcpy(&v[0], temp, KEYSIZE);
				bucket->nanotime.tv_sec = 0;
				bucket->nanotime.tv_nsec = 0;
			}
		}
	}

	/* 2. Compute hash values for all v. _Supposed_ to be computationally
	 *    intensive.
	 */

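	/* The "#=" notation below means "accumulate by hashing in": each v[i]
	 * mixes in the previous value, v[0] and the loop index. In the Yarrow
	 * design this repeated hashing is what makes a reseed deliberately
	 * expensive, with bins (clamped to TIMEBIN just below) controlling
	 * how many rounds are performed.
	 */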
	if (random_state.bins > TIMEBIN)
		random_state.bins = TIMEBIN;
	for (i = 1; i < random_state.bins; i++) {
		bzero((void *)&v[i], KEYSIZE);
		/* v[i] #= h(v[i-1]) */
		BF_set_key(&hashkey, KEYSIZE, v[i - 1]);
		BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(&v[i], temp, KEYSIZE);
		/* v[i] #= h(v[0]) */
		BF_set_key(&hashkey, KEYSIZE, v[0]);
		BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(&v[i], temp, KEYSIZE);
		/* v[i] #= h(i) */
		BF_set_key(&hashkey, sizeof(int), (unsigned char *)&i);
		BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(&v[i], temp, KEYSIZE);
	}

	/* 3. Compute a new Key. */

	bzero((void *)hash, KEYSIZE);
	BF_set_key(&hashkey, KEYSIZE, (unsigned char *)&random_state.key);
	BF_cbc_encrypt(hash, temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
	memcpy(hash, temp, KEYSIZE);
	for (i = 1; i < random_state.bins; i++) {
		BF_set_key(&hashkey, KEYSIZE, v[i]);
		BF_cbc_encrypt(hash, temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(hash, temp, KEYSIZE);
	}
	BF_set_key(&random_state.key, KEYSIZE, hash);

	/* 4. Recompute the counter */

	random_state.counter = 0;
	BF_cbc_encrypt((unsigned char *)&random_state.counter, temp,
		sizeof(random_state.counter), &random_state.key,
		random_state.ivec, BF_ENCRYPT);
	memcpy(&random_state.counter, temp, sizeof(random_state.counter));

	/* 5. Reset entropy estimate accumulators to zero */

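	/* FAST is expected to be 0 and SLOW 1 (matching the context[]
	 * initializer and the thresholds set in random_init()), so a slow
	 * reseed clears the estimates for both pools while a fast reseed
	 * clears only the fast pool.
	 */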
	for (i = 0; i <= fastslow; i++) {
		for (j = 0; j < ENTROPYSOURCE; j++) {
			random_state.pool[i].source[j].bits = 0;
			random_state.pool[i].source[j].frac = 0;
		}
	}

	/* 6. Wipe memory of intermediate values */

	bzero((void *)v, sizeof(v));
	bzero((void *)temp, sizeof(temp));
	bzero((void *)hash, sizeof(hash));

	/* 7. Dump to seed file (done by external process) */

}

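/* Generate output by Blowfish-encrypting an incrementing counter, one
 * counter-sized block at a time. Every gengateinterval output blocks the
 * generator is "gated": generator_gate() replaces the key with fresh
 * generator output, so compromise of the current key does not expose
 * already-generated output. A minimal, hypothetical in-kernel caller:
 *
 *	char buf[16];
 *	u_int n = read_random(buf, sizeof(buf));
 */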
u_int
read_random(char *buf, u_int count)
{
	static int cur = 0;
	static int gate = 1;
	u_int i;
	u_int retval;
	u_int64_t genval;
	intrmask_t mask;

	/* The reseed task must not be jumped on */
	mask = splsofttq();

	if (gate) {
		generator_gate();
		random_state.outputblocks = 0;
		gate = 0;
	}
	if (count >= sizeof(random_state.counter)) {
		retval = 0;
		for (i = 0; i < count; i += sizeof(random_state.counter)) {
			random_state.counter++;
			BF_cbc_encrypt((unsigned char *)&random_state.counter,
				(unsigned char *)&genval,
				sizeof(random_state.counter),
				&random_state.key, random_state.ivec, BF_ENCRYPT);
			memcpy(&buf[i], &genval, sizeof(random_state.counter));
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval += sizeof(random_state.counter);
		}
	}
	else {
		if (!cur) {
			random_state.counter++;
			BF_cbc_encrypt((unsigned char *)&random_state.counter,
				(unsigned char *)&genval,
				sizeof(random_state.counter),
				&random_state.key, random_state.ivec,
				BF_ENCRYPT);
			memcpy(buf, &genval, count);
			cur = sizeof(random_state.counter) - count;
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval = count;
		}
		else {
			retval = cur < count ? cur : count;
			memcpy(buf,
				(char *)&random_state.counter +
					(sizeof(random_state.counter) - retval),
				retval);
			cur -= retval;
		}
	}
	splx(mask);
	return retval;
}

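/* Data written to the random device is not used as generator output
 * directly; each 64-bit chunk is pushed through the normal harvesting path
 * as a RANDOM_WRITE source with zero credited entropy, and a fast reseed
 * is then forced.
 */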
void
write_random(char *buf, u_int count)
{
	u_int i;
	intrmask_t mask;
	struct timespec timebuf;

	/* The reseed task must not be jumped on */
	mask = splsofttq();
	for (i = 0; i < count/sizeof(u_int64_t); i++) {
		nanotime(&timebuf);
		random_harvest_internal(&timebuf,
			*(u_int64_t *)&buf[i*sizeof(u_int64_t)],
			0, 0, RANDOM_WRITE);
	}
	reseed(FAST);
	splx(mask);
}

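/* "Gate" the generator: produce KEYSIZE bytes of ordinary generator output
 * and immediately rekey the cipher with them. This is the Yarrow generator
 * gate, intended to limit how much past output could be reconstructed from
 * a captured key.
 */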
static void
generator_gate(void)
{
	int i;
	unsigned char temp[KEYSIZE];
	intrmask_t mask;

#ifdef DEBUG
	printf("Generator gate\n");
#endif

	/* The reseed task must not be jumped on */
	mask = splsofttq();

	for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) {
		random_state.counter++;
		BF_cbc_encrypt((unsigned char *)&random_state.counter,
			&(temp[i]), sizeof(random_state.counter),
			&random_state.key, random_state.ivec, BF_ENCRYPT);
	}

	BF_set_key(&random_state.key, KEYSIZE, temp);
	bzero((void *)temp, KEYSIZE);

	splx(mask);
}

/* Entropy harvesting routine. This is supposed to be fast; do
 * not do anything slow in here!
 */

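/* Each (pool, source) pair keeps a small ring of ENTROPYBIN buckets; a
 * bucket whose timestamp is zero is free. The harvested event and its
 * timestamp go into the next bucket if that bucket is free (a full ring
 * silently drops the event). The source's entropy estimate is then
 * updated, with frac accumulating fractional bits in units of 1/1024 bit,
 * and once the estimate reaches the pool's threshold a reseed task is
 * queued at soft-interrupt priority. Accepted events alternate between the
 * fast and slow pools.
 */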
static void
random_harvest_internal(struct timespec *timep, u_int64_t entropy,
	u_int bits, u_int frac, enum esource origin)
{
	u_int insert;
	int which;	/* fast or slow */
	struct entropy *bucket;
	struct source *source;
	struct pool *pool;
	intrmask_t mask;

#ifdef DEBUG
	printf("Random harvest\n");
#endif
	if (origin < ENTROPYSOURCE) {

		/* Called inside irq handlers; protect from interference */
		mask = splhigh();

		which = random_state.which;
		pool = &random_state.pool[which];
		source = &pool->source[origin];

		insert = source->current + 1;
		if (insert >= ENTROPYBIN)
			insert = 0;

		bucket = &source->entropy[insert];

		if (!bucket->nanotime.tv_sec && !bucket->nanotime.tv_nsec) {

			/* nanotime provides clock jitter */
			bucket->nanotime = *timep;

			/* the harvested entropy */
			bucket->data = entropy;

			/* update the estimates - including "fractional bits" */
			source->bits += bits;
			source->frac += frac;
			if (source->frac >= 1024) {
				source->bits += source->frac / 1024;
				source->frac %= 1024;
			}
			if (source->bits >= pool->thresh) {
				/* XXX Slowoverthresh needs to be considered */
				taskqueue_enqueue(taskqueue_swi, &regate_task[which]);
			}

			/* bump the insertion point */
			source->current = insert;

			/* toggle the pool for next insertion */
			random_state.which = !random_state.which;

		}
		splx(mask);
	}
38962765Smarkm}
390