yarrow.c revision 62875
/*-
 * Copyright (c) 2000 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/random/yarrow.c 62875 2000-07-10 06:40:23Z markm $
 */

/* NOTE NOTE NOTE - This is not finished! It will supply numbers, but
                    it is not yet cryptographically secure!! */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/linker.h>
#include <sys/libkern.h>
#include <sys/mbuf.h>
#include <sys/random.h>
#include <sys/time.h>
#include <sys/types.h>
#include <crypto/blowfish/blowfish.h>

#include <dev/randomdev/yarrow.h>

/* #define DEBUG */

static void generator_gate(void);
static void reseed(int);
static void random_harvest_internal(struct timespec *nanotime, u_int64_t entropy, u_int bits, u_int frac, enum esource origin);

/* Structure holding the entropy state */
struct random_state random_state;

/* When enough entropy has been harvested, asynchronously "stir" it in */
/* The regate task is run at splsofttq()                               */
static struct task regate_task[2];
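
/* Argument handed to the regate task; it tells reseed() which pool
   (FAST or SLOW) to draw from.                                     */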

struct context {
	u_int pool;
} context[2] =	{
			{ 0 },
			{ 1 }
		};

static void
regate(void *context, int pending)
{
#ifdef DEBUG
	printf("Regate task\n");
#endif
	reseed(((struct context *)context)->pool);
}

void
random_init(void)
{
#ifdef DEBUG
	printf("Random init\n");
#endif
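	/* Default parameters: gate the generator every 10 output blocks
	   and keep up to 10 binary time bins; the 100- and 160-bit pool
	   thresholds (with two slow sources over threshold) apparently
	   follow the Yarrow-160 paper's suggested values.              */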
	random_state.gengateinterval = 10;
	random_state.bins = 10;
	random_state.pool[0].thresh = 100;
	random_state.pool[1].thresh = 160;
	random_state.slowoverthresh = 2;
	random_state.which = FAST;
	TASK_INIT(&regate_task[FAST], FAST, &regate, (void *)&context[FAST]);
	TASK_INIT(&regate_task[SLOW], SLOW, &regate, (void *)&context[SLOW]);
	random_init_harvester(random_harvest_internal);
}

void
random_deinit(void)
{
#ifdef DEBUG
	printf("Random deinit\n");
#endif
	random_deinit_harvester();
}

static void
reseed(int fastslow)
{
	unsigned char v[TIMEBIN][KEYSIZE];	/* v[i] */
	unsigned char hash[KEYSIZE];		/* h' */
	BF_KEY hashkey;
	unsigned char ivec[8];
	unsigned char temp[KEYSIZE];
	struct entropy *bucket;
	int i, j;

#ifdef DEBUG
	printf("Reseed type %d\n", fastslow);
#endif

	/* 1. Hash the accumulated entropy into v[0] */
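	/* Each non-empty entropy record below is used in turn as a
	   Blowfish key to CBC-encrypt the running value v[0]; this
	   keyed mixing step stands in for the hash function of the
	   published Yarrow design.                                 */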

	bzero((void *)&v[0], KEYSIZE);
	bzero((void *)ivec, sizeof(ivec));	/* the CBC chaining value starts at zero */
	if (fastslow == SLOW) {
		/* Feed a hash of the slow pool into the fast pool */
		for (i = 0; i < ENTROPYSOURCE; i++) {
			for (j = 0; j < ENTROPYBIN; j++) {
				bucket = &random_state.pool[SLOW].source[i].entropy[j];
				if (bucket->nanotime.tv_sec || bucket->nanotime.tv_nsec) {
					BF_set_key(&hashkey, sizeof(struct entropy),
						(void *)bucket);
					BF_cbc_encrypt(v[0], temp, KEYSIZE, &hashkey, ivec,
						BF_ENCRYPT);
					memcpy(&v[0], temp, KEYSIZE);
					bucket->nanotime.tv_sec = 0;
					bucket->nanotime.tv_nsec = 0;
				}
			}
		}
	}

	for (i = 0; i < ENTROPYSOURCE; i++) {
		for (j = 0; j < ENTROPYBIN; j++) {
			bucket = &random_state.pool[FAST].source[i].entropy[j];
			if (bucket->nanotime.tv_sec || bucket->nanotime.tv_nsec) {
				BF_set_key(&hashkey, sizeof(struct entropy), (void *)bucket);
				BF_cbc_encrypt(v[0], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
				memcpy(&v[0], temp, KEYSIZE);
				bucket->nanotime.tv_sec = 0;
				bucket->nanotime.tv_nsec = 0;
			}
		}
	}

	/* 2. Compute hash values for all v. _Supposed_ to be computationally */
	/*    intensive.                                                      */
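	/* In the Yarrow paper this step is v[i] = h(v[i-1] | v[0] | i);
	   here each term is folded in ("#=") by an encryption keyed
	   with that term.                                              */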

	if (random_state.bins > TIMEBIN)
		random_state.bins = TIMEBIN;
	for (i = 1; i < random_state.bins; i++) {
		bzero((void *)&v[i], KEYSIZE);
		/* v[i] #= h(v[i-1]) */
		BF_set_key(&hashkey, KEYSIZE, v[i - 1]);
		BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(&v[i], temp, KEYSIZE);
		/* v[i] #= h(v[0]) */
		BF_set_key(&hashkey, KEYSIZE, v[0]);
		BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(&v[i], temp, KEYSIZE);
		/* v[i] #= h(i) */
		BF_set_key(&hashkey, sizeof(int), (unsigned char *)&i);
		BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(&v[i], temp, KEYSIZE);
	}

	/* 3. Compute a new Key. */
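	/* The new key is a chained digest of the old key and all the
	   v[i], so every pool digest contributes to the next key.    */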

	bzero((void *)hash, KEYSIZE);
	BF_set_key(&hashkey, KEYSIZE, (unsigned char *)&random_state.key);
	BF_cbc_encrypt(hash, temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
	memcpy(hash, temp, KEYSIZE);
	for (i = 1; i < random_state.bins; i++) {
		BF_set_key(&hashkey, KEYSIZE, v[i]);
		BF_cbc_encrypt(hash, temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT);
		memcpy(hash, temp, KEYSIZE);
	}
	BF_set_key(&random_state.key, KEYSIZE, hash);

	/* 4. Recompute the counter */
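	/* The counter is zeroed and then replaced with its own
	   encryption under the new key, so the output sequence
	   restarts at an unpredictable, key-dependent value.   */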

	random_state.counter = 0;
	BF_cbc_encrypt((unsigned char *)&random_state.counter, temp,
		sizeof(random_state.counter), &random_state.key,
		random_state.ivec, BF_ENCRYPT);
	memcpy(&random_state.counter, temp, sizeof(random_state.counter));

	/* 5. Reset entropy estimate accumulators to zero */

	for (i = 0; i <= fastslow; i++) {
		for (j = 0; j < ENTROPYSOURCE; j++) {
			random_state.pool[i].source[j].bits = 0;
			random_state.pool[i].source[j].frac = 0;
		}
	}

	/* 6. Wipe memory of intermediate values */

	bzero((void *)v, sizeof(v));
	bzero((void *)temp, sizeof(temp));
	bzero((void *)hash, sizeof(hash));

	/* 7. Dump to seed file (XXX done by external process?) */

}

u_int
read_random(char *buf, u_int count)
{
	static int cur = 0;
	static int gate = 1;
	static u_int64_t genval;	/* persists; may hold leftover output bytes */
	u_int i;
	u_int retval;
	intrmask_t mask;

	/* The reseed task must not be jumped on */
	mask = splsofttq();
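
	/* Gate once on first use, so no output is ever produced
	   under the initial, never-reseeded key.                */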

	if (gate) {
		generator_gate();
		random_state.outputblocks = 0;
		gate = 0;
	}
	if (count >= sizeof(random_state.counter)) {
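		/* Produce output a block at a time: encrypt an
		   incrementing counter under the current key (counter
		   mode, in effect), and gate the generator every
		   gengateinterval blocks.                             */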
		retval = 0;
		for (i = 0; i < count; i += sizeof(random_state.counter)) {
			u_int chunk;

			random_state.counter++;
			BF_cbc_encrypt((unsigned char *)&random_state.counter,
				(unsigned char *)&genval,
				sizeof(random_state.counter),
				&random_state.key, random_state.ivec, BF_ENCRYPT);
			/* Clamp the last copy; count need not be a multiple
			   of the block size, and a full-block copy would
			   overrun buf */
			chunk = count - i;
			if (chunk > sizeof(random_state.counter))
				chunk = sizeof(random_state.counter);
			memcpy(&buf[i], &genval, chunk);
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval += chunk;
		}
	}
	else {
		if (!cur) {
			/* Generate a fresh block, hand out the first count
			   bytes and remember how many remain in genval */
			random_state.counter++;
			BF_cbc_encrypt((unsigned char *)&random_state.counter,
				(unsigned char *)&genval,
				sizeof(random_state.counter),
				&random_state.key, random_state.ivec,
				BF_ENCRYPT);
			memcpy(buf, &genval, count);
			cur = sizeof(random_state.counter) - count;
			if (++random_state.outputblocks >= random_state.gengateinterval) {
				generator_gate();
				random_state.outputblocks = 0;
			}
			retval = count;
		}
		else {
			/* Hand out bytes left over in genval from the
			   previous block, starting at the first byte
			   not yet consumed */
			retval = cur < count ? cur : count;
			memcpy(buf,
				(char *)&genval +
					(sizeof(random_state.counter) - cur),
				retval);
			cur -= retval;
		}
	}
	splx(mask);
	return retval;
}

static void
generator_gate(void)
{
	int i;
	unsigned char temp[KEYSIZE];
	intrmask_t mask;

#ifdef DEBUG
	printf("Generator gate\n");
#endif

	/* The reseed task must not be jumped on */
	mask = splsofttq();
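
	/* Collect one key's worth of generator output and install it
	   as the new key; earlier output then cannot be reconstructed
	   from the current state (Yarrow's "generator gate").        */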

	for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) {
		random_state.counter++;
		BF_cbc_encrypt((unsigned char *)&random_state.counter,
			&(temp[i]), sizeof(random_state.counter),
			&random_state.key, random_state.ivec, BF_ENCRYPT);
	}

	BF_set_key(&random_state.key, KEYSIZE, temp);
	bzero((void *)temp, KEYSIZE);

	splx(mask);
}

/* Entropy harvesting routine. This is supposed to be fast; do */
/* not do anything slow in here!                               */

static void
random_harvest_internal(struct timespec *nanotime, u_int64_t entropy,
	u_int bits, u_int frac, enum esource origin)
{
	u_int insert;
	int which;	/* fast or slow */
	struct entropy *bucket;
	struct source *source;
	struct pool *pool;
	intrmask_t mask;

#ifdef DEBUG
	printf("Random harvest\n");
#endif
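	/* Each source keeps a small ring of entropy buckets in each
	   pool; a bucket is reused only after reseed() has consumed
	   it (its timestamp is then zero), and successive harvests
	   alternate between the fast and slow pools.               */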
	if (origin < ENTROPYSOURCE) {

		/* The reseed task must not be jumped on */
		mask = splsofttq();

		which = random_state.which;
		pool = &random_state.pool[which];
		source = &pool->source[origin];

		insert = source->current + 1;
		if (insert >= ENTROPYBIN)
			insert = 0;

		bucket = &source->entropy[insert];

		if (!bucket->nanotime.tv_sec && !bucket->nanotime.tv_nsec) {

			/* nanotime provides clock jitter */
			bucket->nanotime = *nanotime;

			/* the harvested entropy */
			bucket->data = entropy;

			/* update the estimates - including "fractional bits" */
			source->bits += bits;
			source->frac += frac;
			if (source->frac >= 1024) {
				source->bits += source->frac / 1024;
				source->frac %= 1024;
			}
			if (source->bits >= pool->thresh) {
				/* XXX Slowoverthresh needs to be considered */
				taskqueue_enqueue(taskqueue_swi, &regate_task[which]);
			}

			/* bump the insertion point */
			source->current = insert;

			/* toggle the pool for next insertion */
			random_state.which = !random_state.which;

		}
		splx(mask);
	}
}