/*-
 * Copyright (c) 2000 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/random/yarrow.c 72200 2001-02-09 06:11:45Z bmilekic $
 */

/* NOTE NOTE NOTE - This is not finished! It will supply numbers, but
 * it is not yet cryptographically secure!!
31 */ 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/kernel.h> 36#include <sys/kthread.h> 37#include <sys/libkern.h> 38#include <sys/mutex.h> 39#include <sys/selinfo.h> 40#include <sys/random.h> 41#include <sys/types.h> 42#include <sys/unistd.h> 43 44#include <machine/atomic.h> 45#include <machine/cpu.h> 46 47#include <crypto/blowfish/blowfish.h> 48 49#include <dev/random/hash.h> 50#include <dev/random/yarrow.h> 51 52/* #define DEBUG */ 53/* #define DEBUG1 */ /* Very noisy - prints plenty harvesting stats */ 54 55static void generator_gate(void); 56static void reseed(int); 57static void random_harvest_internal(u_int64_t, void *, u_int, u_int, u_int, enum esource); 58 59static void random_kthread(void *); 60 61/* Structure holding the entropy state */ 62struct random_state random_state; 63 64/* These are used to queue harvested packets of entropy. The entropy 65 * buffer size is pretty arbitrary. 66 */ 67struct harvest { 68 u_int64_t somecounter; /* fast counter for clock jitter */ 69 u_char entropy[HARVESTSIZE]; /* the harvested entropy */ 70 u_int size, bits, frac; /* stats about the entropy */ 71 enum esource source; /* stats about the entropy */ 72 u_int pool; /* which pool this goes into */ 73}; 74 75/* Ring buffer holding harvested entropy */ 76static struct harvestring { 77 struct mtx lockout_mtx; 78 int head; 79 int tail; 80 struct harvest data[HARVEST_RING_SIZE]; 81} harvestring; 82 83/* The reseed thread mutex */ 84static struct mtx random_reseed_mtx; 85 86/* <0 to end the kthread, 0 to let it run */ 87static int random_kthread_control = 0; 88 89static struct proc *random_kthread_proc; 90 91static void 92random_kthread(void *arg /* NOTUSED */) 93{ 94 int pl, src, overthreshhold[2], head, newtail; 95 struct harvest *event; 96 struct source *source; 97 98#ifdef DEBUG 99 mtx_lock(&Giant); 100 printf("OWNERSHIP Giant == %d sched_lock == %d\n", 101 mtx_owned(&Giant), mtx_owned(&sched_lock)); 102 mtx_unlock(&Giant); 103#endif 104 105 for 
(pl = 0; pl < 2; pl++) 106 yarrow_hash_init(&random_state.pool[pl].hash, NULL, 0); 107 108 for (;;) { 109 110 head = atomic_load_acq_int(&harvestring.head); 111 newtail = (harvestring.tail + 1) % HARVEST_RING_SIZE; 112 if (harvestring.tail == head) 113 tsleep(&harvestring.head, PUSER, "rndslp", hz/10); 114 115 else { 116#ifdef DEBUG1 117 mtx_lock(&Giant); 118 printf("HARVEST src=%d bits=%d/%d pool=%d count=%lld\n", 119 event->source, event->bits, event->frac, 120 event->pool, event->somecounter); 121 mtx_unlock(&Giant); 122#endif 123 124 /* Suck the harvested entropy out of the queue and hash 125 * it into the appropriate pool. 126 */ 127 128 event = &harvestring.data[harvestring.tail]; 129 harvestring.tail = newtail; 130 131 source = &random_state.pool[event->pool].source[event->source]; 132 yarrow_hash_iterate(&random_state.pool[event->pool].hash, 133 event->entropy, sizeof(event->entropy)); 134 yarrow_hash_iterate(&random_state.pool[event->pool].hash, 135 &event->somecounter, sizeof(event->somecounter)); 136 source->frac += event->frac; 137 source->bits += event->bits + source->frac/1024; 138 source->frac %= 1024; 139 140 /* Count the over-threshold sources in each pool */ 141 for (pl = 0; pl < 2; pl++) { 142 overthreshhold[pl] = 0; 143 for (src = 0; src < ENTROPYSOURCE; src++) { 144 if (random_state.pool[pl].source[src].bits 145 > random_state.pool[pl].thresh) 146 overthreshhold[pl]++; 147 } 148 } 149 150 /* if any fast source over threshhold, reseed */ 151 if (overthreshhold[FAST]) 152 reseed(FAST); 153 154 /* if enough slow sources are over threshhold, reseed */ 155 if (overthreshhold[SLOW] >= random_state.slowoverthresh) 156 reseed(SLOW); 157 158 } 159 160 /* Is the thread scheduled for a shutdown? 
*/ 161 if (random_kthread_control != 0) { 162#ifdef DEBUG 163 mtx_lock(&Giant); 164 printf("Random kthread setting terminate\n"); 165 mtx_unlock(&Giant); 166#endif 167 random_set_wakeup_exit(&random_kthread_control); 168 /* NOTREACHED */ 169 break; 170 } 171 172 } 173 174} 175 176int 177random_init(void) 178{ 179 int error; 180 181#ifdef DEBUG 182 mtx_lock(&Giant); 183 printf("Random initialise\n"); 184 mtx_unlock(&Giant); 185#endif 186 187 /* This can be turned off by the very paranoid 188 * a reseed will turn it back on. 189 */ 190 random_state.seeded = 1; 191 192 random_state.gengateinterval = 10; 193 random_state.bins = 10; 194 random_state.pool[0].thresh = 100; 195 random_state.pool[1].thresh = 160; 196 random_state.slowoverthresh = 2; 197 random_state.which = FAST; 198 199 /* Initialise the mutexes */ 200 mtx_init(&random_reseed_mtx, "random reseed", MTX_DEF); 201 mtx_init(&harvestring.lockout_mtx, "random harvest", MTX_DEF); 202 203 harvestring.head = 0; 204 harvestring.tail = 0; 205 206 /* Start the hash/reseed thread */ 207 error = kthread_create(random_kthread, NULL, 208 &random_kthread_proc, RFHIGHPID, "random"); 209 if (error != 0) 210 return error; 211 212 /* Register the randomness harvesting routine */ 213 random_init_harvester(random_harvest_internal, read_random_real); 214 215#ifdef DEBUG 216 mtx_lock(&Giant); 217 printf("Random initialise finish\n"); 218 mtx_unlock(&Giant); 219#endif 220 221 return 0; 222} 223 224void 225random_deinit(void) 226{ 227#ifdef DEBUG 228 mtx_lock(&Giant); 229 printf("Random deinitialise\n"); 230 mtx_unlock(&Giant); 231#endif 232 233 /* Deregister the randomness harvesting routine */ 234 random_deinit_harvester(); 235 236#ifdef DEBUG 237 mtx_lock(&Giant); 238 printf("Random deinitialise waiting for thread to terminate\n"); 239 mtx_unlock(&Giant); 240#endif 241 242 /* Command the hash/reseed thread to end and wait for it to finish */ 243 mtx_lock(&harvestring.lockout_mtx); 244 random_kthread_control = -1; 245 msleep((void 
*)&random_kthread_control, &harvestring.lockout_mtx, PUSER, 246 "rndend", 0); 247 mtx_unlock(&harvestring.lockout_mtx); 248 249#ifdef DEBUG 250 mtx_lock(&Giant); 251 printf("Random deinitialise removing mutexes\n"); 252 mtx_unlock(&Giant); 253#endif 254 255 /* Remove the mutexes */ 256 mtx_destroy(&random_reseed_mtx); 257 mtx_destroy(&harvestring.lockout_mtx); 258 259#ifdef DEBUG 260 mtx_lock(&Giant); 261 printf("Random deinitialise finish\n"); 262 mtx_unlock(&Giant); 263#endif 264} 265 266static void 267reseed(int fastslow) 268{ 269 /* Interrupt-context stack is a limited resource; make large 270 * structures static. 271 */ 272 static u_char v[TIMEBIN][KEYSIZE]; /* v[i] */ 273 static struct yarrowhash context; 274 u_char hash[KEYSIZE]; /* h' */ 275 u_char temp[KEYSIZE]; 276 int i, j; 277 278#ifdef DEBUG 279 mtx_lock(&Giant); 280 printf("Reseed type %d\n", fastslow); 281 mtx_unlock(&Giant); 282#endif 283 284 /* The reseed task must not be jumped on */ 285 mtx_lock(&random_reseed_mtx); 286 287 /* 1. Hash the accumulated entropy into v[0] */ 288 289 yarrow_hash_init(&context, NULL, 0); 290 /* Feed the slow pool hash in if slow */ 291 if (fastslow == SLOW) 292 yarrow_hash_iterate(&context, 293 &random_state.pool[SLOW].hash, sizeof(struct yarrowhash)); 294 295 yarrow_hash_iterate(&context, 296 &random_state.pool[FAST].hash, sizeof(struct yarrowhash)); 297 298 /* 2. Compute hash values for all v. _Supposed_ to be computationally 299 * intensive. 300 */ 301 302 if (random_state.bins > TIMEBIN) 303 random_state.bins = TIMEBIN; 304 for (i = 1; i < random_state.bins; i++) { 305 yarrow_hash_init(&context, NULL, 0); 306 /* v[i] #= h(v[i-1]) */ 307 yarrow_hash_iterate(&context, v[i - 1], KEYSIZE); 308 /* v[i] #= h(v[0]) */ 309 yarrow_hash_iterate(&context, v[0], KEYSIZE); 310 /* v[i] #= h(i) */ 311 yarrow_hash_iterate(&context, &i, sizeof(int)); 312 /* Return the hashval */ 313 yarrow_hash_finish(&context, v[i]); 314 } 315 316 /* 3. 
Compute a new key; h' is the identity function here; 317 * it is not being ignored! 318 */ 319 320 yarrow_hash_init(&context, NULL, 0); 321 yarrow_hash_iterate(&context, &random_state.key, KEYSIZE); 322 for (i = 1; i < random_state.bins; i++) 323 yarrow_hash_iterate(&context, &v[i], KEYSIZE); 324 yarrow_hash_finish(&context, temp); 325 yarrow_encrypt_init(&random_state.key, temp, KEYSIZE); 326 327 /* 4. Recompute the counter */ 328 329 random_state.counter = 0; 330 yarrow_encrypt(&random_state.key, &random_state.counter, temp, 331 sizeof(random_state.counter)); 332 memcpy(&random_state.counter, temp, random_state.counter); 333 334 /* 5. Reset entropy estimate accumulators to zero */ 335 336 for (i = 0; i <= fastslow; i++) { 337 for (j = 0; j < ENTROPYSOURCE; j++) { 338 if (random_state.pool[i].source[j].bits > 339 random_state.pool[i].thresh) { 340 random_state.pool[i].source[j].bits = 0; 341 random_state.pool[i].source[j].frac = 0; 342 } 343 } 344 } 345 346 /* 6. Wipe memory of intermediate values */ 347 348 memset((void *)v, 0, sizeof(v)); 349 memset((void *)temp, 0, sizeof(temp)); 350 memset((void *)hash, 0, sizeof(hash)); 351 352 /* 7. 
Dump to seed file */ 353 /* XXX Not done here yet */ 354 355 /* Release the reseed mutex */ 356 mtx_unlock(&random_reseed_mtx); 357 358#ifdef DEBUG 359 mtx_lock(&Giant); 360 printf("Reseed finish\n"); 361 mtx_unlock(&Giant); 362#endif 363 364 if (!random_state.seeded) { 365 random_state.seeded = 1; 366 selwakeup(&random_state.rsel); 367 wakeup(&random_state); 368 } 369 370} 371 372u_int 373read_random_real(void *buf, u_int count) 374{ 375 static u_int64_t genval; 376 static int cur = 0; 377 static int gate = 1; 378 u_int i; 379 u_int retval; 380 381 /* The reseed task must not be jumped on */ 382 mtx_lock(&random_reseed_mtx); 383 384 if (gate) { 385 generator_gate(); 386 random_state.outputblocks = 0; 387 gate = 0; 388 } 389 if (count >= sizeof(random_state.counter)) { 390 retval = 0; 391 for (i = 0; i < count; i += sizeof(random_state.counter)) { 392 random_state.counter++; 393 yarrow_encrypt(&random_state.key, &random_state.counter, 394 &genval, sizeof(random_state.counter)); 395 memcpy((char *)buf + i, &genval, 396 sizeof(random_state.counter)); 397 if (++random_state.outputblocks >= random_state.gengateinterval) { 398 generator_gate(); 399 random_state.outputblocks = 0; 400 } 401 retval += sizeof(random_state.counter); 402 } 403 } 404 else { 405 if (!cur) { 406 random_state.counter++; 407 yarrow_encrypt(&random_state.key, &random_state.counter, 408 &genval, sizeof(random_state.counter)); 409 memcpy(buf, &genval, count); 410 cur = sizeof(random_state.counter) - count; 411 if (++random_state.outputblocks >= random_state.gengateinterval) { 412 generator_gate(); 413 random_state.outputblocks = 0; 414 } 415 retval = count; 416 } 417 else { 418 retval = cur < count ? 
cur : count; 419 memcpy(buf, 420 (char *)&genval + 421 (sizeof(random_state.counter) - cur), 422 retval); 423 cur -= retval; 424 } 425 } 426 mtx_unlock(&random_reseed_mtx); 427 return retval; 428} 429 430void 431write_random(void *buf, u_int count) 432{ 433 u_int i; 434 435 /* Break the input up into HARVESTSIZE chunks. 436 * The writer has too much control here, so "estimate" the 437 * the entropy as zero. 438 */ 439 for (i = 0; i < count; i += HARVESTSIZE) { 440 random_harvest_internal(get_cyclecount(), (char *)buf + i, 441 HARVESTSIZE, 0, 0, RANDOM_WRITE); 442 } 443 444 /* Maybe the loop iterated at least once */ 445 if (i > count) 446 i -= HARVESTSIZE; 447 448 /* Get the last bytes even if the input length is not 449 * a multiple of HARVESTSIZE. 450 */ 451 count %= HARVESTSIZE; 452 if (count) { 453 random_harvest_internal(get_cyclecount(), (char *)buf + i, 454 count, 0, 0, RANDOM_WRITE); 455 } 456} 457 458static void 459generator_gate(void) 460{ 461 int i; 462 u_char temp[KEYSIZE]; 463 464#ifdef DEBUG 465 mtx_lock(&Giant); 466 printf("Generator gate\n"); 467 mtx_unlock(&Giant); 468#endif 469 470 for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) { 471 random_state.counter++; 472 yarrow_encrypt(&random_state.key, &random_state.counter, 473 &(temp[i]), sizeof(random_state.counter)); 474 } 475 476 yarrow_encrypt_init(&random_state.key, temp, KEYSIZE); 477 memset((void *)temp, 0, KEYSIZE); 478 479#ifdef DEBUG 480 mtx_lock(&Giant); 481 printf("Generator gate finish\n"); 482 mtx_unlock(&Giant); 483#endif 484} 485 486/* Entropy harvesting routine. This is supposed to be fast; do 487 * not do anything slow in here! 
488 */ 489 490static void 491random_harvest_internal(u_int64_t somecounter, void *entropy, u_int count, 492 u_int bits, u_int frac, enum esource origin) 493{ 494 struct harvest *harvest; 495 int newhead, tail; 496 497#ifdef DEBUG1 498 mtx_lock(&Giant); 499 printf("Random harvest\n"); 500 mtx_unlock(&Giant); 501#endif 502 if (origin < ENTROPYSOURCE) { 503 504 /* Add the harvested data to the ring buffer, but 505 * do not block. 506 */ 507 if (mtx_trylock(&harvestring.lockout_mtx)) { 508 509 tail = atomic_load_acq_int(&harvestring.tail); 510 newhead = (harvestring.head + 1) % HARVEST_RING_SIZE; 511 512 if (newhead != tail) { 513 514 harvest = &harvestring.data[harvestring.head]; 515 516 /* toggle the pool for next insertion */ 517 harvest->pool = random_state.which; 518 random_state.which = !random_state.which; 519 520 /* Stuff the harvested data into the ring */ 521 harvest->somecounter = somecounter; 522 count = count > HARVESTSIZE ? HARVESTSIZE : count; 523 memcpy(harvest->entropy, entropy, count); 524 harvest->size = count; 525 harvest->bits = bits; 526 harvest->frac = frac; 527 harvest->source = origin; 528 529 /* Bump the ring counter and shake the reseed 530 * process 531 */ 532 harvestring.head = newhead; 533 wakeup(&harvestring.head); 534 535 } 536 mtx_unlock(&harvestring.lockout_mtx); 537 538 } 539 540 } 541} 542 543/* Helper routine to perform explicit reseeds */ 544void 545random_reseed(void) 546{ 547 reseed(FAST); 548} 549