/*-
 * Copyright (c) 2000 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/random/yarrow.c 63855 2000-07-25 21:18:47Z markm $
 */

/* NOTE NOTE NOTE - This is not finished! It will supply numbers, but
 * it is not yet cryptographically secure!!
31218887Sdim */ 32218887Sdim 33218887Sdim#include <sys/param.h> 34218887Sdim#include <sys/systm.h> 35218887Sdim#include <sys/queue.h> 36218887Sdim#include <sys/taskqueue.h> 37218887Sdim#include <sys/linker.h> 38218887Sdim#include <sys/libkern.h> 39218887Sdim#include <sys/mbuf.h> 40218887Sdim#include <sys/random.h> 41218887Sdim#include <sys/time.h> 42218887Sdim#include <sys/types.h> 43218887Sdim#include <crypto/blowfish/blowfish.h> 44218887Sdim 45218887Sdim#include <dev/randomdev/yarrow.h> 46218887Sdim 47218887Sdim/* #define DEBUG */ 48218887Sdim 49218887Sdimstatic void generator_gate(void); 50218887Sdimstatic void reseed(int); 51218887Sdimstatic void random_harvest_internal(struct timespec *, void *, u_int, u_int, u_int, enum esource); 52218887Sdim 53218887Sdim/* Structure holding the entropy state */ 54218887Sdimstruct random_state random_state; 55218887Sdim 56218887Sdim/* When enough entropy has been harvested, asynchronously "stir" it in. 57218887Sdim * The regate task is run at splsofttq() 58218887Sdim */ 59218887Sdimstatic struct task regate_task[2]; 60218887Sdim 61218887Sdimstruct context { 62218887Sdim u_int pool; 63218887Sdim} context[2] = { 64218887Sdim { 0 }, 65218887Sdim { 1 } 66218887Sdim }; 67218887Sdim 68218887Sdimstatic void 69218887Sdimregate(void *context, int pending) 70218887Sdim{ 71218887Sdim#ifdef DEBUG 72218887Sdim printf("Regate task\n"); 73218887Sdim#endif 74218887Sdim reseed(((struct context *)context)->pool); 75218887Sdim} 76218887Sdim 77218887Sdimvoid 78218887Sdimrandom_init(void) 79218887Sdim{ 80218887Sdim#ifdef DEBUG 81218887Sdim printf("Random init\n"); 82218887Sdim#endif 83218887Sdim random_state.gengateinterval = 10; 84218887Sdim random_state.bins = 10; 85218887Sdim random_state.pool[0].thresh = 100; 86218887Sdim random_state.pool[1].thresh = 160; 87218887Sdim random_state.slowoverthresh = 2; 88218887Sdim random_state.which = FAST; 89218887Sdim TASK_INIT(®ate_task[FAST], FAST, ®ate, (void *)&context[FAST]); 90218887Sdim 
TASK_INIT(®ate_task[SLOW], SLOW, ®ate, (void *)&context[SLOW]); 91218887Sdim random_init_harvester(random_harvest_internal); 92218887Sdim} 93218887Sdim 94218887Sdimvoid 95218887Sdimrandom_deinit(void) 96218887Sdim{ 97218887Sdim#ifdef DEBUG 98218887Sdim printf("Random deinit\n"); 99218887Sdim#endif 100218887Sdim random_deinit_harvester(); 101218887Sdim} 102218887Sdim 103218887Sdimstatic void 104218887Sdimreseed(int fastslow) 105218887Sdim{ 106218887Sdim /* Interrupt-context stack is a limited resource; make static 107218887Sdim * large structures; XXX Revisit - needs to move to the large 108218887Sdim * random_state structure. 109218887Sdim */ 110218887Sdim static unsigned char v[TIMEBIN][KEYSIZE]; /* v[i] */ 111218887Sdim unsigned char hash[KEYSIZE]; /* h' */ 112218887Sdim static BF_KEY hashkey; 113218887Sdim unsigned char ivec[8]; 114218887Sdim unsigned char temp[KEYSIZE]; 115218887Sdim struct entropy *bucket; 116218887Sdim int i, j; 117218887Sdim 118218887Sdim#ifdef DEBUG 119218887Sdim printf("Reseed type %d\n", fastslow); 120218887Sdim#endif 121218887Sdim 122218887Sdim /* 1. 
Hash the accumulated entropy into v[0] */ 123218887Sdim 124218887Sdim bzero((void *)&v[0], KEYSIZE); 125218887Sdim if (fastslow == SLOW) { 126218887Sdim /* Feed a hash of the slow pool into the fast pool */ 127218887Sdim for (i = 0; i < ENTROPYSOURCE; i++) { 128218887Sdim for (j = 0; j < ENTROPYBIN; j++) { 129218887Sdim bucket = &random_state.pool[SLOW].source[i].entropy[j]; 130218887Sdim if(bucket->nanotime.tv_sec || bucket->nanotime.tv_nsec) { 131218887Sdim BF_set_key(&hashkey, sizeof(struct entropy), 132218887Sdim (void *)bucket); 133218887Sdim BF_cbc_encrypt(v[0], temp, KEYSIZE, &hashkey, ivec, 134218887Sdim BF_ENCRYPT); 135218887Sdim memcpy(&v[0], temp, KEYSIZE); 136218887Sdim bucket->nanotime.tv_sec = 0; 137218887Sdim bucket->nanotime.tv_nsec = 0; 138218887Sdim } 139218887Sdim } 140218887Sdim } 141218887Sdim } 142218887Sdim 143218887Sdim for (i = 0; i < ENTROPYSOURCE; i++) { 144218887Sdim for (j = 0; j < ENTROPYBIN; j++) { 145218887Sdim bucket = &random_state.pool[FAST].source[i].entropy[j]; 146218887Sdim if(bucket->nanotime.tv_sec || bucket->nanotime.tv_nsec) { 147218887Sdim BF_set_key(&hashkey, sizeof(struct entropy), (void *)bucket); 148218887Sdim BF_cbc_encrypt(v[0], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT); 149218887Sdim memcpy(&v[0], temp, KEYSIZE); 150218887Sdim bucket->nanotime.tv_sec = 0; 151218887Sdim bucket->nanotime.tv_nsec = 0; 152218887Sdim } 153218887Sdim } 154218887Sdim } 155218887Sdim 156218887Sdim /* 2. Compute hash values for all v. _Supposed_ to be computationally 157218887Sdim * intensive. 
158218887Sdim */ 159218887Sdim 160218887Sdim if (random_state.bins > TIMEBIN) 161218887Sdim random_state.bins = TIMEBIN; 162218887Sdim for (i = 1; i < random_state.bins; i++) { 163218887Sdim bzero((void *)&v[i], KEYSIZE); 164218887Sdim /* v[i] #= h(v[i-1]) */ 165218887Sdim BF_set_key(&hashkey, KEYSIZE, v[i - 1]); 166218887Sdim BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT); 167218887Sdim memcpy(&v[i], temp, KEYSIZE); 168218887Sdim /* v[i] #= h(v[0]) */ 169218887Sdim BF_set_key(&hashkey, KEYSIZE, v[0]); 170218887Sdim BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT); 171218887Sdim memcpy(&v[i], temp, KEYSIZE); 172218887Sdim /* v[i] #= h(i) */ 173218887Sdim BF_set_key(&hashkey, sizeof(int), (unsigned char *)&i); 174218887Sdim BF_cbc_encrypt(v[i], temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT); 175218887Sdim memcpy(&v[i], temp, KEYSIZE); 176218887Sdim } 177218887Sdim 178218887Sdim /* 3. Compute a new Key. */ 179218887Sdim 180218887Sdim bzero((void *)hash, KEYSIZE); 181218887Sdim BF_set_key(&hashkey, KEYSIZE, (unsigned char *)&random_state.key); 182218887Sdim BF_cbc_encrypt(hash, temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT); 183218887Sdim memcpy(hash, temp, KEYSIZE); 184218887Sdim for (i = 1; i < random_state.bins; i++) { 185218887Sdim BF_set_key(&hashkey, KEYSIZE, v[i]); 186218887Sdim BF_cbc_encrypt(hash, temp, KEYSIZE, &hashkey, ivec, BF_ENCRYPT); 187218887Sdim memcpy(hash, temp, KEYSIZE); 188218887Sdim } 189218887Sdim BF_set_key(&random_state.key, KEYSIZE, hash); 190218887Sdim 191218887Sdim /* 4. Recompute the counter */ 192218887Sdim 193218887Sdim random_state.counter = 0; 194218887Sdim BF_cbc_encrypt((unsigned char *)&random_state.counter, temp, 195218887Sdim sizeof(random_state.counter), &random_state.key, 196218887Sdim random_state.ivec, BF_ENCRYPT); 197218887Sdim memcpy(&random_state.counter, temp, random_state.counter); 198218887Sdim 199218887Sdim /* 5. 
Reset entropy estimate accumulators to zero */ 200218887Sdim 201218887Sdim for (i = 0; i <= fastslow; i++) { 202218887Sdim for (j = 0; j < ENTROPYSOURCE; j++) { 203218887Sdim random_state.pool[i].source[j].bits = 0; 204218887Sdim random_state.pool[i].source[j].frac = 0; 205218887Sdim } 206218887Sdim } 207218887Sdim 208218887Sdim /* 6. Wipe memory of intermediate values */ 209218887Sdim 210218887Sdim bzero((void *)v, sizeof(v)); 211218887Sdim bzero((void *)temp, sizeof(temp)); 212218887Sdim bzero((void *)hash, sizeof(hash)); 213218887Sdim 214218887Sdim /* 7. Dump to seed file (done by external process) */ 215218887Sdim 216218887Sdim} 217218887Sdim 218218887Sdimu_int 219218887Sdimread_random(void *buf, u_int count) 220218887Sdim{ 221218887Sdim static u_int64_t genval; 222218887Sdim static int cur = 0; 223218887Sdim static int gate = 1; 224218887Sdim u_int i; 225218887Sdim u_int retval; 226218887Sdim intrmask_t mask; 227218887Sdim 228218887Sdim /* The reseed task must not be jumped on */ 229218887Sdim mask = splsofttq(); 230218887Sdim 231218887Sdim if (gate) { 232218887Sdim generator_gate(); 233218887Sdim random_state.outputblocks = 0; 234218887Sdim gate = 0; 235218887Sdim } 236218887Sdim if (count >= sizeof(random_state.counter)) { 237218887Sdim retval = 0; 238218887Sdim for (i = 0; i < count; i += sizeof(random_state.counter)) { 239218887Sdim random_state.counter++; 240218887Sdim BF_cbc_encrypt((unsigned char *)&random_state.counter, 241218887Sdim (unsigned char *)&genval, 242218887Sdim sizeof(random_state.counter), 243218887Sdim &random_state.key, random_state.ivec, BF_ENCRYPT); 244218887Sdim memcpy((char *)buf + i, &genval, 245218887Sdim sizeof(random_state.counter)); 246218887Sdim if (++random_state.outputblocks >= random_state.gengateinterval) { 247218887Sdim generator_gate(); 248218887Sdim random_state.outputblocks = 0; 249218887Sdim } 250218887Sdim retval += sizeof(random_state.counter); 251218887Sdim } 252218887Sdim } 253218887Sdim else { 254218887Sdim if 
(!cur) { 255218887Sdim random_state.counter++; 256218887Sdim BF_cbc_encrypt((unsigned char *)&random_state.counter, 257218887Sdim (unsigned char *)&genval, 258218887Sdim sizeof(random_state.counter), 259218887Sdim &random_state.key, random_state.ivec, 260218887Sdim BF_ENCRYPT); 261218887Sdim memcpy(buf, &genval, count); 262218887Sdim cur = sizeof(random_state.counter) - count; 263218887Sdim if (++random_state.outputblocks >= random_state.gengateinterval) { 264218887Sdim generator_gate(); 265218887Sdim random_state.outputblocks = 0; 266218887Sdim } 267218887Sdim retval = count; 268218887Sdim } 269218887Sdim else { 270218887Sdim retval = cur < count ? cur : count; 271218887Sdim memcpy(buf, 272218887Sdim (char *)&genval + 273218887Sdim (sizeof(random_state.counter) - cur), 274218887Sdim retval); 275218887Sdim cur -= retval; 276218887Sdim } 277218887Sdim } 278218887Sdim splx(mask); 279218887Sdim return retval; 280218887Sdim} 281218887Sdim 282218887Sdimvoid 283218887Sdimwrite_random(void *buf, u_int count) 284218887Sdim{ 285218887Sdim u_int i; 286218887Sdim intrmask_t mask; 287218887Sdim struct timespec timebuf; 288218887Sdim 289218887Sdim /* The reseed task must not be jumped on */ 290218887Sdim mask = splsofttq(); 291218887Sdim /* arbitrarily break the input up into 8-byte chunks */ 292218887Sdim for (i = 0; i < count; i += 8) { 293218887Sdim nanotime(&timebuf); 294218887Sdim random_harvest_internal(&timebuf, (char *)buf + i, 8, 0, 0, 295218887Sdim RANDOM_WRITE); 296218887Sdim } 297218887Sdim /* Maybe the loop iterated at least once */ 298218887Sdim if (i > count) 299218887Sdim i -= 8; 300218887Sdim /* Get the last bytes even if the input length is not a multiple of 8 */ 301218887Sdim count %= 8; 302218887Sdim if (count) { 303218887Sdim nanotime(&timebuf); 304218887Sdim random_harvest_internal(&timebuf, (char *)buf + i, count, 0, 0, 305218887Sdim RANDOM_WRITE); 306218887Sdim } 307218887Sdim reseed(FAST); 308218887Sdim splx(mask); 309218887Sdim} 310218887Sdim 
311218887Sdimstatic void 312218887Sdimgenerator_gate(void) 313218887Sdim{ 314218887Sdim int i; 315218887Sdim unsigned char temp[KEYSIZE]; 316218887Sdim intrmask_t mask; 317218887Sdim 318218887Sdim#ifdef DEBUG 319218887Sdim printf("Generator gate\n"); 320218887Sdim#endif 321218887Sdim 322218887Sdim /* The reseed task must not be jumped on */ 323218887Sdim mask = splsofttq(); 324218887Sdim 325218887Sdim for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) { 326218887Sdim random_state.counter++; 327218887Sdim BF_cbc_encrypt((unsigned char *)&random_state.counter, 328218887Sdim &(temp[i]), sizeof(random_state.counter), 329218887Sdim &random_state.key, random_state.ivec, BF_ENCRYPT); 330218887Sdim } 331218887Sdim 332218887Sdim BF_set_key(&random_state.key, KEYSIZE, temp); 333218887Sdim bzero((void *)temp, KEYSIZE); 334218887Sdim 335218887Sdim splx(mask); 336218887Sdim} 337218887Sdim 338218887Sdim/* Entropy harvesting routine. This is supposed to be fast; do 339218887Sdim * not do anything slow in here! 
340218887Sdim */ 341218887Sdim 342218887Sdimstatic void 343218887Sdimrandom_harvest_internal(struct timespec *timep, void *entropy, u_int count, 344218887Sdim u_int bits, u_int frac, enum esource origin) 345218887Sdim{ 346218887Sdim u_int insert; 347218887Sdim int which; /* fast or slow */ 348218887Sdim struct entropy *bucket; 349218887Sdim struct source *source; 350218887Sdim struct pool *pool; 351218887Sdim intrmask_t mask; 352218887Sdim u_int64_t entropy_buf; 353218887Sdim 354218887Sdim#ifdef DEBUG 355218887Sdim printf("Random harvest\n"); 356218887Sdim#endif 357218887Sdim if (origin < ENTROPYSOURCE) { 358218887Sdim 359218887Sdim /* Called inside irq handlers; protect from interference */ 360218887Sdim mask = splhigh(); 361218887Sdim 362218887Sdim which = random_state.which; 363218887Sdim pool = &random_state.pool[which]; 364218887Sdim source = &pool->source[origin]; 365218887Sdim 366218887Sdim insert = source->current + 1; 367218887Sdim if (insert >= ENTROPYBIN) 368218887Sdim insert = 0; 369218887Sdim 370218887Sdim bucket = &source->entropy[insert]; 371218887Sdim 372218887Sdim if (!bucket->nanotime.tv_sec && !bucket->nanotime.tv_nsec) { 373218887Sdim 374218887Sdim /* nanotime provides clock jitter */ 375218887Sdim bucket->nanotime = *timep; 376218887Sdim 377218887Sdim /* the harvested entropy */ 378218887Sdim count = count > sizeof(entropy_buf) 379218887Sdim ? 
sizeof(entropy_buf) 380218887Sdim : count; 381218887Sdim memcpy(&entropy_buf, entropy, count); 382218887Sdim /* XOR it in to really foul up the works */ 383218887Sdim bucket->data ^= entropy_buf; 384218887Sdim 385218887Sdim /* update the estimates - including "fractional bits" */ 386218887Sdim source->bits += bits; 387218887Sdim source->frac += frac; 388218887Sdim if (source->frac >= 1024) { 389218887Sdim source->bits += source->frac / 1024; 390218887Sdim source->frac %= 1024; 391218887Sdim } 392218887Sdim if (source->bits >= pool->thresh) { 393218887Sdim /* XXX Slowoverthresh needs to be considered */ 394218887Sdim taskqueue_enqueue(taskqueue_swi, ®ate_task[which]); 395218887Sdim } 396218887Sdim 397218887Sdim /* bump the insertion point */ 398218887Sdim source->current = insert; 399218887Sdim 400218887Sdim /* toggle the pool for next insertion */ 401218887Sdim random_state.which = !random_state.which; 402218887Sdim 403218887Sdim } 404218887Sdim splx(mask); 405218887Sdim } 406218887Sdim} 407218887Sdim