diff -urpNX dontdiff linux-2.5.50/drivers/char/random.c linux-2.5.50-random/drivers/char/random.c
--- linux-2.5.50/drivers/char/random.c	2002-11-17 23:29:22.000000000 -0500
+++ linux-2.5.50-random/drivers/char/random.c	2002-11-29 17:03:10.000000000 -0500
@@ -240,25 +240,25 @@
  * Eastlake, Steve Crocker, and Jeff Schiller.
  */
 
-#include
 #include
-#include
+#include
+#include
+#include
+#include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
 #include
+#include
+#include
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
 #include
-#include
-#include
 
 /*
  * Configuration information
@@ -341,7 +341,7 @@ static struct poolinfo {
  * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
  * Transactions on Modeling and Computer Simulation 2(3):179-194.
  * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
- * II. ACM Transactions on Mdeling and Computer Simulation 4:254-266)
+ * II. ACM Transactions on Modeling and Computer Simulation 4:254-266)
  *
  * Thanks to Colin Plumb for suggesting this.
  *
@@ -625,23 +625,43 @@ static __u32 *batch_entropy_pool;
 static int *batch_entropy_credit;
 static int batch_max;
 static int batch_head, batch_tail;
-static void batch_entropy_process(void *private_);
-static DECLARE_WORK(batch_work, batch_entropy_process, NULL);
+
+/*
+ * entropy_lock protects these 5 variables and the data they point to.
+ * It is acquired in interrupt context, so in process or BH context it
+ * must be taken with interrupts disabled.
+ */
+static spinlock_t entropy_lock = SPIN_LOCK_UNLOCKED;
+
+static void batch_entropy_process(unsigned long private_);
+static DECLARE_TASKLET(batch_work, batch_entropy_process, 0);
+struct entropy_data {
+	__u32 entropy[2];
+	int credit;
+};
 
 /* note: the size must be a power of 2 */
 static int __init batch_entropy_init(int size, struct entropy_store *r)
 {
-	batch_entropy_pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
-	if (!batch_entropy_pool)
+	void *pool, *credit;
+
+	pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
+	if (!pool)
 		return -1;
-	batch_entropy_credit =kmalloc(size*sizeof(int), GFP_KERNEL);
-	if (!batch_entropy_credit) {
-		kfree(batch_entropy_pool);
+	credit = kmalloc(size*sizeof(int), GFP_KERNEL);
+	if (!credit) {
+		kfree(pool);
 		return -1;
 	}
+
+	spin_lock_irq(&entropy_lock);
+	batch_entropy_pool = pool;
+	batch_entropy_credit = credit;
 	batch_head = batch_tail = 0;
 	batch_max = size;
-	batch_work.data = r;
+	batch_work.data = (unsigned long)r;
+	spin_unlock_irq(&entropy_lock);
+
 	return 0;
 }
 
@@ -649,15 +669,21 @@ static int __init batch_entropy_init(int
  * Changes to the entropy data is put into a queue rather than being added to
  * the entropy counts directly. This is presumably to avoid doing heavy
  * hashing calculations during an interrupt in add_timer_randomness().
- * Instead, the entropy is only added to the pool by keventd.
+ * Instead, the entropy is added to the pool next time we run the tasklets.
  */
 void batch_entropy_store(u32 a, u32 b, int num)
 {
 	int new;
+	unsigned long flags;
 
 	if (!batch_max)
 		return;
-
+
+	/*
+	 * This function is _probably_ only called in irq context, but
+	 * better safe than sorry.
+	 */
+	spin_lock_irqsave(&entropy_lock, flags);
 	batch_entropy_pool[2*batch_head] = a;
 	batch_entropy_pool[(2*batch_head) + 1] = b;
 	batch_entropy_credit[batch_head] = num;
@@ -667,11 +693,12 @@ void batch_entropy_store(u32 a, u32 b, i
 		/*
 		 * Schedule it for the next timer tick:
 		 */
-		schedule_delayed_work(&batch_work, 1);
+		tasklet_schedule(&batch_work);
 		batch_head = new;
 	} else {
 		DEBUG_ENT("batch entropy buffer full\n");
 	}
+	spin_unlock_irqrestore(&entropy_lock, flags);
 }
 
 /*
@@ -679,25 +706,34 @@ void batch_entropy_store(u32 a, u32 b, i
  * store (normally random_state). If that store has enough entropy, alternate
  * between randomizing the data of the primary and secondary stores.
  */
-static void batch_entropy_process(void *private_)
+static void batch_entropy_process(unsigned long private_)
 {
 	struct entropy_store *r = (struct entropy_store *) private_, *p;
 	int max_entropy = r->poolinfo.POOLBITS;
+	struct entropy_data cache;
 
 	if (!batch_max)
 		return;
 
 	p = r;
+	spin_lock_irq(&entropy_lock);
 	while (batch_head != batch_tail) {
+		cache.entropy[0] = batch_entropy_pool[2*batch_tail];
+		cache.entropy[1] = batch_entropy_pool[2*batch_tail + 1];
+		cache.credit = batch_entropy_credit[batch_tail];
+		batch_tail = (batch_tail+1) & (batch_max-1);
+		spin_unlock_irq(&entropy_lock);
+
 		if (r->entropy_count >= max_entropy) {
 			r = (r == sec_random_state) ? random_state :
 				sec_random_state;
 			max_entropy = r->poolinfo.POOLBITS;
 		}
-		add_entropy_words(r, batch_entropy_pool + 2*batch_tail, 2);
-		credit_entropy_store(r, batch_entropy_credit[batch_tail]);
-		batch_tail = (batch_tail+1) & (batch_max-1);
+		add_entropy_words(r, cache.entropy, 2);
+		credit_entropy_store(r, cache.credit);
+		spin_lock_irq(&entropy_lock);
 	}
+	spin_unlock_irq(&entropy_lock);
 
 	if (p->entropy_count >= random_read_wakeup_thresh)
 		wake_up_interruptible(&random_read_wait);
 }
@@ -1738,7 +1774,8 @@ static int change_poolsize(int poolsize)
 	sysctl_init_random(new_store);
 
 	old_store = random_state;
-	random_state = batch_work.data = new_store;
+	random_state = new_store;
+	batch_work.data = (unsigned long)new_store;
 	free_entropy_store(old_store);
 	return 0;
 }
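
For reviewers unfamiliar with the pattern, the scheme above boils down to the
following self-contained sketch. It is illustrative only, not part of the
patch: the demo_* names are made up, while the tasklet and spinlock calls are
the stock 2.5 APIs used above. An irq-context producer appends to a
power-of-two ring under the lock and kicks a tasklet; the tasklet drains the
ring, dropping the lock around the expensive per-sample work, exactly as
batch_entropy_process() does with entropy_lock:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#define DEMO_RING_SIZE 64			/* must be a power of 2 */

static u32 demo_ring[DEMO_RING_SIZE];
static int demo_head, demo_tail;		/* head == tail means empty */
static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;

static void demo_process(unsigned long data);
static DECLARE_TASKLET(demo_tasklet, demo_process, 0);

/* Producer: safe to call from any context, including hard irq. */
static void demo_store(u32 sample)
{
	unsigned long flags;
	int new;

	spin_lock_irqsave(&demo_lock, flags);
	new = (demo_head + 1) & (DEMO_RING_SIZE - 1);
	if (new != demo_tail) {			/* drop samples when full */
		demo_ring[demo_head] = sample;
		demo_head = new;
		tasklet_schedule(&demo_tasklet);
	}
	spin_unlock_irqrestore(&demo_lock, flags);
}

/*
 * Consumer: runs in softirq context with irqs enabled, so plain
 * spin_lock_irq() is fine; the lock is never held across the
 * heavyweight per-sample work.
 */
static void demo_process(unsigned long data)
{
	u32 sample;

	spin_lock_irq(&demo_lock);
	while (demo_head != demo_tail) {
		sample = demo_ring[demo_tail];
		demo_tail = (demo_tail + 1) & (DEMO_RING_SIZE - 1);
		spin_unlock_irq(&demo_lock);

		/* the expensive work (hashing, in random.c) goes here,
		 * with interrupts free to queue more samples meanwhile */
		printk(KERN_DEBUG "demo: %08x\n", sample);

		spin_lock_irq(&demo_lock);
	}
	spin_unlock_irq(&demo_lock);
}

The point of the unlock/relock dance in the consumer is that only the ring
indices need the lock; the mixing itself runs on a private copy (struct
entropy_data cache in the patch), so entropy_lock is never held for the
duration of the hash.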