Fix a major faux pas of mine. I was causing 2 very bad things to
happen in interrupt context: 1) sleep locks, and 2) malloc/free
calls.

1) is fixed by using spin locks instead.

2) is fixed by preallocating a FIFO (implemented with a STAILQ)
   and using elements from this FIFO instead; see the sketch
   below. This turns out to be rather fast.
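
To make the pattern concrete, here is a minimal userspace sketch of
the same idea: allocate every buffer up front, keep the spares on a
STAILQ free list, and guard that list with a spin lock so the hot
path never sleeps and never calls malloc()/free(). This is an
illustration, not code from the commit; a pthread spin lock stands in
for the kernel's MTX_SPIN mutex, and all names (struct buf, freelist,
NBUFFERS, buf_get(), buf_put()) are hypothetical.

/*
 * Hypothetical userspace sketch (not from the commit): the
 * preallocate-and-recycle pattern, with a pthread spin lock in
 * place of a kernel MTX_SPIN mutex and <sys/queue.h> supplying
 * the STAILQ macros.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>

#define NBUFFERS	1024	/* plays the role of EMPTYBUFFERS */

struct buf {
	char			data[16];
	STAILQ_ENTRY(buf)	next;
};

static STAILQ_HEAD(, buf) freelist = STAILQ_HEAD_INITIALIZER(freelist);
static pthread_spinlock_t freelist_lock;

/* One-time setup: do all the allocation before the hot path runs. */
static void
freelist_init(void)
{
	struct buf *bp;
	int i;

	pthread_spin_init(&freelist_lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < NBUFFERS; i++) {
		if ((bp = malloc(sizeof(*bp))) == NULL)
			abort();	/* setup is allowed to fail loudly */
		STAILQ_INSERT_TAIL(&freelist, bp, next);
	}
}

/* Hot path: take a buffer without allocating; NULL means "tough". */
static struct buf *
buf_get(void)
{
	struct buf *bp = NULL;

	pthread_spin_lock(&freelist_lock);
	if (!STAILQ_EMPTY(&freelist)) {
		bp = STAILQ_FIRST(&freelist);
		STAILQ_REMOVE_HEAD(&freelist, next);
	}
	pthread_spin_unlock(&freelist_lock);
	return (bp);
}

/* Consumer: recycle the buffer onto the free list instead of free(). */
static void
buf_put(struct buf *bp)
{
	pthread_spin_lock(&freelist_lock);
	STAILQ_INSERT_TAIL(&freelist, bp, next);
	pthread_spin_unlock(&freelist_lock);
}

int
main(void)
{
	struct buf *bp;

	freelist_init();
	if ((bp = buf_get()) != NULL)	/* interrupt-side: never blocks */
		buf_put(bp);		/* kthread-side: recycle, don't free */
	return (0);
}

The trade-off is visible in buf_get(): once the pool is empty it
returns NULL and the caller simply drops the event, which is the same
"if we didn't obtain a buffer, tough" policy the diff adopts in
random_harvest_internal().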

OK'ed by:	re (scottl)
Thanks to:	peter, jhb, rwatson, jake
Apologies to:	*
markm 2003-11-20 15:35:48 +00:00
parent dced91d8f3
commit 6a2f4748c4
3 changed files with 58 additions and 16 deletions


@@ -78,12 +78,19 @@ static void random_write_internal(void *, int);
 
 MALLOC_DEFINE(M_ENTROPY, "entropy", "Entropy harvesting buffers");
 
-/* FIFO queues holding harvested entropy */
-static struct harvestfifo {
+/* Lockable FIFO queue holding entropy buffers */
+struct entropyfifo {
 	struct mtx lock;
 	int count;
 	STAILQ_HEAD(harvestlist, harvest) head;
-} harvestfifo[ENTROPYSOURCE];
+};
+
+/* Empty entropy buffers */
+static struct entropyfifo emptyfifo;
+#define EMPTYBUFFERS	1024
+
+/* Harvested entropy */
+static struct entropyfifo harvestfifo[ENTROPYSOURCE];
 
 static struct random_systat {
 	u_int		seeded;	/* 0 causes blocking 1 allows normal output */
@@ -239,10 +246,20 @@ random_modevent(module_t mod __unused, int type, void *data __unused)
 		random_systat.seeded = 1;
 
 		/* Initialise the harvest fifos */
+		STAILQ_INIT(&emptyfifo.head);
+		emptyfifo.count = 0;
+		mtx_init(&emptyfifo.lock, "entropy harvest buffers", NULL,
+		    MTX_SPIN);
+		for (i = 0; i < EMPTYBUFFERS; i++) {
+			np = malloc(sizeof(struct harvest), M_ENTROPY,
+			    M_WAITOK);
+			STAILQ_INSERT_TAIL(&emptyfifo.head, np, next);
+		}
 		for (i = 0; i < ENTROPYSOURCE; i++) {
 			STAILQ_INIT(&harvestfifo[i].head);
 			harvestfifo[i].count = 0;
-			mtx_init(&harvestfifo[i].lock, "entropy harvest", NULL, MTX_DEF);
+			mtx_init(&harvestfifo[i].lock, "entropy harvest", NULL,
+			    MTX_SPIN);
 		}
 
 		if (bootverbose)
@@ -274,6 +291,12 @@ random_modevent(module_t mod __unused, int type, void *data __unused)
 		tsleep((void *)&random_kthread_control, PUSER, "term", 0);
 
 		/* Destroy the harvest fifos */
+		while (!STAILQ_EMPTY(&emptyfifo.head)) {
+			np = STAILQ_FIRST(&emptyfifo.head);
+			STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
+			free(np, M_ENTROPY);
+		}
+		mtx_destroy(&emptyfifo.lock);
 		for (i = 0; i < ENTROPYSOURCE; i++) {
 			while (!STAILQ_EMPTY(&harvestfifo[i].head)) {
 				np = STAILQ_FIRST(&harvestfifo[i].head);
@@ -318,7 +341,7 @@ random_kthread(void *arg __unused)
 			found = 0;
 
 			/* Lock up queue draining */
-			mtx_lock(&harvestfifo[source].lock);
+			mtx_lock_spin(&harvestfifo[source].lock);
 
 			if (!STAILQ_EMPTY(&harvestfifo[source].head)) {
@@ -327,17 +350,26 @@ random_kthread(void *arg __unused)
 				event = STAILQ_FIRST(&harvestfifo[source].head);
 				STAILQ_REMOVE_HEAD(&harvestfifo[source].head,
 				    next);
 
 				active = found = 1;
 			}
 
 			/* Unlock the queue */
-			mtx_unlock(&harvestfifo[source].lock);
+			mtx_unlock_spin(&harvestfifo[source].lock);
 
 			/* Deal with the event and dispose of it */
 			if (found) {
 				random_process_event(event);
-				free(event, M_ENTROPY);
+
+				/* Lock the empty event buffer fifo */
+				mtx_lock_spin(&emptyfifo.lock);
+				STAILQ_INSERT_TAIL(&emptyfifo.head, event, next);
+				mtx_unlock_spin(&emptyfifo.lock);
 			}
 		}
@@ -362,14 +394,26 @@ random_harvest_internal(u_int64_t somecounter, void *entropy, u_int count,
 	struct harvest *event;
 
 	/* Lock the particular fifo */
-	mtx_lock(&harvestfifo[origin].lock);
+	mtx_lock_spin(&harvestfifo[origin].lock);
 
-	/* Don't make the harvest queues too big - memory is precious */
+	/* Don't make the harvest queues too big - help to prevent
+	 * low-grade entropy swamping
+	 */
 	if (harvestfifo[origin].count < RANDOM_FIFO_MAX) {
 
-		event = malloc(sizeof(struct harvest), M_ENTROPY, M_NOWAIT);
+		/* Lock the empty event buffer fifo */
+		mtx_lock_spin(&emptyfifo.lock);
 
-		/* If we can't malloc() a buffer, tough */
+		if (!STAILQ_EMPTY(&emptyfifo.head)) {
+			event = STAILQ_FIRST(&emptyfifo.head);
+			STAILQ_REMOVE_HEAD(&emptyfifo.head, next);
+		}
+		else
+			event = NULL;
+
+		mtx_unlock_spin(&emptyfifo.lock);
+
+		/* If we didn't obtain a buffer, tough */
 		if (event) {
 
 			/* Add the harvested data to the fifo */
@@ -389,7 +433,7 @@ random_harvest_internal(u_int64_t somecounter, void *entropy, u_int count,
 	}
 
-	mtx_unlock(&harvestfifo[origin].lock);
+	mtx_unlock_spin(&harvestfifo[origin].lock);
 }


@@ -356,9 +356,7 @@ ithread_remove_handler(void *cookie)
 int
 ithread_schedule(struct ithd *ithread, int do_switch)
 {
-#if 0
 	struct int_entropy entropy;
-#endif
 	struct thread *td;
 	struct thread *ctd;
 	struct proc *p;
@@ -370,7 +368,6 @@ ithread_schedule(struct ithd *ithread, int do_switch)
 		return (EINVAL);
 
 	ctd = curthread;
-#if 0
 	/*
 	 * If any of the handlers for this ithread claim to be good
 	 * sources of entropy, then gather some.
@@ -381,7 +378,6 @@ ithread_schedule(struct ithd *ithread, int do_switch)
 		random_harvest(&entropy, sizeof(entropy), 2, 0,
 		    RANDOM_INTERRUPT);
 	}
-#endif
 
 	td = ithread->it_td;
 	p = td->td_proc;


@@ -288,6 +288,8 @@ static struct witness_order_list_entry order_lists[] = {
 	{ "turnstile chain", &lock_class_mtx_spin },
 	{ "td_contested", &lock_class_mtx_spin },
 	{ "callout", &lock_class_mtx_spin },
+	{ "entropy harvest", &lock_class_mtx_spin },
+	{ "entropy harvest buffers", &lock_class_mtx_spin },
 	/*
 	 * leaf locks
 	 */