Improve kernel dumping reliability for busy ATA channels:

- Generate fake channel interrupts even if the channel is busy with a
previous request, to let it finish. Without this, dump requests were just
queued and never processed (see the sketch below).
- Drop the pre-dump request queue when dumping. The ATA code, working in
dumping (interruptless) mode, is unable to handle a long request queue.
Besides, to get a coherent dump we should perform as few unrelated actions
as possible anyway.
mav 2009-05-01 08:03:46 +00:00
parent 700abeab8a
commit e304146698
3 changed files with 35 additions and 11 deletions
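
The first change amounts to a polling loop: with interrupts disabled during a
kernel dump, nothing will ever signal completion of the request already on the
hardware, so ata_start() now invokes the interrupt handler by hand until the
channel drains. A minimal standalone sketch of that pattern (hypothetical
channel type and poll_channel_while_dumping() helper; the real driver uses
ch->running, ata_interrupt() and DELAY(10), as in the ata-queue.c hunk below):

struct channel {
    void *running;                  /* request currently on the hardware */
    void (*intr)(struct channel *); /* interrupt handler, callable directly */
};

static void
poll_channel_while_dumping(struct channel *ch)
{
    /* Interrupts are off while dumping, so fake them: keep calling the
     * handler until the in-flight request completes and the channel
     * goes idle. The kernel adds a short DELAY(10) between polls. */
    while (ch->running != NULL)
        ch->intr(ch);
}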

sys/dev/ata/ata-all.h

@@ -584,6 +584,7 @@ void ata_finish(struct ata_request *request);
 void ata_timeout(struct ata_request *);
 void ata_catch_inflight(device_t dev);
 void ata_fail_requests(device_t dev);
+void ata_drop_requests(device_t dev);
 char *ata_cmd2str(struct ata_request *request);
 
 /* ata-lowlevel.c: */

sys/dev/ata/ata-disk.c

@@ -346,15 +346,23 @@ ad_dump(void *arg, void *virtual, vm_offset_t physical,
     off_t offset, size_t length)
 {
     struct disk *dp = arg;
+    device_t dev = dp->d_drv1;
     struct bio bp;
 
+    /* XXX: Drop the pre-dump request queue. Long request queue processing
+     * causes a stack overflow in ATA working in dumping (interruptless) mode.
+     * Counter-XXX: To make the dump coherent we should avoid doing anything
+     * else while dumping.
+     */
+    ata_drop_requests(dev);
+
     /* length zero is special and really means flush buffers to media */
     if (!length) {
-        struct ata_device *atadev = device_get_softc(dp->d_drv1);
+        struct ata_device *atadev = device_get_softc(dev);
         int error = 0;
 
         if (atadev->param.support.command2 & ATA_SUPPORT_FLUSHCACHE)
-            error = ata_controlcmd(dp->d_drv1, ATA_FLUSHCACHE, 0, 0, 0);
+            error = ata_controlcmd(dev, ATA_FLUSHCACHE, 0, 0, 0);
         return error;
     }
 
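
For context, the length == 0 convention above comes from the disk dump API:
the dump routine is called repeatedly with data chunks, then once with a zero
length to flush the drive's write cache. A hedged sketch of a caller, assuming
a hypothetical dump_last_chunk() helper (the real caller is the kernel's
dumper framework, which is not part of this commit):

static int
dump_last_chunk(struct disk *dp, void *buf, off_t off, size_t len)
{
    int error;

    /* write the final chunk of dump data ... */
    error = ad_dump(dp, buf, 0, off, len);
    if (error)
        return (error);
    /* ... then a zero-length call asks the drive to flush its cache */
    return (ad_dump(dp, NULL, 0, off + len, 0));
}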

sys/dev/ata/ata-queue.c

@@ -218,20 +218,17 @@ ata_start(device_t dev)
                     ata_finish(request);
                     return;
                 }
-                if (dumping) {
-                    mtx_unlock(&ch->state_mtx);
-                    mtx_unlock(&ch->queue_mtx);
-                    while (ch->running) {
-                        ata_interrupt(ch);
-                        DELAY(10);
-                    }
-                    return;
-                }
             }
             mtx_unlock(&ch->state_mtx);
         }
     }
     mtx_unlock(&ch->queue_mtx);
+    if (dumping) {
+        while (ch->running) {
+            ata_interrupt(ch);
+            DELAY(10);
+        }
+    }
 }
 
 void
@@ -560,6 +557,24 @@ ata_fail_requests(device_t dev)
     }
 }
 
+/*
+ * Rudely drop all requests queued to the channel of the specified device.
+ * XXX: The requests are leaked; use only in a fatal case.
+ */
+void
+ata_drop_requests(device_t dev)
+{
+    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
+    struct ata_request *request, *tmp;
+
+    mtx_lock(&ch->queue_mtx);
+    TAILQ_FOREACH_SAFE(request, &ch->ata_queue, chain, tmp) {
+        TAILQ_REMOVE(&ch->ata_queue, request, chain);
+        request->result = ENXIO;
+    }
+    mtx_unlock(&ch->queue_mtx);
+}
+
 static u_int64_t
 ata_get_lba(struct ata_request *request)
 {
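
A note on the new function: it uses TAILQ_FOREACH_SAFE because entries are
removed from the queue while iterating; plain TAILQ_FOREACH would follow the
chain pointer of an element that has just been unlinked. A standalone
illustration of the same pattern (userland, using FreeBSD's sys/queue.h;
unlike the kernel XXX above, this one frees what it drops):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
    int id;
    TAILQ_ENTRY(req) chain;
};
TAILQ_HEAD(reqq, req);

int
main(void)
{
    struct reqq q = TAILQ_HEAD_INITIALIZER(q);
    struct req *r, *tmp;
    int i;

    for (i = 0; i < 3; i++) {
        if ((r = malloc(sizeof(*r))) == NULL)
            return (1);
        r->id = i;
        TAILQ_INSERT_TAIL(&q, r, chain);
    }
    /* The _SAFE variant caches the next element in 'tmp' before the body
     * runs, so removing and freeing the current element is harmless. */
    TAILQ_FOREACH_SAFE(r, &q, chain, tmp) {
        TAILQ_REMOVE(&q, r, chain);
        printf("dropped request %d\n", r->id);
        free(r);
    }
    return (0);
}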