diff --git a/sys/dev/ata/ata-all.h b/sys/dev/ata/ata-all.h index c590ea2e9678..e22758fb5936 100644 --- a/sys/dev/ata/ata-all.h +++ b/sys/dev/ata/ata-all.h @@ -584,6 +584,7 @@ void ata_finish(struct ata_request *request); void ata_timeout(struct ata_request *); void ata_catch_inflight(device_t dev); void ata_fail_requests(device_t dev); +void ata_drop_requests(device_t dev); char *ata_cmd2str(struct ata_request *request); /* ata-lowlevel.c: */ diff --git a/sys/dev/ata/ata-disk.c b/sys/dev/ata/ata-disk.c index e11f3f90fb2e..15b9edc30b2c 100644 --- a/sys/dev/ata/ata-disk.c +++ b/sys/dev/ata/ata-disk.c @@ -346,15 +346,23 @@ ad_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct disk *dp = arg; + device_t dev = dp->d_drv1; struct bio bp; + /* XXX: Drop pre-dump request queue. Long request queue processing + * causes stack overflow in ATA working in dumping (interruptless) mode. + * Counter-XXX: To make dump coherent we should avoid doing anything + * else while dumping. 
+ */ + ata_drop_requests(dev); + /* length zero is special and really means flush buffers to media */ if (!length) { - struct ata_device *atadev = device_get_softc(dp->d_drv1); + struct ata_device *atadev = device_get_softc(dev); int error = 0; if (atadev->param.support.command2 & ATA_SUPPORT_FLUSHCACHE) - error = ata_controlcmd(dp->d_drv1, ATA_FLUSHCACHE, 0, 0, 0); + error = ata_controlcmd(dev, ATA_FLUSHCACHE, 0, 0, 0); return error; } diff --git a/sys/dev/ata/ata-queue.c b/sys/dev/ata/ata-queue.c index 4a4bb190f5f1..87568844e58e 100644 --- a/sys/dev/ata/ata-queue.c +++ b/sys/dev/ata/ata-queue.c @@ -218,20 +218,17 @@ ata_start(device_t dev) ata_finish(request); return; } - if (dumping) { - mtx_unlock(&ch->state_mtx); - mtx_unlock(&ch->queue_mtx); - while (ch->running) { - ata_interrupt(ch); - DELAY(10); - } - return; - } } mtx_unlock(&ch->state_mtx); } } mtx_unlock(&ch->queue_mtx); + if (dumping) { + while (ch->running) { + ata_interrupt(ch); + DELAY(10); + } + } } void @@ -560,6 +557,24 @@ ata_fail_requests(device_t dev) } } +/* + * Rudely drop all requests queued to the channel of specified device. + * XXX: The requests are leaked, use only in fatal case. + */ +void +ata_drop_requests(device_t dev) +{ + struct ata_channel *ch = device_get_softc(device_get_parent(dev)); + struct ata_request *request, *tmp; + + mtx_lock(&ch->queue_mtx); + TAILQ_FOREACH_SAFE(request, &ch->ata_queue, chain, tmp) { + TAILQ_REMOVE(&ch->ata_queue, request, chain); + request->result = ENXIO; + } + mtx_unlock(&ch->queue_mtx); +} + static u_int64_t ata_get_lba(struct ata_request *request) {