Implement automatic SCSI sense fetching for mvs(4).

Make a few improvements/changes to the ATAPI PIO support so that it passes most
of the scgcheck (cdrtools) tests.
Alexander Motin 2011-04-12 16:01:27 +00:00
parent 9974d151ec
commit 97fd3ac63f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=220569
2 changed files with 111 additions and 38 deletions
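
The core of the change is the new REQUEST SENSE recovery path: when a command
completes with CHECK CONDITION and autosense is not disabled, the driver now
parks the CCB in a hold slot, issues a REQUEST SENSE (opcode 0x03) into the held
command's sense buffer, and marks the sense data valid once that recovery
command completes. The minimal, self-contained sketch below illustrates just
that flow; every name in it (toy_ccb, toy_channel, issue_request_sense, ...) is
an illustrative stand-in, not the real CAM or mvs(4) structures from the diff
that follows.

/*
 * Minimal sketch of the sense-fetch recovery flow added by this commit.
 * All types and names here are hypothetical stand-ins for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SLOTS            4
#define STATUS_OK        0
#define STATUS_SCSI_ERR  1      /* device returned CHECK CONDITION */
#define AUTOSNS_VALID    0x80

struct toy_ccb {
	int      status;
	int      recovery_type;    /* NONE / READ_LOG / REQUEST_SENSE */
	int      recovery_slot;    /* which held command we are recovering */
	uint8_t  cdb[16];
	uint8_t  sense[18];
	size_t   sense_len;
};

enum { RECOVERY_NONE, RECOVERY_READ_LOG, RECOVERY_REQUEST_SENSE };

struct toy_channel {
	struct toy_ccb *hold[SLOTS];  /* commands parked while fetching sense */
	int             numhslots;
	int             recoverycmd;  /* a recovery command is in flight */
};

/* Build a REQUEST SENSE (opcode 0x03) aimed at the held command's sense buffer. */
static void
issue_request_sense(struct toy_channel *ch, int slot, struct toy_ccb *rs)
{
	struct toy_ccb *held = ch->hold[slot];

	memset(rs, 0, sizeof(*rs));
	rs->recovery_type = RECOVERY_REQUEST_SENSE;
	rs->recovery_slot = slot;
	rs->sense_len = sizeof(held->sense);
	rs->cdb[0] = 0x03;                    /* REQUEST SENSE */
	rs->cdb[4] = (uint8_t)rs->sense_len;  /* allocation length */
	ch->recoverycmd = 1;
}

/* Completion path: attach the fetched sense data to the held command. */
static void
process_request_sense(struct toy_channel *ch, struct toy_ccb *rs)
{
	int slot = rs->recovery_slot;
	struct toy_ccb *held = ch->hold[slot];

	ch->recoverycmd = 0;
	if (rs->status == STATUS_OK) {
		memcpy(held->sense, rs->sense, rs->sense_len);
		held->status |= AUTOSNS_VALID;
	}
	ch->hold[slot] = NULL;
	ch->numhslots--;
	printf("slot %d completed, sense %s\n", slot,
	    (held->status & AUTOSNS_VALID) ? "attached" : "unavailable");
}

int
main(void)
{
	struct toy_channel ch = { 0 };
	struct toy_ccb failed = { .status = STATUS_SCSI_ERR };
	struct toy_ccb rs;

	/* A command failed with CHECK CONDITION: park it and go fetch sense. */
	ch.hold[0] = &failed;
	ch.numhslots = 1;
	issue_request_sense(&ch, 0, &rs);

	/* Pretend the REQUEST SENSE completed successfully. */
	rs.status = STATUS_OK;
	rs.sense[0] = 0x70;                   /* fixed-format sense, for show */
	process_request_sense(&ch, &rs);
	return (0);
}

When the REQUEST SENSE itself fails, the driver instead flags the held command
with CAM_AUTOSENSE_FAIL, as the mvs_process_request_sense() hunk below shows.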

@@ -86,14 +86,21 @@ static void mvs_requeue_frozen(device_t dev);
static void mvs_execute_transaction(struct mvs_slot *slot);
static void mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et);
static void mvs_issue_read_log(device_t dev);
static void mvs_issue_recovery(device_t dev);
static void mvs_process_read_log(device_t dev, union ccb *ccb);
static void mvs_process_request_sense(device_t dev, union ccb *ccb);
static void mvsaction(struct cam_sim *sim, union ccb *ccb);
static void mvspoll(struct cam_sim *sim);
MALLOC_DEFINE(M_MVS, "MVS driver", "MVS driver data buffers");
#define recovery_type spriv_field0
#define RECOVERY_NONE 0
#define RECOVERY_READ_LOG 1
#define RECOVERY_REQUEST_SENSE 2
#define recovery_slot spriv_field1
static int
mvs_ch_probe(device_t dev)
{
@@ -822,7 +829,8 @@ mvs_legacy_intr(device_t dev)
union ccb *ccb = slot->ccb;
enum mvs_err_type et = MVS_ERR_NONE;
int port;
u_int length;
u_int length, resid, size;
uint8_t buf[2];
uint8_t status, ireason;
/* Clear interrupt and get status. */
@@ -895,6 +903,7 @@ mvs_legacy_intr(device_t dev)
} else { /* ATAPI PIO */
length = ATA_INB(ch->r_mem,ATA_CYL_LSB) |
(ATA_INB(ch->r_mem,ATA_CYL_MSB) << 8);
size = min(ch->transfersize, length);
ireason = ATA_INB(ch->r_mem,ATA_IREASON);
switch ((ireason & (ATA_I_CMD | ATA_I_IN)) |
(status & ATA_S_DRQ)) {
@@ -913,7 +922,10 @@ mvs_legacy_intr(device_t dev)
}
ATA_OUTSW_STRM(ch->r_mem, ATA_DATA,
(uint16_t *)(ccb->csio.data_ptr + ch->donecount),
length / 2);
(size + 1) / 2);
for (resid = ch->transfersize + (size & 1);
resid < length; resid += sizeof(int16_t))
ATA_OUTW(ch->r_mem, ATA_DATA, 0);
ch->donecount += length;
/* Set next transfer size according to HW capabilities */
ch->transfersize = min(ccb->csio.dxfer_len - ch->donecount,
@@ -927,9 +939,19 @@ mvs_legacy_intr(device_t dev)
et = MVS_ERR_TFE;
goto end_finished;
}
ATA_INSW_STRM(ch->r_mem, ATA_DATA,
(uint16_t *)(ccb->csio.data_ptr + ch->donecount),
length / 2);
if (size >= 2) {
ATA_INSW_STRM(ch->r_mem, ATA_DATA,
(uint16_t *)(ccb->csio.data_ptr + ch->donecount),
size / 2);
}
if (size & 1) {
ATA_INSW_STRM(ch->r_mem, ATA_DATA, (void*)buf, 1);
((uint8_t *)ccb->csio.data_ptr + ch->donecount +
(size & ~1))[0] = buf[0];
}
for (resid = ch->transfersize + (size & 1);
resid < length; resid += sizeof(int16_t))
ATA_INW(ch->r_mem, ATA_DATA);
ch->donecount += length;
/* Set next transfer size according to HW capabilities */
ch->transfersize = min(ccb->csio.dxfer_len - ch->donecount,
@@ -1363,8 +1385,7 @@ mvs_legacy_execute_transaction(struct mvs_slot *slot)
ATA_OUTL(ch->r_mem, DMA_C, DMA_C_START |
(((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) ?
DMA_C_READ : 0));
} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)
ch->fake_busy = 1;
}
}
/* Start command execution timeout */
callout_reset(&slot->timeout, (int)ccb->ccb_h.timeout * hz / 1000,
@@ -1579,6 +1600,10 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
mvs_tfd_read(dev, ccb);
} else
bzero(res, sizeof(*res));
} else {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
ch->basic_dma == 0)
ccb->csio.resid = ccb->csio.dxfer_len - ch->donecount;
}
if (ch->numpslots == 0 || ch->basic_dma) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
@@ -1591,7 +1616,7 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
if (et != MVS_ERR_NONE)
ch->eslots |= (1 << slot->slot);
/* In case of error, freeze device for proper recovery. */
if ((et != MVS_ERR_NONE) && (!ch->readlog) &&
if ((et != MVS_ERR_NONE) && (!ch->recoverycmd) &&
!(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
xpt_freeze_devq(ccb->ccb_h.path, 1);
ccb->ccb_h.status |= CAM_DEV_QFRZN;
@@ -1622,7 +1647,7 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
break;
case MVS_ERR_SATA:
ch->fatalerr = 1;
if (!ch->readlog) {
if (!ch->recoverycmd) {
xpt_freeze_simq(ch->sim, 1);
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
@@ -1630,7 +1655,7 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
ccb->ccb_h.status |= CAM_UNCOR_PARITY;
break;
case MVS_ERR_TIMEOUT:
if (!ch->readlog) {
if (!ch->recoverycmd) {
xpt_freeze_simq(ch->sim, 1);
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
@@ -1672,10 +1697,15 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
xpt_release_simq(ch->sim, TRUE);
}
/* If it was our READ LOG command - process it. */
if (ch->readlog) {
if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
mvs_process_read_log(dev, ccb);
/* If it was NCQ command error, put result on hold. */
} else if (et == MVS_ERR_NCQ) {
/* If it was our REQUEST SENSE command - process it. */
} else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
mvs_process_request_sense(dev, ccb);
/* If it was NCQ or ATAPI command error, put result on hold. */
} else if (et == MVS_ERR_NCQ ||
((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
(ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
ch->hold[slot->slot] = ccb;
ch->holdtag[slot->slot] = slot->tag;
ch->numhslots++;
@@ -1700,8 +1730,8 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
ch->eslots = 0;
}
/* if there commands on hold, we can do READ LOG. */
if (!ch->readlog && ch->numhslots)
mvs_issue_read_log(dev);
if (!ch->recoverycmd && ch->numhslots)
mvs_issue_recovery(dev);
}
/* If all the rest of commands are in timeout - give them chance. */
} else if ((ch->rslots & ~ch->toslots) == 0 &&
@@ -1716,14 +1746,15 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
}
static void
mvs_issue_read_log(device_t dev)
mvs_issue_recovery(device_t dev)
{
struct mvs_channel *ch = device_get_softc(dev);
union ccb *ccb;
struct ccb_ataio *ataio;
struct ccb_scsiio *csio;
int i;
ch->readlog = 1;
ch->recoverycmd = 1;
/* Find some holden command. */
for (i = 0; i < MVS_MAX_SLOTS; i++) {
if (ch->hold[i])
@@ -1735,25 +1766,44 @@ mvs_issue_read_log(device_t dev)
return; /* XXX */
}
ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */
ccb->ccb_h.func_code = XPT_ATA_IO;
ccb->ccb_h.flags = CAM_DIR_IN;
ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
ataio = &ccb->ataio;
ataio->data_ptr = malloc(512, M_MVS, M_NOWAIT);
if (ataio->data_ptr == NULL) {
xpt_free_ccb(ccb);
device_printf(dev, "Unable allocate memory for READ LOG command");
return; /* XXX */
if (ccb->ccb_h.func_code == XPT_ATA_IO) {
/* READ LOG */
ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
ccb->ccb_h.func_code = XPT_ATA_IO;
ccb->ccb_h.flags = CAM_DIR_IN;
ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
ataio = &ccb->ataio;
ataio->data_ptr = malloc(512, M_MVS, M_NOWAIT);
if (ataio->data_ptr == NULL) {
xpt_free_ccb(ccb);
device_printf(dev, "Unable allocate memory for READ LOG command");
return; /* XXX */
}
ataio->dxfer_len = 512;
bzero(&ataio->cmd, sizeof(ataio->cmd));
ataio->cmd.flags = CAM_ATAIO_48BIT;
ataio->cmd.command = 0x2F; /* READ LOG EXT */
ataio->cmd.sector_count = 1;
ataio->cmd.sector_count_exp = 0;
ataio->cmd.lba_low = 0x10;
ataio->cmd.lba_mid = 0;
ataio->cmd.lba_mid_exp = 0;
} else {
/* REQUEST SENSE */
ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE;
ccb->ccb_h.recovery_slot = i;
ccb->ccb_h.func_code = XPT_SCSI_IO;
ccb->ccb_h.flags = CAM_DIR_IN;
ccb->ccb_h.status = 0;
ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
csio = &ccb->csio;
csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data;
csio->dxfer_len = ch->hold[i]->csio.sense_len;
csio->cdb_len = 6;
bzero(&csio->cdb_io, sizeof(csio->cdb_io));
csio->cdb_io.cdb_bytes[0] = 0x03;
csio->cdb_io.cdb_bytes[4] = csio->dxfer_len;
}
ataio->dxfer_len = 512;
bzero(&ataio->cmd, sizeof(ataio->cmd));
ataio->cmd.flags = CAM_ATAIO_48BIT;
ataio->cmd.command = 0x2F; /* READ LOG EXT */
ataio->cmd.sector_count = 1;
ataio->cmd.sector_count_exp = 0;
ataio->cmd.lba_low = 0x10;
ataio->cmd.lba_mid = 0;
ataio->cmd.lba_mid_exp = 0;
/* Freeze SIM while doing READ LOG EXT. */
xpt_freeze_simq(ch->sim, 1);
mvs_begin_transaction(dev, ccb);
@@ -1767,7 +1817,7 @@ mvs_process_read_log(device_t dev, union ccb *ccb)
struct ata_res *res;
int i;
ch->readlog = 0;
ch->recoverycmd = 0;
data = ccb->ataio.data_ptr;
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
@@ -1820,6 +1870,28 @@ mvs_process_read_log(device_t dev, union ccb *ccb)
xpt_release_simq(ch->sim, TRUE);
}
static void
mvs_process_request_sense(device_t dev, union ccb *ccb)
{
struct mvs_channel *ch = device_get_softc(dev);
int i;
ch->recoverycmd = 0;
i = ccb->ccb_h.recovery_slot;
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID;
} else {
ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL;
}
xpt_done(ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
xpt_free_ccb(ccb);
xpt_release_simq(ch->sim, TRUE);
}
static int
mvs_wait(device_t dev, u_int s, u_int c, int t)
{
@@ -2052,6 +2124,7 @@ mvsaction(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_SEL_TIMEOUT;
break;
}
ccb->ccb_h.recovery_type = RECOVERY_NONE;
/* Check for command collision. */
if (mvs_check_collision(dev, ccb)) {
/* Freeze command. */
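
The ATAPI PIO hunks above all revolve around one piece of bookkeeping: the
device announces in the cylinder registers how many bytes it wants to move in
the current DRQ phase (length), the driver only has size = min(transfersize,
length) bytes of real payload, and the remainder is zero-padded on writes or
drained on reads one 16-bit word at a time, with donecount then advancing by
the announced length as in the hunks above. A standalone sketch of that
arithmetic, using made-up example numbers rather than the driver's registers,
looks like this:

/*
 * Standalone sketch (hypothetical values, plain C) of the ATAPI PIO
 * bookkeeping used in the hunks above.  Nothing here touches hardware;
 * it only reproduces the size/padding arithmetic.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int
min_u(unsigned int a, unsigned int b)
{

	return (a < b ? a : b);
}

int
main(void)
{
	unsigned int transfersize = 18;	/* payload we still want to move */
	unsigned int length = 32;	/* bytes the device announced for this DRQ */
	unsigned int donecount = 0;
	unsigned int resid, drained = 0;

	/* Real data for this phase: never more than the device asked for. */
	unsigned int size = min_u(transfersize, length);

	/*
	 * Writes push (size + 1) / 2 words; reads pull size / 2 words plus
	 * one odd trailing byte when size is odd.
	 */
	printf("payload words: %u, odd byte: %u\n", (size + 1) / 2, size & 1);

	/* Pad (writes) or drain (reads) whatever the device still expects. */
	for (resid = transfersize + (size & 1); resid < length;
	    resid += sizeof(uint16_t))
		drained++;
	printf("extra words padded/drained: %u\n", drained);

	/* The driver then advances donecount by the announced length. */
	donecount += length;
	printf("donecount after this DRQ phase: %u\n", donecount);
	return (0);
}

With these example numbers, a 32-byte DRQ phase carrying an 18-byte payload
ends up as 9 data words plus 7 padded/drained words.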

@@ -557,7 +557,7 @@ struct mvs_channel {
int numtslots; /* Number of NCQ slots */
int numtslotspd[16];/* Number of NCQ slots per dev */
int numhslots; /* Number of holden slots */
int readlog; /* Our READ LOG active */
int recoverycmd; /* Our READ LOG active */
int fatalerr; /* Fatal error happend */
int lastslot; /* Last used slot */
int taggedtarget; /* Last tagged target */