Clean up DMA handling.

 - Make isp_start() set all the IOCB fields aside from the S/G list, removing
   the extra arguments from isp_send_cmd(), which now only builds S/G lists
   and sends the request.
 - Turn DMA setup/free from being card- and PCI-specific into OS-specific,
   and instead add a new card-specific method for isp_send_cmd() (see the
   sketch below).  Previously this function was a monster handling all the
   cards.
 - Remove the double error code translation.
Alexander Motin 2020-11-20 18:02:04 +00:00
parent 96fbe51956
commit f6854a0cd5
8 changed files with 137 additions and 177 deletions
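To make the new division of labor concrete, here is a minimal, self-contained sketch of the resulting submission path. It is plain userland C that compiles on its own, not driver code: the mdvec/dv_send_cmd shape and the CMD_EAGAIN/CMD_COMPLETE values mirror the diff below, while dmasetup(), send_cmd_24xx() and the buffers are simplified stand-ins.

#include <errno.h>
#include <stdio.h>

#define CMD_COMPLETE    101     /* command completed (with error) */
#define CMD_EAGAIN      102     /* busy, maybe retry later */
/* success is now plain 0; CMD_QUEUED no longer exists */

struct mdvec {
        /* card-specific method: S/G chaining and request queue insertion only */
        int (*dv_send_cmd)(void *qe, const void *segs, unsigned int nseg);
};

static int
send_cmd_24xx(void *qe, const void *segs, unsigned int nseg)
{
        (void)segs;     /* a real method would chain continuation entries here */
        printf("queued IOCB %p with %u segment(s)\n", qe, nseg);
        return (0);
}

/*
 * OS-specific step: map the data buffer, hand the segment list to the card
 * method, and translate errors in exactly one place.
 */
static int
dmasetup(const struct mdvec *m, void *qe, const void *buf, unsigned int nseg)
{
        int error;

        /* in the driver, bus_dmamap_load_ccb() would produce the segment list */
        error = m->dv_send_cmd(qe, buf, nseg);

        switch (error) {
        case 0:
        case CMD_EAGAIN:
                return (error);
        case ENOMEM:
                return (CMD_EAGAIN);
        default:
                return (CMD_COMPLETE);
        }
}

int
main(void)
{
        struct mdvec m = { .dv_send_cmd = send_cmd_24xx };
        char iocb[64], data[512];

        /* the core (isp_start) has already filled every IOCB field but the S/G list */
        if (dmasetup(&m, iocb, data, 1) == 0)
                printf("command is in flight\n");
        return (0);
}

In the driver itself, ISP_DMASETUP() expands to the OS-specific isp_dmasetup(), which loads the CCB through bus_dmamap_load_ccb() and invokes ISP_SEND_CMD() from the bus_dma callback; isp_start() then treats any non-zero return as an already-translated error code.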

View File

@@ -2822,35 +2822,31 @@ isp_start(XS_T *xs)
goto start_again;
}
reqp->req_header.rqs_entry_count = 1;
reqp->req_header.rqs_entry_type = RQSTYPE_T7RQS;
/*
* Set task attributes
*/
if (XS_TAG_P(xs))
reqp->req_task_attribute = XS_TAG_TYPE(xs);
else
reqp->req_task_attribute = FCP_CMND_TASK_ATTR_SIMPLE;
reqp->req_task_attribute |= (XS_PRIORITY(xs) << FCP_CMND_PRIO_SHIFT) &
FCP_CMND_PRIO_MASK;
/*
* NB: we do not support long CDBs (yet)
*/
cdblen = XS_CDBLEN(xs);
if (cdblen > sizeof (reqp->req_cdb)) {
isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen);
XS_SETERR(xs, HBA_REQINVAL);
return (CMD_COMPLETE);
}
reqp->req_header.rqs_entry_type = RQSTYPE_T7RQS;
reqp->req_header.rqs_entry_count = 1;
reqp->req_nphdl = lp->handle;
reqp->req_tidlo = lp->portid;
reqp->req_tidhi = lp->portid >> 16;
reqp->req_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(xs));
reqp->req_time = XS_TIME(xs);
be64enc(reqp->req_lun, CAM_EXTLUN_BYTE_SWIZZLE(XS_LUN(xs)));
if (XS_XFRIN(xs))
reqp->req_alen_datadir = FCP_CMND_DATA_READ;
else if (XS_XFROUT(xs))
reqp->req_alen_datadir = FCP_CMND_DATA_WRITE;
if (XS_TAG_P(xs))
reqp->req_task_attribute = XS_TAG_TYPE(xs);
else
reqp->req_task_attribute = FCP_CMND_TASK_ATTR_SIMPLE;
reqp->req_task_attribute |= (XS_PRIORITY(xs) << FCP_CMND_PRIO_SHIFT) &
FCP_CMND_PRIO_MASK;
if (FCPARAM(isp, XS_CHANNEL(xs))->fctape_enabled && (lp->prli_word3 & PRLI_WD3_RETRY)) {
if (FCP_NEXT_CRN(isp, &reqp->req_crn, xs)) {
isp_prt(isp, ISP_LOG_WARN1,
@@ -2860,8 +2856,11 @@ isp_start(XS_T *xs)
return (CMD_EAGAIN);
}
}
reqp->req_time = XS_TIME(xs);
ISP_MEMCPY(reqp->req_cdb, XS_CDBP(xs), cdblen);
reqp->req_dl = XS_XFRLEN(xs);
reqp->req_tidlo = lp->portid;
reqp->req_tidhi = lp->portid >> 16;
reqp->req_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(xs));
/* Whew. Thankfully the same for type 7 requests */
reqp->req_handle = isp_allocate_handle(isp, xs, ISP_HANDLE_INITIATOR);
@@ -2878,7 +2877,7 @@ isp_start(XS_T *xs)
* The callee is responsible for adding all requests at this point.
*/
dmaresult = ISP_DMASETUP(isp, xs, reqp);
if (dmaresult != CMD_QUEUED) {
if (dmaresult != 0) {
isp_destroy_handle(isp, reqp->req_handle);
/*
* dmasetup sets actual error in packet, and
@@ -2887,7 +2886,7 @@ isp_start(XS_T *xs)
return (dmaresult);
}
isp_xs_prt(isp, xs, ISP_LOGDEBUG0, "START cmd cdb[0]=0x%x datalen %ld", XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
return (CMD_QUEUED);
return (0);
}
/*
@@ -3442,8 +3441,7 @@ isp_intr_respq(ispsoftc_t *isp)
isp_prt(isp, ISP_LOGDEBUG2, "asked for %lu got raw resid %lu settled for %lu",
(u_long)XS_XFRLEN(xs), (u_long)resid, (u_long)XS_GET_RESID(xs));
if (XS_XFRLEN(xs))
ISP_DMAFREE(isp, xs, sp->req_handle);
ISP_DMAFREE(isp, xs);
isp_destroy_handle(isp, sp->req_handle);
ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */

View File

@@ -1270,7 +1270,7 @@ isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
*/
cto->ct_syshandle = handle;
dmaresult = ISP_DMASETUP(isp, cso, cto);
if (dmaresult != CMD_QUEUED) {
if (dmaresult != 0) {
isp_destroy_handle(isp, handle);
isp_free_pcmd(isp, ccb);
if (dmaresult == CMD_EAGAIN) {
@@ -2033,9 +2033,7 @@ isp_watchdog(void *arg)
/*
* After this point, the command is really dead.
*/
if (XS_XFRLEN(xs)) {
ISP_DMAFREE(isp, xs, handle);
}
ISP_DMAFREE(isp, xs);
isp_destroy_handle(isp, handle);
isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
XS_SETERR(xs, CAM_CMD_TIMEOUT);
@@ -2234,9 +2232,7 @@ isp_loop_dead(ispsoftc_t *isp, int chan)
continue;
}
if (XS_XFRLEN(xs)) {
ISP_DMAFREE(isp, xs, isp->isp_xflist[i].handle);
}
ISP_DMAFREE(isp, xs);
isp_destroy_handle(isp, isp->isp_xflist[i].handle);
isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed",
isp->isp_xflist[i].handle, chan, XS_TGT(xs),
@@ -2457,7 +2453,6 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
break;
}
}
ccb->csio.req_map = NULL;
#ifdef DIAGNOSTIC
if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) {
xpt_print(ccb->ccb_h.path, "invalid target\n");
@@ -2479,7 +2474,7 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
}
error = isp_start((XS_T *) ccb);
switch (error) {
case CMD_QUEUED:
case 0:
ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
break;
@@ -2541,8 +2536,6 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
xpt_done(ccb);
break;
}
ccb->ccb_h.spriv_field0 = 0;
ccb->ccb_h.spriv_ptr1 = isp;
if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
ccb->atio.tag_id = 0;
@@ -3444,14 +3437,91 @@ isp_platform_intr_atio(void *arg)
ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
}
void
isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl)
typedef struct {
ispsoftc_t *isp;
struct ccb_scsiio *csio;
void *qe;
int error;
} mush_t;
static void
isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD);
} else {
bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE);
mush_t *mp = (mush_t *) arg;
ispsoftc_t *isp = mp->isp;
struct ccb_scsiio *csio = mp->csio;
bus_dmasync_op_t op;
if (error) {
mp->error = error;
return;
}
if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
op = BUS_DMASYNC_PREREAD;
else
op = BUS_DMASYNC_PREWRITE;
bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg);
if (mp->error)
isp_dmafree(isp, csio);
}
int
isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe)
{
mush_t mp;
int error;
if (XS_XFRLEN(csio)) {
mp.isp = isp;
mp.csio = csio;
mp.qe = qe;
mp.error = 0;
error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
(union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT);
if (error == 0)
error = mp.error;
} else {
error = ISP_SEND_CMD(isp, qe, NULL, 0);
}
switch (error) {
case 0:
case CMD_COMPLETE:
case CMD_EAGAIN:
case CMD_RQLATER:
break;
case ENOMEM:
error = CMD_EAGAIN;
break;
case EINVAL:
case EFBIG:
csio->ccb_h.status = CAM_REQ_INVALID;
error = CMD_COMPLETE;
break;
default:
csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
error = CMD_COMPLETE;
break;
}
return (error);
}
void
isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio)
{
bus_dmasync_op_t op;
if (XS_XFRLEN(csio) == 0)
return;
if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
op = BUS_DMASYNC_POSTREAD;
else
op = BUS_DMASYNC_POSTWRITE;
bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
}

View File

@@ -341,6 +341,9 @@ struct isposinfo {
#define ISP_INLINE
#endif
#define ISP_DMASETUP(isp, xs, req) isp_dmasetup(isp, xs, req)
#define ISP_DMAFREE(isp, xs) isp_dmafree(isp, xs)
#define NANOTIME_T struct timespec
#define GET_NANOTIME nanotime
#define GET_NANOSEC(x) ((x)->tv_sec * 1000000000 + (x)->tv_nsec)
@@ -472,6 +475,8 @@ default: \
#define XS_CDBLEN(ccb) (ccb)->cdb_len
#define XS_XFRLEN(ccb) (ccb)->dxfer_len
#define XS_XFRIN(ccb) (((ccb)->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
#define XS_XFROUT(ccb) (((ccb)->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
#define XS_TIME(ccb) \
(((ccb)->ccb_h.timeout > 0xffff * 1000 - 999) ? 0 : \
(((ccb)->ccb_h.timeout + 999) / 1000))
@@ -638,7 +643,8 @@ int isp_fc_scratch_acquire(ispsoftc_t *, int);
void isp_platform_intr(void *);
void isp_platform_intr_resp(void *);
void isp_platform_intr_atio(void *);
void isp_common_dmateardown(ispsoftc_t *, struct ccb_scsiio *, uint32_t);
int isp_dmasetup(ispsoftc_t *, XS_T *, void *);
void isp_dmafree(ispsoftc_t *, struct ccb_scsiio *);
void isp_fcp_reset_crn(ispsoftc_t *, int, uint32_t, int);
int isp_fcp_next_crn(ispsoftc_t *, uint8_t *, XS_T *);

View File

@@ -61,30 +61,25 @@ const char *isp_class3_roles[4] = {
* Called with the first queue entry at least partially filled out.
*/
int
isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs)
{
ispcontreq64_t crq;
uint8_t type, nqe;
uint32_t seg, seglim, nxt, nxtnxt, ddf;
uint8_t type, nqe = 1;
uint32_t seg, seglim, nxt, nxtnxt;
ispds64_t *dsp64 = NULL;
void *qe0, *qe1;
qe0 = isp_getrqentry(isp);
if (qe0 == NULL) {
if (qe0 == NULL)
return (CMD_EAGAIN);
}
nxt = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp));
type = ((isphdr_t *)fqe)->rqs_entry_type;
nqe = 1;
/*
* If we have no data to transmit, just copy the first IOCB and start it up.
*/
if (ddir == ISP_NOXFR) {
ddf = 0;
if (nsegs == 0)
goto copy_and_sync;
}
/*
* First figure out how many pieces of data to transfer, what
@@ -92,7 +87,6 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t to
*/
switch (type) {
case RQSTYPE_T7RQS:
ddf = (ddir == ISP_TO_DEVICE)? FCP_CMND_DATA_WRITE : FCP_CMND_DATA_READ;
dsp64 = &((ispreqt7_t *)fqe)->req_dataseg;
seglim = 1;
break;
@@ -103,7 +97,7 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t to
break;
#endif
default:
return (CMD_COMPLETE);
panic("%s: unsupported IOCB type 0x%x\n", __func__, type);
}
if (seglim > nsegs)
seglim = nsegs;
@@ -146,9 +140,7 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t to
((isphdr_t *)fqe)->rqs_entry_count = nqe;
switch (type) {
case RQSTYPE_T7RQS:
((ispreqt7_t *)fqe)->req_alen_datadir = ddf;
((ispreqt7_t *)fqe)->req_seg_count = nsegs;
((ispreqt7_t *)fqe)->req_dl = totalcnt;
isp_put_request_t7(isp, fqe, qe0);
break;
#ifdef ISP_TARGET_MODE
@@ -162,13 +154,13 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t to
break;
#endif
default:
return (CMD_COMPLETE);
panic("%s: unsupported IOCB type 0x%x\n", __func__, type);
}
if (isp->isp_dblev & ISP_LOGDEBUG1) {
isp_print_bytes(isp, "first queue entry", QENTRY_LEN, qe0);
}
ISP_ADD_REQUEST(isp, nxt);
return (CMD_QUEUED);
return (0);
}
uint32_t
@@ -500,13 +492,9 @@ isp_clear_commands(ispsoftc_t *isp)
switch (ISP_H2HT(hdp->handle)) {
case ISP_HANDLE_INITIATOR: {
XS_T *xs = hdp->cmd;
if (XS_XFRLEN(xs)) {
ISP_DMAFREE(isp, xs, hdp->handle);
XS_SET_RESID(xs, XS_XFRLEN(xs));
} else {
XS_SET_RESID(xs, 0);
}
ISP_DMAFREE(isp, xs);
isp_destroy_handle(isp, hdp->handle);
XS_SET_RESID(xs, XS_XFRLEN(xs));
XS_SETERR(xs, HBA_BUSRESET);
isp_done(xs);
break;
@@ -515,7 +503,7 @@ isp_clear_commands(ispsoftc_t *isp)
case ISP_HANDLE_TARGET: {
ct7_entry_t ctio;
ISP_DMAFREE(isp, hdp->cmd, hdp->handle);
ISP_DMAFREE(isp, hdp->cmd);
ISP_MEMZERO(&ctio, sizeof(ct7_entry_t));
ctio.ct_syshandle = hdp->handle;
ctio.ct_nphdl = CT_HBA_RESET;

View File

@@ -38,8 +38,7 @@
* This used to be platform specific, but basically once you get the segment
* stuff figured out, you can make all the code in one spot.
*/
typedef enum { ISP_TO_DEVICE, ISP_FROM_DEVICE, ISP_NOXFR} isp_ddir_t;
int isp_send_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t, ispds64_t *);
int isp_send_cmd(ispsoftc_t *, void *, void *, uint32_t);
/*
* Handle management functions.

View File

@@ -57,7 +57,6 @@ static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
static void isp_pci_run_isr_2400(ispsoftc_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static void isp_pci_mbxdmafree(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
static int isp_pci_irqsetup(ispsoftc_t *);
static struct ispmdvec mdvec_2400 = {
@@ -65,8 +64,7 @@ static struct ispmdvec mdvec_2400 = {
isp_pci_rd_reg_2400,
isp_pci_wr_reg_2400,
isp_pci_mbxdma,
isp_pci_dmasetup,
isp_common_dmateardown,
isp_send_cmd,
isp_pci_irqsetup,
NULL
};
@@ -76,8 +74,7 @@ static struct ispmdvec mdvec_2500 = {
isp_pci_rd_reg_2400,
isp_pci_wr_reg_2400,
isp_pci_mbxdma,
isp_pci_dmasetup,
isp_common_dmateardown,
isp_send_cmd,
isp_pci_irqsetup,
NULL
};
@@ -87,8 +84,7 @@ static struct ispmdvec mdvec_2600 = {
isp_pci_rd_reg_2600,
isp_pci_wr_reg_2600,
isp_pci_mbxdma,
isp_pci_dmasetup,
isp_common_dmateardown,
isp_send_cmd,
isp_pci_irqsetup,
NULL
};
@@ -98,8 +94,7 @@ static struct ispmdvec mdvec_2700 = {
isp_pci_rd_reg_2600,
isp_pci_wr_reg_2600,
isp_pci_mbxdma,
isp_pci_dmasetup,
isp_common_dmateardown,
isp_send_cmd,
isp_pci_irqsetup,
NULL
};
@@ -1232,96 +1227,6 @@ isp_pci_mbxdmafree(ispsoftc_t *isp)
}
}
typedef struct {
ispsoftc_t *isp;
void *cmd_token;
void *rq; /* original request */
int error;
} mush_t;
#define MUSHERR_NOQENTRIES -2
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
mush_t *mp = (mush_t *) arg;
ispsoftc_t *isp = mp->isp;
struct ccb_scsiio *csio = mp->cmd_token;
isp_ddir_t ddir;
int sdir;
if (error) {
mp->error = error;
return;
}
if (nseg == 0) {
ddir = ISP_NOXFR;
} else {
if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
ddir = ISP_FROM_DEVICE;
} else {
ddir = ISP_TO_DEVICE;
}
if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) {
sdir = BUS_DMASYNC_PREREAD;
} else {
sdir = BUS_DMASYNC_PREWRITE;
}
bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
sdir);
}
error = isp_send_cmd(isp, mp->rq, dm_segs, nseg, XS_XFRLEN(csio),
ddir, (ispds64_t *)csio->req_map);
switch (error) {
case CMD_EAGAIN:
mp->error = MUSHERR_NOQENTRIES;
break;
case CMD_QUEUED:
break;
default:
mp->error = EIO;
break;
}
}
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
mush_t mush, *mp;
int error;
mp = &mush;
mp->isp = isp;
mp->cmd_token = csio;
mp->rq = ff;
mp->error = 0;
error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
(union ccb *)csio, dma2, mp, BUS_DMA_NOWAIT);
if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
mp->error = error;
}
if (mp->error) {
int retval = CMD_COMPLETE;
if (mp->error == MUSHERR_NOQENTRIES) {
retval = CMD_EAGAIN;
} else if (mp->error == EFBIG) {
csio->ccb_h.status = CAM_REQ_TOO_BIG;
} else if (mp->error == EINVAL) {
csio->ccb_h.status = CAM_REQ_INVALID;
} else {
csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
}
return (retval);
}
return (CMD_QUEUED);
}
static int
isp_pci_irqsetup(ispsoftc_t *isp)
{

View File

@@ -730,9 +730,7 @@ isp_handle_ctio7(ispsoftc_t *isp, ct7_entry_t *ct)
isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_nphdl);
}
} else {
if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) {
ISP_DMAFREE(isp, xs, ct->ct_syshandle);
}
ISP_DMAFREE(isp, xs);
if (ct->ct_flags & CT7_SENDSTATUS) {
/*
* Sent status and command complete.

View File

@@ -65,8 +65,7 @@ struct ispmdvec {
uint32_t (*dv_rd_reg) (ispsoftc_t *, int);
void (*dv_wr_reg) (ispsoftc_t *, int, uint32_t);
int (*dv_mbxdma) (ispsoftc_t *);
int (*dv_dmaset) (ispsoftc_t *, XS_T *, void *);
void (*dv_dmaclr) (ispsoftc_t *, XS_T *, uint32_t);
int (*dv_send_cmd) (ispsoftc_t *, void *, void *, uint32_t);
int (*dv_irqsetup) (ispsoftc_t *);
void (*dv_dregs) (ispsoftc_t *, const char *);
const void * dv_ispfw; /* ptr to f/w */
@@ -98,12 +97,8 @@ struct ispmdvec {
#define ISP_MBOXDMASETUP(isp) \
(*(isp)->isp_mdvec->dv_mbxdma)((isp))
#define ISP_DMASETUP(isp, xs, req) \
(*(isp)->isp_mdvec->dv_dmaset)((isp), (xs), (req))
#define ISP_DMAFREE(isp, xs, hndl) \
if ((isp)->isp_mdvec->dv_dmaclr) \
(*(isp)->isp_mdvec->dv_dmaclr)((isp), (xs), (hndl))
#define ISP_SEND_CMD(isp, qe, segp, nseg) \
(*(isp)->isp_mdvec->dv_send_cmd)((isp), (qe), (segp), (nseg))
#define ISP_IRQSETUP(isp) \
(((isp)->isp_mdvec->dv_irqsetup) ? (*(isp)->isp_mdvec->dv_irqsetup)(isp) : 0)
@@ -683,8 +678,7 @@ int isp_start(XS_T *);
/* these values are what isp_start returns */
#define CMD_COMPLETE 101 /* command completed */
#define CMD_EAGAIN 102 /* busy- maybe retry later */
#define CMD_QUEUED 103 /* command has been queued for execution */
#define CMD_RQLATER 104 /* requeue this command later */
#define CMD_RQLATER 103 /* requeue this command later */
/*
* Command Completion Point- Core layers call out from this with completed cmds
@@ -867,6 +861,8 @@ void isp_async(ispsoftc_t *, ispasync_t, ...);
* XS_CDBP(xs) gets a pointer to the scsi CDB ""
* XS_CDBLEN(xs) gets the CDB's length ""
* XS_XFRLEN(xs) gets the associated data transfer length ""
* XS_XFRIN(xs) gets IN direction
* XS_XFROUT(xs) gets OUT direction
* XS_TIME(xs) gets the time (in seconds) for this command
* XS_GET_RESID(xs) gets the current residual count
* XS_SET_RESID(xs, resid) sets the current residual count