Unify initiator and target DMA setup and command sending.

The code is so alike that it is pointless to keep it separate.

MFC after:	2 weeks
mav 2017-03-24 14:44:03 +00:00
parent 720ca41ea8
commit d0838c6c39
7 changed files with 122 additions and 340 deletions

View File

@@ -1372,10 +1372,7 @@ isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
 	} else {
 		ct2_entry_t *cto = (ct2_entry_t *) local;

-		if (isp->isp_osinfo.sixtyfourbit)
-			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
-		else
-			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
+		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
 		cto->ct_header.rqs_entry_count = 1;
 		cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM;
 		ATPD_SET_SEQNO(cto, atp);
@@ -1468,14 +1465,8 @@ isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
 			isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests,
 			    (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length);
 			cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length;
-			if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
-				cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base = DMA_LO32(addr);
-				cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi = DMA_HI32(addr);
-				cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
-			} else {
-				cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base = DMA_LO32(addr);
-				cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
-			}
+			cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base = DMA_LO32(addr);
+			cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
 		}
 		if (sense_length) {
 			isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d sense: %x %x/%x/%x", __func__,
@@ -3468,7 +3459,7 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
 		cpi->max_lun = ISP_MAX_LUNS(isp) == 0 ?
 		    255 : ISP_MAX_LUNS(isp) - 1;
 		cpi->bus_id = cam_sim_bus(sim);
-		if (isp->isp_osinfo.sixtyfourbit)
+		if (sizeof (bus_size_t) > 4)
 			cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE;
 		else
 			cpi->maxio = (ISP_NSEG_MAX - 1) * PAGE_SIZE;

View File

@@ -293,7 +293,6 @@ struct isposinfo {
 	struct isp_pcmd *	pcmd_pool;
 	struct isp_pcmd *	pcmd_free;

-	int			sixtyfourbit;	/* sixtyfour bit platform */
 	int			mbox_sleeping;
 	int			mbox_sleep_ok;
 	int			mboxbsy;
@@ -505,6 +504,13 @@ default: \
 		d->ds_base = DMA_LO32(e->ds_addr);	\
 		d->ds_count = e->ds_len;	\
 	}
+#if (BUS_SPACE_MAXADDR > UINT32_MAX)
+#define	XS_NEED_DMA64_SEG(s, n)	\
+	(((bus_dma_segment_t *)s)[n].ds_addr +	\
+	 ((bus_dma_segment_t *)s)[n].ds_len > UINT32_MAX)
+#else
+#define	XS_NEED_DMA64_SEG(s, n)	(0)
+#endif
 #define	XS_ISP(ccb)	cam_sim_softc(xpt_path_sim((ccb)->ccb_h.path))
 #define	XS_CHANNEL(ccb)	cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path))
 #define	XS_TGT(ccb)	(ccb)->ccb_h.target_id
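
For readers unfamiliar with the new macro above: it tests whether a DMA segment ends above the 32-bit address boundary. A minimal, self-contained sketch of the same test, using a hypothetical stand-in for the bus_dma(9) segment type rather than the kernel's definition:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the bus_dma(9) segment type. */
typedef struct {
	uint64_t ds_addr;	/* bus address of the segment */
	uint64_t ds_len;	/* length of the segment */
} dma_segment_t;

/* Mirrors XS_NEED_DMA64_SEG: true when the segment ends above 4GB. */
static int
need_dma64_seg(const dma_segment_t *s, int n)
{
	return (s[n].ds_addr + s[n].ds_len > UINT32_MAX);
}

int
main(void)
{
	dma_segment_t segs[2] = {
		{ 0xf0000000ULL, 0x20000000ULL },	/* crosses 4GB: needs DMA64 */
		{ 0x10000000ULL, 0x1000ULL },		/* fits in 32 bits */
	};

	printf("seg0: %d, seg1: %d\n",
	    need_dma64_seg(segs, 0), need_dma64_seg(segs, 1));
	return (0);
}

On platforms where BUS_SPACE_MAXADDR fits in 32 bits the real macro compiles away to 0, so the scan in isp_send_cmd() costs nothing there.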

View File

@@ -61,8 +61,8 @@ int
 isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
 {
 	uint8_t storage[QENTRY_LEN];
-	uint8_t type, nqe;
-	uint32_t seg, curseg, seglim, nxt, nxtnxt, ddf;
+	uint8_t type, nqe, need64;
+	uint32_t seg, seglim, nxt, nxtnxt, ddf;
 	ispds_t *dsp = NULL;
 	ispds64_t *dsp64 = NULL;
 	void *qe0, *qe1;
@@ -88,8 +88,21 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
 		goto copy_and_sync;
 	}

+	need64 = 0;
+	for (seg = 0; seg < nsegs; seg++)
+		need64 |= XS_NEED_DMA64_SEG(segp, seg);
+	if (need64) {
+		if (type == RQSTYPE_T2RQS)
+			((isphdr_t *)fqe)->rqs_entry_type = type = RQSTYPE_T3RQS;
+		else if (type == RQSTYPE_REQUEST)
+			((isphdr_t *)fqe)->rqs_entry_type = type = RQSTYPE_A64;
+		else if (type == RQSTYPE_CTIO2)
+			((isphdr_t *)fqe)->rqs_entry_type = type = RQSTYPE_CTIO3;
+	}
+
 	/*
-	 * First figure out how many pieces of data to transfer and what kind and how many we can put into the first queue entry.
+	 * First figure out how many pieces of data to transfer, what
+	 * kind and how many we can put into the first queue entry.
 	 */
 	switch (type) {
 	case RQSTYPE_REQUEST:
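
The need64 scan added here decides, per command, whether the queue entry must be promoted to its 64-bit variant; this replaces the old per-platform sixtyfourbit flag. A standalone sketch of that mapping, with illustrative enum values rather than the real constants from ispmbox.h:

#include <stdio.h>

/* Illustrative entry-type constants; the real values live in ispmbox.h. */
enum rqstype {
	RQSTYPE_REQUEST, RQSTYPE_A64,	/* initiator command and 64-bit form */
	RQSTYPE_T2RQS, RQSTYPE_T3RQS,	/* FC command and 64-bit form */
	RQSTYPE_CTIO2, RQSTYPE_CTIO3	/* target CTIO and 64-bit form */
};

/* Map a 32-bit queue entry type to its 64-bit variant; types that
 * already handle 64-bit segments (or carry no data) pass through. */
static enum rqstype
promote_to_dma64(enum rqstype type)
{
	switch (type) {
	case RQSTYPE_T2RQS:	return (RQSTYPE_T3RQS);
	case RQSTYPE_REQUEST:	return (RQSTYPE_A64);
	case RQSTYPE_CTIO2:	return (RQSTYPE_CTIO3);
	default:		return (type);
	}
}

int
main(void)
{
	printf("RQSTYPE_REQUEST -> %d (RQSTYPE_A64)\n",
	    promote_to_dma64(RQSTYPE_REQUEST));
	return (0);
}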
@@ -121,15 +134,27 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
 		dsp64 = &((ispreqt7_t *)fqe)->req_dataseg;
 		seglim = 1;
 		break;
+#ifdef ISP_TARGET_MODE
+	case RQSTYPE_CTIO2:
+		dsp = ((ct2_entry_t *)fqe)->rsp.m0.u.ct_dataseg;
+		seglim = ISP_RQDSEG_T2;
+		break;
+	case RQSTYPE_CTIO3:
+		dsp64 = ((ct2_entry_t *)fqe)->rsp.m0.u.ct_dataseg64;
+		seglim = ISP_RQDSEG_T3;
+		break;
+	case RQSTYPE_CTIO7:
+		dsp64 = &((ct7_entry_t *)fqe)->rsp.m0.ds;
+		seglim = 1;
+		break;
+#endif
 	default:
 		return (CMD_COMPLETE);
 	}
-
-	if (seglim > nsegs) {
+	if (seglim > nsegs)
 		seglim = nsegs;
-	}
-
-	for (seg = curseg = 0; curseg < seglim; curseg++) {
+	seg = 0;
+	while (seg < seglim) {
 		if (dsp64) {
 			XS_GET_DMA64_SEG(dsp64++, segp, seg++);
 		} else {
@@ -137,7 +162,6 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
 		}
 	}
-
 	/*
 	 * Second, start building additional continuation segments as needed.
 	 */
@@ -164,10 +188,10 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
 			crq->req_header.rqs_entry_count = 1;
 			dsp = crq->req_dataseg;
 		}
-		if (seg + seglim > nsegs) {
-			seglim = nsegs - seg;
-		}
-		for (curseg = 0; curseg < seglim; curseg++) {
+		seglim += seg;
+		if (seglim > nsegs)
+			seglim = nsegs;
+		while (seg < seglim) {
 			if (dsp64) {
 				XS_GET_DMA64_SEG(dsp64++, segp, seg++);
 			} else {
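
This rewrite replaces the seg/curseg bookkeeping with a single running seg index clamped by seglim. A toy walk-through of the chunking arithmetic, with made-up per-entry limits standing in for the hardware's ISP_RQDSEG*/ISP_CDSEG* values:

#include <stdio.h>

int
main(void)
{
	int nsegs = 12;		/* total DMA segments to describe */
	int first = 2;		/* slots in the first queue entry (made up) */
	int percont = 5;	/* slots per continuation entry (made up) */
	int seg = 0, seglim, nqe = 1;

	/* Fill the first queue entry. */
	seglim = (first > nsegs) ? nsegs : first;
	while (seg < seglim)
		seg++;		/* stands in for XS_GET_DMA_SEG() */

	/* Fill continuation entries until all segments are described. */
	while (seg < nsegs) {
		nqe++;
		seglim = seg + percont;
		if (seglim > nsegs)
			seglim = nsegs;
		while (seg < seglim)
			seg++;
	}
	printf("%d segments -> %d queue entries\n", nsegs, nqe);
	return (0);
}

With these numbers the first entry takes 2 segments and two continuation entries take 5 each, so the command occupies 3 queue entries, which is what rqs_entry_count records below.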
@@ -191,23 +215,17 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
 	switch (type) {
 	case RQSTYPE_REQUEST:
 		((ispreq_t *)fqe)->req_flags |= ddf;
-		/*
-		 * This is historical and not clear whether really needed.
-		 */
-		if (nsegs == 0) {
+		/* This is historical and not clear whether really needed. */
+		if (nsegs == 0)
 			nsegs = 1;
-		}
 		((ispreq_t *)fqe)->req_seg_count = nsegs;
 		isp_put_request(isp, fqe, qe0);
 		break;
 	case RQSTYPE_CMDONLY:
 		((ispreq_t *)fqe)->req_flags |= ddf;
-		/*
-		 * This is historical and not clear whether really needed.
-		 */
-		if (nsegs == 0) {
+		/* This is historical and not clear whether really needed. */
+		if (nsegs == 0)
 			nsegs = 1;
-		}
 		((ispextreq_t *)fqe)->req_seg_count = nsegs;
 		isp_put_extended_request(isp, fqe, qe0);
 		break;
@@ -233,11 +251,34 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd)
 		}
 		break;
 	case RQSTYPE_T7RQS:
-		((ispreqt7_t *)fqe)->req_alen_datadir = ddf;
+		((ispreqt7_t *)fqe)->req_alen_datadir = ddf;
 		((ispreqt7_t *)fqe)->req_seg_count = nsegs;
 		((ispreqt7_t *)fqe)->req_dl = totalcnt;
 		isp_put_request_t7(isp, fqe, qe0);
 		break;
+#ifdef ISP_TARGET_MODE
+	case RQSTYPE_CTIO2:
+	case RQSTYPE_CTIO3:
+		if (((ct2_entry_t *)fqe)->ct_flags & CT2_FLAG_MODE2) {
+			((ct2_entry_t *)fqe)->ct_seg_count = 1;
+		} else {
+			((ct2_entry_t *)fqe)->ct_seg_count = nsegs;
+		}
+		if (ISP_CAP_2KLOGIN(isp)) {
+			isp_put_ctio2e(isp, fqe, qe0);
+		} else {
+			isp_put_ctio2(isp, fqe, qe0);
+		}
+		break;
+	case RQSTYPE_CTIO7:
+		if (((ct7_entry_t *)fqe)->ct_flags & CT7_FLAG_MODE2) {
+			((ct7_entry_t *)fqe)->ct_seg_count = 1;
+		} else {
+			((ct7_entry_t *)fqe)->ct_seg_count = nsegs;
+		}
+		isp_put_ctio7(isp, fqe, qe0);
+		break;
+#endif
 	default:
 		return (CMD_COMPLETE);
 	}
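
Note the mode-2 special case carried over from isp_send_tgt_cmd(): a MODE2 CTIO returns the FCP response IU from a driver buffer through a single descriptor, so ct_seg_count is forced to 1 regardless of how many segments were mapped. A toy version of that rule, with an illustrative flag value rather than the one from ispmbox.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag value; the real CT2_FLAG_MODE2 lives in ispmbox.h. */
#define	CT2_FLAG_MODE2	0x0002

/* A mode-2 CTIO hands the chip one descriptor pointing at the FCP
 * response IU; data-moving CTIOs describe all DMA segments. */
static uint16_t
ctio_seg_count(uint16_t ct_flags, uint16_t nsegs)
{
	return ((ct_flags & CT2_FLAG_MODE2) ? 1 : nsegs);
}

int
main(void)
{
	printf("mode 0 with 5 segs: %u\n", ctio_seg_count(0, 5));
	printf("mode 2 with 5 segs: %u\n", ctio_seg_count(CT2_FLAG_MODE2, 5));
	return (0);
}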
@@ -2057,168 +2098,6 @@ isp_put_fcp_rsp_iu(ispsoftc_t *isp, fcp_rsp_iu_t *src, fcp_rsp_iu_t *dst)
 	ISP_IOZPUT_32(isp, src->fcp_rsp_rsplen, &dst->fcp_rsp_rsplen);
 }

-#ifdef ISP_TARGET_MODE
-/*
- * Command shipping- finish off first queue entry and do dma mapping and
- * additional segments as needed.
- *
- * Called with the first queue entry mostly filled out.
- * Our job here is to finish that and add additional data
- * segments if needed.
- *
- * We used to do synthetic entries to split data and status
- * at this level, but that started getting too tricky.
- */
-int
-isp_send_tgt_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, void *snsptr, uint32_t snslen)
-{
-	uint8_t storage[QENTRY_LEN];
-	uint8_t type, nqe;
-	uint32_t seg, curseg, seglim, nxt, nxtnxt;
-	ispds_t *dsp = NULL;
-	ispds64_t *dsp64 = NULL;
-	void *qe0, *qe1;
-
-	qe0 = isp_getrqentry(isp);
-	if (qe0 == NULL) {
-		return (CMD_EAGAIN);
-	}
-	nxt = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp));
-
-	type = ((isphdr_t *)fqe)->rqs_entry_type;
-	nqe = 1;
-	seglim = 0;
-
-	/*
-	 * If we have data to transmit, figure out how many segments can fit into the first entry.
-	 */
-	if (ddir != ISP_NOXFR) {
-		/*
-		 * First, figure out how many pieces of data to transfer and what kind and how many we can put into the first queue entry.
-		 */
-		switch (type) {
-		case RQSTYPE_CTIO2:
-			dsp = ((ct2_entry_t *)fqe)->rsp.m0.u.ct_dataseg;
-			seglim = ISP_RQDSEG_T2;
-			break;
-		case RQSTYPE_CTIO3:
-			dsp64 = ((ct2_entry_t *)fqe)->rsp.m0.u.ct_dataseg64;
-			seglim = ISP_RQDSEG_T3;
-			break;
-		case RQSTYPE_CTIO7:
-			dsp64 = &((ct7_entry_t *)fqe)->rsp.m0.ds;
-			seglim = 1;
-			break;
-		default:
-			return (CMD_COMPLETE);
-		}
-	}
-
-	/*
-	 * First, fill out any of the data transfer stuff that fits
-	 * in the first queue entry.
-	 */
-	if (seglim > nsegs) {
-		seglim = nsegs;
-	}
-	for (seg = curseg = 0; curseg < seglim; curseg++) {
-		if (dsp64) {
-			XS_GET_DMA64_SEG(dsp64++, segp, seg++);
-		} else {
-			XS_GET_DMA_SEG(dsp++, segp, seg++);
-		}
-	}
-
-	/*
-	 * Second, start building additional continuation segments as needed.
-	 */
-	while (seg < nsegs) {
-		nxtnxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp));
-		if (nxtnxt == isp->isp_reqodx) {
-			isp->isp_reqodx = ISP_READ(isp, isp->isp_rqstoutrp);
-			if (nxtnxt == isp->isp_reqodx)
-				return (CMD_EAGAIN);
-		}
-		ISP_MEMZERO(storage, QENTRY_LEN);
-		qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt);
-		nxt = nxtnxt;
-		if (dsp64) {
-			ispcontreq64_t *crq = (ispcontreq64_t *) storage;
-			seglim = ISP_CDSEG64;
-			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
-			crq->req_header.rqs_entry_count = 1;
-			dsp64 = crq->req_dataseg;
-		} else {
-			ispcontreq_t *crq = (ispcontreq_t *) storage;
-			seglim = ISP_CDSEG;
-			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
-			crq->req_header.rqs_entry_count = 1;
-			dsp = crq->req_dataseg;
-		}
-		if (seg + seglim > nsegs) {
-			seglim = nsegs - seg;
-		}
-		for (curseg = 0; curseg < seglim; curseg++) {
-			if (dsp64) {
-				XS_GET_DMA64_SEG(dsp64++, segp, seg++);
-			} else {
-				XS_GET_DMA_SEG(dsp++, segp, seg++);
-			}
-		}
-		if (dsp64) {
-			isp_put_cont64_req(isp, (ispcontreq64_t *)storage, qe1);
-		} else {
-			isp_put_cont_req(isp, (ispcontreq_t *)storage, qe1);
-		}
-		if (isp->isp_dblev & ISP_LOGTDEBUG1) {
-			isp_print_bytes(isp, "additional queue entry",
-			    QENTRY_LEN, qe1);
-		}
-		nqe++;
-	}
-
-	/*
-	 * Third, not patch up the first queue entry with the number of segments
-	 * we actually are going to be transmitting. At the same time, handle
-	 * any mode 2 requests.
-	 */
-	((isphdr_t *)fqe)->rqs_entry_count = nqe;
-	switch (type) {
-	case RQSTYPE_CTIO2:
-	case RQSTYPE_CTIO3:
-		if (((ct2_entry_t *)fqe)->ct_flags & CT2_FLAG_MODE2) {
-			((ct2_entry_t *)fqe)->ct_seg_count = 1;
-		} else {
-			((ct2_entry_t *)fqe)->ct_seg_count = nsegs;
-		}
-		if (ISP_CAP_2KLOGIN(isp)) {
-			isp_put_ctio2e(isp, fqe, qe0);
-		} else {
-			isp_put_ctio2(isp, fqe, qe0);
-		}
-		break;
-	case RQSTYPE_CTIO7:
-		if (((ct7_entry_t *)fqe)->ct_flags & CT7_FLAG_MODE2) {
-			((ct7_entry_t *)fqe)->ct_seg_count = 1;
-		} else {
-			((ct7_entry_t *)fqe)->ct_seg_count = nsegs;
-		}
-		isp_put_ctio7(isp, fqe, qe0);
-		break;
-	default:
-		return (CMD_COMPLETE);
-	}
-	if (isp->isp_dblev & ISP_LOGTDEBUG1) {
-		isp_print_bytes(isp, "first queue entry", QENTRY_LEN, qe0);
-	}
-	ISP_ADD_REQUEST(isp, nxt);
-	return (CMD_QUEUED);
-}
-#endif
-
 /*
  * Find port database entries
  */

View File

@@ -156,9 +156,8 @@ void isp_put_fcp_rsp_iu(ispsoftc_t *isp, fcp_rsp_iu_t *, fcp_rsp_iu_t *);
 #else
 #include "isp_target.h"
 #endif
-int isp_send_tgt_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t, void *, uint32_t);
 #endif

 int isp_find_pdb_empty(ispsoftc_t *, int, fcportdb_t **);
 int isp_find_pdb_by_wwpn(ispsoftc_t *, int, uint64_t, fcportdb_t **);
 int isp_find_pdb_by_handle(ispsoftc_t *, int, uint16_t, fcportdb_t **);

View File

@@ -682,8 +682,6 @@ isp_pci_attach(device_t dev)
 	pcs->pci_dev = dev;
 	isp->isp_dev = dev;
 	isp->isp_nchan = 1;
-	if (sizeof (bus_addr_t) > 4)
-		isp->isp_osinfo.sixtyfourbit = 1;
 	mtx_init(&isp->isp_lock, "isp", NULL, MTX_DEF);

 	/*
@@ -1527,7 +1525,7 @@ isp_pci_mbxdma(ispsoftc_t *isp)
 		slim = (1UL << 24);
 		llim = BUS_SPACE_MAXADDR_32BIT;
 	}
-	if (isp->isp_osinfo.sixtyfourbit)
+	if (sizeof (bus_size_t) > 4)
 		nsegs = ISP_NSEG64_MAX;
 	else
 		nsegs = ISP_NSEG_MAX;
@@ -1839,122 +1837,39 @@ typedef struct {
 #define	MUSHERR_NOQENTRIES	-2

-#ifdef ISP_TARGET_MODE
-static void
-tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
-{
-	mush_t *mp;
-	ispsoftc_t *isp;
-	struct ccb_scsiio *csio;
-	isp_ddir_t ddir;
-	ispreq_t *rq;
-
-	mp = (mush_t *) arg;
-	if (error) {
-		mp->error = error;
-		return;
-	}
-	csio = mp->cmd_token;
-	isp = mp->isp;
-	rq = mp->rq;
-	if (nseg) {
-		if (isp->isp_osinfo.sixtyfourbit) {
-			if (nseg >= ISP_NSEG64_MAX) {
-				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
-				mp->error = EFAULT;
-				return;
-			}
-			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
-				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
-			}
-		} else {
-			if (nseg >= ISP_NSEG_MAX) {
-				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
-				mp->error = EFAULT;
-				return;
-			}
-		}
-		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
-			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
-			ddir = ISP_TO_DEVICE;
-		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
-			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
-			ddir = ISP_FROM_DEVICE;
-		} else {
-			dm_segs = NULL;
-			nseg = 0;
-			ddir = ISP_NOXFR;
-		}
-	} else {
-		dm_segs = NULL;
-		nseg = 0;
-		ddir = ISP_NOXFR;
-	}
-	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
-	switch (error) {
-	case CMD_EAGAIN:
-		mp->error = MUSHERR_NOQENTRIES;
-	case CMD_QUEUED:
-		break;
-	default:
-		mp->error = EIO;
-	}
-}
-#endif
-
 static void
 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
 {
-	mush_t *mp;
-	ispsoftc_t *isp;
-	struct ccb_scsiio *csio;
+	mush_t *mp = (mush_t *) arg;
+	ispsoftc_t *isp = mp->isp;
+	struct ccb_scsiio *csio = mp->cmd_token;
 	isp_ddir_t ddir;
-	ispreq_t *rq;
+	int sdir;

-	mp = (mush_t *) arg;
 	if (error) {
 		mp->error = error;
 		return;
 	}
-	csio = mp->cmd_token;
-	isp = mp->isp;
-	rq = mp->rq;
-	if (nseg) {
-		if (isp->isp_osinfo.sixtyfourbit) {
-			if (nseg >= ISP_NSEG64_MAX) {
-				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
-				mp->error = EFAULT;
-				return;
-			}
-			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
-				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
-			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
-				rq->req_header.rqs_entry_type = RQSTYPE_A64;
-			}
-		} else {
-			if (nseg >= ISP_NSEG_MAX) {
-				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
-				mp->error = EFAULT;
-				return;
-			}
-		}
-		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
-			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
-			ddir = ISP_FROM_DEVICE;
-		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
-			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
-			ddir = ISP_TO_DEVICE;
-		} else {
-			ddir = ISP_NOXFR;
-		}
-	} else {
-		dm_segs = NULL;
-		nseg = 0;
+	if (nseg == 0) {
 		ddir = ISP_NOXFR;
+	} else {
+		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+			ddir = ISP_FROM_DEVICE;
+		} else {
+			ddir = ISP_TO_DEVICE;
+		}
+		if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
+		    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) {
+			sdir = BUS_DMASYNC_PREREAD;
+		} else {
+			sdir = BUS_DMASYNC_PREWRITE;
+		}
+		bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
+		    sdir);
 	}

-	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
+	error = isp_send_cmd(isp, mp->rq, dm_segs, nseg, XS_XFRLEN(csio),
+	    ddir, (ispds64_t *)csio->req_map);
 	switch (error) {
 	case CMD_EAGAIN:
 		mp->error = MUSHERR_NOQENTRIES;
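
Why the XOR: with tdma2() gone, dma2() serves both initiator I/O and target CTIOs, and the cache-sync direction inverts in target mode (a CAM_DIR_IN CTIO moves data out of host memory toward the initiator). A self-contained illustration of the resulting truth table, using stand-in constants instead of the CAM and bus_dma(9) definitions:

#include <stdio.h>

/* Stand-ins for the CAM/bus_dma(9) constants (values are illustrative). */
enum { INITIATOR_IO, TARGET_IO };	/* XPT_CONT_TARGET_IO or not */
enum { DIR_OUT, DIR_IN };		/* CAM_DIR_OUT / CAM_DIR_IN */

static const char *
sync_op(int io_kind, int cam_dir)
{
	/* Mirrors: (func_code == XPT_CONT_TARGET_IO) ^ (dir == CAM_DIR_IN) */
	return (((io_kind == TARGET_IO) ^ (cam_dir == DIR_IN)) ?
	    "BUS_DMASYNC_PREREAD" : "BUS_DMASYNC_PREWRITE");
}

int
main(void)
{
	printf("initiator, DIR_IN:  %s\n", sync_op(INITIATOR_IO, DIR_IN));
	printf("initiator, DIR_OUT: %s\n", sync_op(INITIATOR_IO, DIR_OUT));
	printf("target,    DIR_IN:  %s\n", sync_op(TARGET_IO, DIR_IN));
	printf("target,    DIR_OUT: %s\n", sync_op(TARGET_IO, DIR_OUT));
	return (0);
}

The target rows reproduce what the removed tdma2() did explicitly (PREWRITE for CAM_DIR_IN, PREREAD for CAM_DIR_OUT), which is what lets one callback replace two.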
@@ -1971,7 +1886,6 @@ static int
 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
 {
 	mush_t mush, *mp;
-	void (*eptr)(void *, bus_dma_segment_t *, int, int);
 	int error;

 	mp = &mush;
@@ -1980,15 +1894,8 @@ isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
 	mp->rq = ff;
 	mp->error = 0;

-#ifdef ISP_TARGET_MODE
-	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO)
-		eptr = tdma2;
-	else
-#endif
-		eptr = dma2;
-
 	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
-	    (union ccb *)csio, eptr, mp, 0);
+	    (union ccb *)csio, dma2, mp, 0);
 	if (error == EINPROGRESS) {
 		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
 		mp->error = EINVAL;

View File

@@ -592,37 +592,36 @@ typedef struct {
 static void
 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
 {
-	mush_t *mp;
-	ispsoftc_t *isp;
-	struct ccb_scsiio *csio;
+	mush_t *mp = (mush_t *) arg;
+	ispsoftc_t *isp = mp->isp;
+	struct ccb_scsiio *csio = mp->cmd_token;
 	isp_ddir_t ddir;
-	ispreq_t *rq;
+	int sdir;

-	mp = (mush_t *) arg;
 	if (error) {
 		mp->error = error;
 		return;
 	}
-	csio = mp->cmd_token;
-	isp = mp->isp;
-	rq = mp->rq;
-	if (nseg) {
-		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
-			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
-			ddir = ISP_FROM_DEVICE;
-		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
-			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
-			ddir = ISP_TO_DEVICE;
-		} else {
-			ddir = ISP_NOXFR;
-		}
-	} else {
-		dm_segs = NULL;
-		nseg = 0;
+	if (nseg == 0) {
 		ddir = ISP_NOXFR;
+	} else {
+		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+			ddir = ISP_FROM_DEVICE;
+		} else {
+			ddir = ISP_TO_DEVICE;
+		}
+		if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
+		    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) {
+			sdir = BUS_DMASYNC_PREREAD;
+		} else {
+			sdir = BUS_DMASYNC_PREWRITE;
+		}
+		bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
+		    sdir);
 	}

-	if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, NULL) != CMD_QUEUED) {
+	if (isp_send_cmd(isp, mp->rq, dm_segs, nseg, XS_XFRLEN(csio),
+	    ddir, NULL) != CMD_QUEUED) {
 		mp->error = MUSHERR_NOQENTRIES;
 	}
 }

View File

@@ -999,6 +999,7 @@ void isp_async(ispsoftc_t *, ispasync_t, ...);
  *	XS_DMA_ADDR_T		Platform PCI DMA Address Type
  *	XS_GET_DMA_SEG(..)	Get 32 bit dma segment list value
  *	XS_GET_DMA64_SEG(..)	Get 64 bit dma segment list value
+ *	XS_NEED_DMA64_SEG(..)	dma segment needs 64 bit storage
  *	XS_ISP(xs)		gets an instance out of an XS_T
  *	XS_CHANNEL(xs)		gets the channel (bus # for DUALBUS cards) ""
  *	XS_TGT(xs)		gets the target ""