Allocate separate DMA area for synchronous IOCB execution.

Usually IOCBs should be put on a queue for asynchronous processing and should
not require additional DMA memory.  But there are some cases, like aborts and
resets, that for external reasons have to be synchronous.  Give those cases a
separate 2*64-byte DMA area to decouple them from the other users of the DMA
scratch area, who use it for asynchronous requests.
Alexander Motin 2016-04-12 14:19:19 +00:00
parent 0d63fc3ed8
commit 4ff970c462
4 changed files with 70 additions and 39 deletions
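For orientation, the new synchronous execution path introduced in isp.c below reduces to the following pattern. This is a condensed sketch paraphrased from the TMF (reset) hunk, not verbatim driver code: `timeout` stands in for the real MBCMD timeout expression, and error handling is elided. The abort hunk follows the same shape with isp_put_24xx_abrt/isp_get_24xx_abrt.

/* Condensed sketch: execute one IOCB synchronously via the new isp_iocb area. */
isp_put_24xx_tmf(isp, tmf, isp->isp_iocb);              /* request goes into entry 0 */
MEMORYBARRIER(isp, SYNC_IFORDEV, 0, QENTRY_LEN, chan);  /* flush it out for the ISP */
MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, timeout);
mbs.param[1] = QENTRY_LEN;
mbs.param[2] = DMA_WD1(isp->isp_iocb_dma);              /* bus address of the area */
mbs.param[3] = DMA_WD0(isp->isp_iocb_dma);
mbs.param[6] = DMA_WD3(isp->isp_iocb_dma);
mbs.param[7] = DMA_WD2(isp->isp_iocb_dma);
isp_mboxcmd(isp, &mbs);                                 /* firmware executes the IOCB now */
MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan);  /* pull back entry 1 */
isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)isp->isp_iocb)[1], sp);  /* status from entry 1 */

The mailbox command blocks until completion, so no scratch-area acquire/release is needed any more.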

View File

@@ -4667,31 +4667,25 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
tmf->tmf_tidlo = lp->portid;
tmf->tmf_tidhi = lp->portid >> 16;
tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan);
isp_put_24xx_tmf(isp, tmf, isp->isp_iocb);
MEMORYBARRIER(isp, SYNC_IFORDEV, 0, QENTRY_LEN, chan);
fcp->sendmarker = 1;
isp_prt(isp, ISP_LOGALL, "Chan %d Reset N-Port Handle 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid);
MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL,
MBCMD_DEFAULT_TIMEOUT + tmf->tmf_timeout * 1000000);
mbs.param[1] = QENTRY_LEN;
mbs.param[2] = DMA_WD1(fcp->isp_scdma);
mbs.param[3] = DMA_WD0(fcp->isp_scdma);
mbs.param[6] = DMA_WD3(fcp->isp_scdma);
mbs.param[7] = DMA_WD2(fcp->isp_scdma);
if (FC_SCRATCH_ACQUIRE(isp, chan)) {
isp_prt(isp, ISP_LOGERR, sacq);
break;
}
isp_put_24xx_tmf(isp, tmf, fcp->isp_scratch);
MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN, chan);
fcp->sendmarker = 1;
mbs.param[2] = DMA_WD1(isp->isp_iocb_dma);
mbs.param[3] = DMA_WD0(isp->isp_iocb_dma);
mbs.param[6] = DMA_WD3(isp->isp_iocb_dma);
mbs.param[7] = DMA_WD2(isp->isp_iocb_dma);
isp_mboxcmd(isp, &mbs);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
FC_SCRATCH_RELEASE(isp, chan);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE)
break;
}
MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
sp = (isp24xx_statusreq_t *) local;
isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)fcp->isp_scratch)[1], sp);
FC_SCRATCH_RELEASE(isp, chan);
isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)isp->isp_iocb)[1], sp);
if (sp->req_completion_status == 0) {
return (0);
}
@@ -4731,7 +4725,7 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
break;
}
if (IS_24XX(isp)) {
isp24xx_abrt_t local, *ab = &local, *ab2;
isp24xx_abrt_t local, *ab = &local;
fcparam *fcp;
fcportdb_t *lp;
@@ -4755,31 +4749,23 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
ab->abrt_tidlo = lp->portid;
ab->abrt_tidhi = lp->portid >> 16;
ab->abrt_vpidx = ISP_GET_VPIDX(isp, chan);
isp_put_24xx_abrt(isp, ab, isp->isp_iocb);
MEMORYBARRIER(isp, SYNC_IFORDEV, 0, 2 * QENTRY_LEN, chan);
ISP_MEMZERO(&mbs, sizeof (mbs));
MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000);
mbs.param[1] = QENTRY_LEN;
mbs.param[2] = DMA_WD1(fcp->isp_scdma);
mbs.param[3] = DMA_WD0(fcp->isp_scdma);
mbs.param[6] = DMA_WD3(fcp->isp_scdma);
mbs.param[7] = DMA_WD2(fcp->isp_scdma);
mbs.param[2] = DMA_WD1(isp->isp_iocb_dma);
mbs.param[3] = DMA_WD0(isp->isp_iocb_dma);
mbs.param[6] = DMA_WD3(isp->isp_iocb_dma);
mbs.param[7] = DMA_WD2(isp->isp_iocb_dma);
if (FC_SCRATCH_ACQUIRE(isp, chan)) {
isp_prt(isp, ISP_LOGERR, sacq);
break;
}
isp_put_24xx_abrt(isp, ab, fcp->isp_scratch);
ab2 = (isp24xx_abrt_t *) &((uint8_t *)fcp->isp_scratch)[QENTRY_LEN];
ab2->abrt_nphdl = 0xdeaf;
MEMORYBARRIER(isp, SYNC_SFORDEV, 0, 2 * QENTRY_LEN, chan);
isp_mboxcmd(isp, &mbs);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
FC_SCRATCH_RELEASE(isp, chan);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE)
break;
}
MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
isp_get_24xx_abrt(isp, ab2, ab);
FC_SCRATCH_RELEASE(isp, chan);
MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
isp_get_24xx_abrt(isp, &((isp24xx_abrt_t *)isp->isp_iocb)[1], ab);
if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY) {
return (0);
}

View File

@@ -293,10 +293,12 @@ struct isposinfo {
bus_dma_tag_t reqdmat;
bus_dma_tag_t respdmat;
bus_dma_tag_t atiodmat;
bus_dma_tag_t iocbdmat;
bus_dma_tag_t scdmat;
bus_dmamap_t reqmap;
bus_dmamap_t respmap;
bus_dmamap_t atiomap;
bus_dmamap_t iocbmap;
/*
* Command and transaction related stuff
@@ -441,6 +443,14 @@ case SYNC_ATIOQ: \
bus_dmamap_sync(isp->isp_osinfo.atiodmat, \
isp->isp_osinfo.atiomap, BUS_DMASYNC_POSTREAD); \
break; \
case SYNC_IFORDEV: \
bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
break; \
case SYNC_IFORCPU: \
bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); \
break; \
default: \
break; \
}
@@ -469,6 +479,14 @@ case SYNC_REG: \
bus_barrier(isp->isp_osinfo.regs, offset, size, \
BUS_SPACE_BARRIER_WRITE); \
break; \
case SYNC_IFORDEV: \
bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \
BUS_DMASYNC_PREWRITE); \
break; \
case SYNC_IFORCPU: \
bus_dmamap_sync(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, \
BUS_DMASYNC_POSTWRITE); \
break; \
default: \
break; \
}

View File

@@ -1730,9 +1730,23 @@ isp_pci_mbxdma(ispsoftc_t *isp)
if (IS_FC(isp)) {
if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, &isp->isp_osinfo.scdmat)) {
2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, &isp->isp_osinfo.iocbdmat)) {
goto bad;
}
if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
(void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
goto bad;
isp->isp_iocb = base;
im.error = 0;
if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
goto bad;
isp->isp_iocb_dma = im.maddr;
if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, &isp->isp_osinfo.scdmat))
goto bad;
for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
struct isp_fc *fc = ISP_FC_PC(isp, cmap);
if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
@@ -1791,7 +1805,8 @@ bad:
while (--cmap >= 0) {
struct isp_fc *fc = ISP_FC_PC(isp, cmap);
bus_dmamap_unload(isp->isp_osinfo.scdmat, fc->scmap);
bus_dmamem_free(isp->isp_osinfo.scdmat, base, fc->scmap);
bus_dmamem_free(isp->isp_osinfo.scdmat,
FCPARAM(isp, cmap)->isp_scratch, fc->scmap);
while (fc->nexus_free_list) {
struct isp_nexus *n = fc->nexus_free_list;
fc->nexus_free_list = n->next;
@@ -1799,6 +1814,10 @@ bad:
}
}
bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
bus_dmamap_unload(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap);
bus_dmamem_free(isp->isp_osinfo.iocbdmat, isp->isp_iocb,
isp->isp_osinfo.iocbmap);
bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
}
bad1:
if (isp->isp_rquest_dma != 0) {

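As a reading aid for the allocation above: the area is one 64-byte-aligned buffer of 2*QENTRY_LEN (128) bytes, and the convention in the isp.c hunks is that entry 0 carries the outbound request while entry 1 receives the firmware's response/status. A minimal sketch of that convention follows; the helper name is invented for illustration and does not exist in the driver.

/* Illustration only: the second QENTRY_LEN slot of isp_iocb holds the response. */
static inline void *
isp_iocb_response(ispsoftc_t *isp)
{
	return (&((uint8_t *)isp->isp_iocb)[QENTRY_LEN]);
}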
View File

@@ -130,6 +130,8 @@ struct ispmdvec {
#define SYNC_SFORCPU 3 /* scratch, sync for CPU */
#define SYNC_REG 4 /* for registers */
#define SYNC_ATIOQ 5 /* atio result queue (24xx) */
#define SYNC_IFORDEV 6 /* synchronous IOCB, sync for ISP */
#define SYNC_IFORCPU 7 /* synchronous IOCB, sync for CPU */
/*
* Request/Response Queue defines and macros.
@@ -595,6 +597,12 @@ struct ispsoftc {
isp_hdl_t *isp_xflist;
isp_hdl_t *isp_xffree;
/*
* DMA-mapped area for synchronous IOCB requests.
*/
void * isp_iocb;
XS_DMA_ADDR_T isp_iocb_dma;
/*
* request/result queue pointers and DMA handles for them.
*/