Add an isp_target_putback_atio function. Since we aren't using CCINCR at
this time, we need a function that tells the Qlogic f/w that a target mode
command is done, so the f/w can increase the resource count for that lun.
Add a timeout function to kick the putback again if we fail to do it the
first time (we may not have the request queue space for the ATIO push).
Split isp_handle_platform_ctio into two parts so that either the timeout
function for the ATIO push or isp_handle_platform_ctio itself can inform
CAM that the requested CTIO(s) are now done.
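
A condensed sketch of that flow, pulled together from the diff below (names
and signatures as they appear in the driver):

    /*
     * When the final CTIO for a command completes, push a replacement
     * ATIO at the f/w to restore the lun's resource count.  If the
     * request queue is full right now, retry from a timeout; only once
     * the putback succeeds do we tell CAM the CTIO(s) are done.
     */
    if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
            (void) timeout(isp_refire_putback_atio, ccb, 10);
    } else {
            isp_handle_platform_ctio_part2(isp, ccb);
    }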

Clean up (cough) residual handling. What we need for Fibre Channel is to
preserve the at_datalen field from the original incoming ATIO so we can
calculate a 'true' residual. Unfortunately, we're not guaranteed to get
that back from CAM. We'll *try* to find it hiding in the periph_priv field
(a layering violation), but if an ATIO was passed in from user land, forget
it. This means that we'll probably get residuals wrong for Fibre Channel
commands we complete with an error. It's too late in the 4.1 release cycle
to fix this- too bad. Luckily, the only device we'd really care about this
occurring on is a tape device, and those are still so rare as FC-attached
devices that this can be considered an untested combination anyway.
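
The stash-and-retrieve mechanism, condensed from the diff below (spriv_field0
is the private ccb field used for the stash):

    /* In isp_handle_platform_atio2: remember the command's total datalength. */
    atiop->ccb_h.spriv_field0 = aep->at_datalen;

    /*
     * In isp_target_start_ctio: seed the CTIO residual from that value if
     * the original ATIO can still be found behind the CTIO (the periph_priv
     * hack mentioned above).  The data movers then knock off each byte
     * actually transferred and set the underrun flag.
     */
    if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
            cto->ct_resid = atiop->ccb_h.spriv_field0;
    }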

Remove all CCINCR usage (resource autoreplenish). When we've proved to
ourselves that things are working properly, we can add it back in.

Make sure we propagate 'suggested' sense data from the incoming ATIO
into the created system ATIO, and set sense_len appropriately.
Correctly propagate tag values.
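
Condensed from the ATIO2 handler in the diff below:

    /* Copy f/w-suggested sense into the system ATIO, bounding its length. */
    if (aep->at_status & QLTM_SVALID) {
            size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
            atiop->sense_len = amt;
            MEMCPY(&atiop->sense_data, aep->at_sense, amt);
    } else {
            atiop->sense_len = 0;
    }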

Fall back to the model of generating multiple CTIOs (well, the functions
in isp_pci.c do the work) based upon what we get from XPT. Instead of
being able to pair Qlogic-generated ATIOs with CAM ATIOs, and then to
pair CAM CTIOs with Qlogic CTIOs, we have to take the CTIO passed to us
from XPT, and if it implies that we have to generate extra Qlogic CTIOs,
so be it. This means that we have to wait until the last CTIO in a
sequence we generated completes before calling xpt_done.
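
The completion-side gating, condensed from the diff below: only the CTIO that
closes out a sequence carries a non-zero rqs_seqno, and only that one triggers
the ATIO putback and xpt_done.

    notify_cam = ct->ct_header.rqs_seqno;
    /* ... status and residual bookkeeping ... */
    if (notify_cam == 0) {
            /* Intermediate CTIO in the sequence; nothing to tell CAM yet. */
            return (0);
    }
    /* Final CTIO: replenish the f/w ATIO, then complete the CCB to CAM. */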

Executive summary: target mode now pretty much works well enough to tell
folks about.

mjacob 2000-07-18 06:58:28 +00:00
parent 70eb60cccd
commit f1fdd1ae53

@@ -195,11 +195,13 @@ static void destroy_lun_state(struct ispsoftc *, tstate_t *);
static void isp_en_lun(struct ispsoftc *, union ccb *);
static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
static cam_status isp_target_putback_atio(struct ispsoftc *, union ccb *);
static timeout_t isp_refire_putback_atio;
static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
static int isp_handle_platform_ctio(struct ispsoftc *, void *);
static void isp_handle_platform_ctio_part2(struct ispsoftc *, union ccb *);
static __inline int
is_lun_enabled(struct ispsoftc *isp, lun_id_t lun)
@@ -403,7 +405,7 @@ isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
struct ccb_en_lun *cel = &ccb->cel;
tstate_t *tptr;
u_int16_t rstat;
int bus, s;
int bus;
lun_id_t lun;
target_id_t tgt;
@@ -697,6 +699,7 @@ isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
u_int32_t *hp, save_handle;
u_int16_t iptr, optr;
if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
xpt_print_path(ccb->ccb_h.path);
printf("Request Queue Overflow in isp_target_start_ctio\n");
@@ -709,8 +712,8 @@ isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
*/
if (IS_FC(isp)) {
struct ccb_accept_tio *atiop;
ct2_entry_t *cto = qe;
u_int16_t *ssptr = NULL;
cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
cto->ct_header.rqs_entry_count = 1;
@@ -718,15 +721,29 @@ isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
if (isp->isp_maxluns <= 16) {
cto->ct_lun = ccb->ccb_h.target_lun;
}
/*
* Start with a residual based on what the original datalength
* was supposed to be. Basically, we ignore what CAM has set
* for residuals. The data transfer routines will knock off
* the residual for each byte actually moved- and also will
* be responsible for setting the underrun flag.
*/
/* HACK! HACK! */
if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
cto->ct_resid = atiop->ccb_h.spriv_field0;
}
/*
* We always have to use the tag_id- it has the RX_ID
* for this exchange.
*/
cto->ct_rxid = cso->tag_id;
cto->ct_flags = CT2_CCINCR;
if (cso->dxfer_len == 0) {
cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
KASSERT(ccb->ccb_h.flags & CAM_SEND_STATUS,
("a CTIO with no data and no status?"));
cto->ct_flags |= CT2_SENDSTATUS;
ssptr = &cto->rsp.m1.ct_scsi_status;
*ssptr = cso->scsi_status;
if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
cto->ct_flags |= CT2_SENDSTATUS;
cto->rsp.m1.ct_scsi_status = cso->scsi_status;
}
if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
int m = min(cso->sense_len, MAXRESPLEN);
bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
@@ -741,24 +758,19 @@ isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
cto->ct_flags |= CT2_DATA_OUT;
}
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
ssptr = &cto->rsp.m0.ct_scsi_status;
cto->ct_flags |= CT2_SENDSTATUS;
cto->rsp.m0.ct_scsi_status = cso->scsi_status;
}
/*
* If we're sending data and status back together,
* we can't also send back sense data as well.
*/
ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
}
if (ssptr && cso->resid) {
cto->ct_resid = cso->resid;
if (cso->resid < 0)
*ssptr |= CT2_DATA_OVER;
else
*ssptr |= CT2_DATA_UNDER;
}
if (isp_tdebug > 1 && ssptr &&
(cso->scsi_status != SCSI_STATUS_OK || cso->resid)) {
if (isp_tdebug > 1 && (cto->ct_flags & CAM_SEND_STATUS)) {
printf("%s:CTIO2 RX_ID 0x%x SCSI STATUS 0x%x "
"resid %d\n", isp->isp_name, cto->ct_rxid,
cso->scsi_status, cso->resid);
"datalength %u\n", isp->isp_name, cto->ct_rxid,
cso->scsi_status, cto->ct_resid);
}
hp = &cto->ct_reserved;
} else {
@@ -769,23 +781,30 @@ isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
cto->ct_iid = cso->init_id;
cto->ct_tgt = ccb->ccb_h.target_id;
cto->ct_lun = ccb->ccb_h.target_lun;
cto->ct_tag_type = cso->tag_action;
cto->ct_tag_val = cso->tag_id;
cto->ct_flags = CT_CCINCR;
if (cso->dxfer_len) {
if (cso->tag_id && cso->tag_action) {
/*
* We don't specify a tag type for regular SCSI.
* Just the tag value and set the flag.
*/
cto->ct_tag_val = cso->tag_id;
cto->ct_flags |= CT_TQAE;
}
if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
cto->ct_flags |= CT_NODISC;
}
if (cso->dxfer_len == 0) {
cto->ct_flags |= CT_NO_DATA;
} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
cto->ct_flags |= CT_DATA_IN;
} else {
cto->ct_flags |= CT_DATA_OUT;
}
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
cto->ct_flags |= CT_SENDSTATUS;
cto->ct_scsi_status = cso->scsi_status;
cto->ct_resid = cso->resid;
}
if (isp_tdebug > 1 &&
(cso->scsi_status != SCSI_STATUS_OK || cso->resid)) {
if (isp_tdebug > 1 && (cto->ct_flags & CAM_SEND_STATUS)) {
printf("%s:CTIO SCSI STATUS 0x%x resid %d\n",
isp->isp_name, cso->scsi_status, cso->resid);
}
@@ -827,6 +846,65 @@ isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
}
}
static cam_status
isp_target_putback_atio(struct ispsoftc *isp, union ccb *ccb)
{
void *qe;
struct ccb_accept_tio *atiop;
u_int16_t iptr, optr;
if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
xpt_print_path(ccb->ccb_h.path);
printf("Request Queue Overflow in isp_target_putback_atio\n");
return (CAM_RESRC_UNAVAIL);
}
bzero(qe, QENTRY_LEN);
atiop = (struct ccb_accept_tio *) ccb;
if (IS_FC(isp)) {
at2_entry_t *at = qe;
at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
at->at_header.rqs_entry_count = 1;
if (isp->isp_maxluns > 16) {
at->at_scclun = (uint16_t) atiop->ccb_h.target_lun;
} else {
at->at_lun = (uint8_t) atiop->ccb_h.target_lun;
}
at->at_status = CT_OK;
at->at_rxid = atiop->tag_id;
ISP_SWIZ_ATIO2(isp, qe, qe);
} else {
at_entry_t *at = qe;
at->at_header.rqs_entry_type = RQSTYPE_ATIO;
at->at_header.rqs_entry_count = 1;
at->at_iid = atiop->init_id;
at->at_tgt = atiop->ccb_h.target_id;
at->at_lun = atiop->ccb_h.target_lun;
at->at_status = CT_OK;
if (atiop->ccb_h.status & CAM_TAG_ACTION_VALID) {
at->at_tag_type = atiop->tag_action;
}
at->at_tag_val = atiop->tag_id;
ISP_SWIZ_ATIO(isp, qe, qe);
}
ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
MemoryBarrier();
ISP_ADD_REQUEST(isp, iptr);
return (CAM_REQ_CMP);
}
static void
isp_refire_putback_atio(void *arg)
{
union ccb *ccb = arg;
int s = splcam();
if (isp_target_putback_atio(XS_ISP(ccb), ccb) != CAM_REQ_CMP) {
(void) timeout(isp_refire_putback_atio, ccb, 10);
} else {
isp_handle_platform_ctio_part2(XS_ISP(ccb), ccb);
}
splx(s);
}
/*
* Handle ATIO stuff that the generic code can't.
* This means handling CDBs.
@@ -913,24 +991,33 @@ isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
atiop->ccb_h.target_lun = aep->at_lun;
}
if (aep->at_flags & AT_NODISC) {
xpt_print_path(tptr->owner);
printf("incoming command that cannot disconnect\n");
atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
} else {
atiop->ccb_h.flags = 0;
}
if (status & QLTM_SVALID) {
size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
atiop->sense_len = amt;
MEMCPY(&atiop->sense_data, aep->at_sense, amt);
} else {
atiop->sense_len = 0;
}
atiop->init_id = aep->at_iid;
atiop->cdb_len = aep->at_cdblen;
MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
atiop->ccb_h.status = CAM_CDB_RECVD;
atiop->tag_id = aep->at_tag_val;
if ((atiop->tag_action = aep->at_tag_type) != 0) {
atiop->tag_id = aep->at_tag_val;
atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
}
xpt_done((union ccb*)atiop);
if (isp_tdebug > 1) {
printf("%s:ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x\n",
printf("%s:ATIO CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s",
isp->isp_name, aep->at_cdb[0] & 0xff, aep->at_iid,
aep->at_lun, aep->at_tag_val & 0xff, aep->at_tag_type);
aep->at_lun, aep->at_tag_val & 0xff, aep->at_tag_type,
(aep->at_flags & AT_NODISC)? "nondisc\n" : "\n");
}
rls_lun_statep(isp, tptr);
return (0);
@@ -1027,11 +1114,20 @@ isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
return (0);
}
SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
if (tptr == &isp->isp_osinfo.tsdflt) {
atiop->ccb_h.target_id =
((fcparam *)isp->isp_param)->isp_loopid;
atiop->ccb_h.target_lun = lun;
}
if (aep->at_status & QLTM_SVALID) {
size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
atiop->sense_len = amt;
MEMCPY(&atiop->sense_data, aep->at_sense, amt);
} else {
atiop->sense_len = 0;
}
atiop->init_id = aep->at_iid;
atiop->cdb_len = ATIO2_CDBLEN;
MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
@@ -1056,21 +1152,28 @@ isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
if (atiop->tag_action != 0) {
atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
}
/*
* Preserve overall command datalength in private field.
*/
atiop->ccb_h.spriv_field0 = aep->at_datalen;
xpt_done((union ccb*)atiop);
if (isp_tdebug > 1) {
printf("%s:ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x\n",
printf("%s:ATIO2 RX_ID 0x%x CDB=0x%x iid%d->lun%d tattr 0x%x "
"datalen %u\n",
isp->isp_name, aep->at_rxid & 0xffff, aep->at_cdb[0] & 0xff,
aep->at_iid, lun, aep->at_taskflags);
aep->at_iid, lun, aep->at_taskflags, aep->at_datalen);
}
rls_lun_statep(isp, tptr);
return (0);
}
static int
isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
{
union ccb *ccb;
int sentstatus, ok;
int sentstatus, ok, notify_cam;
/*
* CTIO and CTIO2 are close enough....
@@ -1084,7 +1187,7 @@ isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
ct2_entry_t *ct = arg;
sentstatus = ct->ct_flags & CT2_SENDSTATUS;
ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
if (ok && ccb->ccb_h.flags & CAM_SEND_SENSE) {
if (ok && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
ccb->ccb_h.status |= CAM_SENT_SENSE;
}
if (isp_tdebug > 1) {
@@ -1093,6 +1196,7 @@ isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
ct->ct_status, ct->ct_flags,
(ccb->ccb_h.status & CAM_SENT_SENSE) != 0);
}
notify_cam = ct->ct_header.rqs_seqno;
} else {
ct_entry_t *ct = arg;
sentstatus = ct->ct_flags & CT_SENDSTATUS;
@@ -1102,6 +1206,7 @@ isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
isp->isp_name, ct->ct_tag_val, ct->ct_status,
ct->ct_flags);
}
notify_cam = ct->ct_header.rqs_seqno;
}
/*
@@ -1112,7 +1217,8 @@ isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
*
* In any case, for this platform, the upper layers figure out
* what to do next, so all we do here is collect status and
* pass information along.
* pass information along. The exception is that we clear
* the notion of handling a non-disconnecting command here.
*/
if (sentstatus) {
@@ -1126,7 +1232,26 @@ isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
}
}
if (notify_cam == 0) {
if (isp_tdebug > 1) {
printf("%s:Intermediate CTIO done\n", isp->isp_name);
}
return (0);
}
if (isp_tdebug > 1) {
printf("%s:Final CTIO done\n", isp->isp_name);
}
if (isp_target_putback_atio(isp, ccb) != CAM_REQ_CMP) {
(void) timeout(isp_refire_putback_atio, ccb, 10);
} else {
isp_handle_platform_ctio_part2(isp, ccb);
}
return (0);
}
static void
isp_handle_platform_ctio_part2(struct ispsoftc *isp, union ccb *ccb)
{
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
ccb->ccb_h.status |= CAM_REQ_CMP;
}
@@ -1148,7 +1273,6 @@ isp_handle_platform_ctio(struct ispsoftc *isp, void * arg)
}
}
xpt_done(ccb);
return (0);
}
#endif
@@ -1427,8 +1551,13 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
xpt_done(ccb);
break;
}
ccb->ccb_h.sim_priv.entries[0].field = 0;
ccb->ccb_h.sim_priv.entries[1].ptr = isp;
ISP_LOCK(isp);
if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
#if 0
(void) isp_target_putback_atio(isp, ccb);
#endif
SLIST_INSERT_HEAD(&tptr->atios,
&ccb->ccb_h, sim_links.sle);
} else {