MFC r268353:

Implement ABORT TASK SET and I_T NEXUS RESET task management functions.

Use the latter to terminate active commands on iSCSI session termination.
The previous code aborted only commands that were in the middle of a data transfer.
mav 2014-07-15 17:12:37 +00:00
parent ebf361d529
commit a0bee6cdf5
6 changed files with 142 additions and 55 deletions
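
The substance of the change is that a frontend can now tear down every command on an I_T nexus with a single task management I/O, instead of individually aborting only the commands that happen to be moving data. As a rough illustration, the new cfiscsi_session_terminate_tasks() path in the iSCSI frontend hunks below boils down to the sketch that follows; it is not verbatim commit code, the helper name is hypothetical, and it assumes the usual CTL frontend environment (cam/ctl headers, an established struct cfiscsi_session *cs):

	/*
	 * Sketch only: queue a single I_T NEXUS RESET so CTL aborts everything
	 * outstanding on this initiator/target-port nexus.  The helper name is
	 * hypothetical; the calls and fields are the ones used in the diff below.
	 */
	static void
	example_terminate_nexus(struct cfiscsi_session *cs)
	{
		union ctl_io *io;
		int error;

		io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
		if (io == NULL)
			return;
		ctl_zero_io(io);
		io->io_hdr.io_type = CTL_IO_TASK;
		io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;	/* the "I" */
		io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;	/* the "T" */
		io->io_hdr.nexus.targ_lun = 0;	/* the TMF is not LUN-specific */
		io->taskio.tag_type = CTL_TAG_SIMPLE;
		io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
		error = ctl_queue(io);
		if (error != CTL_RETVAL_COMPLETE)
			ctl_free_io(io);
		/* Completion is reported back through the frontend's done routine. */
	}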

View File

@@ -409,6 +409,8 @@ static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
+static int ctl_abort_task_set(union ctl_io *io);
+static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
@@ -7484,7 +7486,8 @@ ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
ctsio->kern_rel_offset = 0;
data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
-data->byte1 |= RST_ATS | RST_LURS | RST_TRS;
+data->byte1 |= RST_ATS | RST_ATSS | RST_LURS | RST_TRS;
+data->byte2 |= RST_ITNRS;
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
ctsio->be_move_done = ctl_config_move_done;
@@ -11674,6 +11677,97 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
return (0);
}
+static int
+ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
+int other_sc)
+{
+union ctl_io *xio;
+int found;
+mtx_assert(&lun->lun_lock, MA_OWNED);
+found = 0;
+/*
+* Run through the OOA queue and attempt to find the given I/O.
+* The target port, initiator ID, tag type and tag number have to
+* match the values that we got from the initiator. If we have an
+* untagged command to abort, simply abort the first untagged command
+* we come to. We only allow one untagged command at a time of course.
+*/
+for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
+xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
+if ((targ_port == xio->io_hdr.nexus.targ_port) &&
+(init_id == xio->io_hdr.nexus.initid.id)) {
+xio->io_hdr.flags |= CTL_FLAG_ABORT;
+found = 1;
+if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
+union ctl_ha_msg msg_info;
+msg_info.hdr.nexus = xio->io_hdr.nexus;
+msg_info.task.task_action = CTL_TASK_ABORT_TASK;
+msg_info.task.tag_num = xio->scsiio.tag_num;
+msg_info.task.tag_type = xio->scsiio.tag_type;
+msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
+msg_info.hdr.original_sc = NULL;
+msg_info.hdr.serializing_sc = NULL;
+ctl_ha_msg_send(CTL_HA_CHAN_CTL,
+(void *)&msg_info, sizeof(msg_info), 0);
+}
+}
+}
+return (found);
+}
+static int
+ctl_abort_task_set(union ctl_io *io)
+{
+struct ctl_softc *softc = control_softc;
+struct ctl_lun *lun;
+uint32_t targ_lun;
+/*
+* Look up the LUN.
+*/
+targ_lun = io->io_hdr.nexus.targ_mapped_lun;
+mtx_lock(&softc->ctl_lock);
+if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
+lun = softc->ctl_luns[targ_lun];
+else {
+mtx_unlock(&softc->ctl_lock);
+return (1);
+}
+mtx_lock(&lun->lun_lock);
+mtx_unlock(&softc->ctl_lock);
+ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
+io->io_hdr.nexus.initid.id,
+(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+mtx_unlock(&lun->lun_lock);
+return (0);
+}
+static int
+ctl_i_t_nexus_reset(union ctl_io *io)
+{
+struct ctl_softc *softc = control_softc;
+struct ctl_lun *lun;
+uint32_t initindex;
+initindex = ctl_get_initindex(&io->io_hdr.nexus);
+mtx_lock(&softc->ctl_lock);
+STAILQ_FOREACH(lun, &softc->lun_list, links) {
+mtx_lock(&lun->lun_lock);
+ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
+io->io_hdr.nexus.initid.id,
+(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
+ctl_clear_mask(lun->have_ca, initindex);
+lun->pending_sense[initindex].ua_pending |= CTL_UA_I_T_NEXUS_LOSS;
+mtx_unlock(&lun->lun_lock);
+}
+mtx_unlock(&softc->ctl_lock);
+return (0);
+}
static int
ctl_abort_task(union ctl_io *io)
{
@@ -11873,11 +11967,15 @@ ctl_run_task(union ctl_io *io)
retval = ctl_abort_task(io);
break;
case CTL_TASK_ABORT_TASK_SET:
+retval = ctl_abort_task_set(io);
break;
case CTL_TASK_CLEAR_ACA:
break;
case CTL_TASK_CLEAR_TASK_SET:
break;
+case CTL_TASK_I_T_NEXUS_RESET:
+retval = ctl_i_t_nexus_reset(io);
+break;
case CTL_TASK_LUN_RESET: {
struct ctl_lun *lun;
uint32_t targ_lun;

View File

@@ -115,17 +115,18 @@ typedef enum {
CTL_UA_POWERON = 0x0001,
CTL_UA_BUS_RESET = 0x0002,
CTL_UA_TARG_RESET = 0x0004,
-CTL_UA_LUN_RESET = 0x0008,
-CTL_UA_LUN_CHANGE = 0x0010,
-CTL_UA_MODE_CHANGE = 0x0020,
-CTL_UA_LOG_CHANGE = 0x0040,
-CTL_UA_LVD = 0x0080,
-CTL_UA_SE = 0x0100,
-CTL_UA_RES_PREEMPT = 0x0200,
-CTL_UA_RES_RELEASE = 0x0400,
-CTL_UA_REG_PREEMPT = 0x0800,
-CTL_UA_ASYM_ACC_CHANGE = 0x1000,
-CTL_UA_CAPACITY_CHANGED = 0x2000
+CTL_UA_I_T_NEXUS_LOSS = 0x0008,
+CTL_UA_LUN_RESET = 0x0010,
+CTL_UA_LUN_CHANGE = 0x0020,
+CTL_UA_MODE_CHANGE = 0x0040,
+CTL_UA_LOG_CHANGE = 0x0080,
+CTL_UA_LVD = 0x0100,
+CTL_UA_SE = 0x0200,
+CTL_UA_RES_PREEMPT = 0x0400,
+CTL_UA_RES_RELEASE = 0x0800,
+CTL_UA_REG_PREEMPT = 0x1000,
+CTL_UA_ASYM_ACC_CHANGE = 0x2000,
+CTL_UA_CAPACITY_CHANGED = 0x4000
} ctl_ua_type;
#ifdef _KERNEL
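
Note that these values are used as a per-initiator bitmask (see ua_pending in the ctl.c hunk above), so inserting CTL_UA_I_T_NEXUS_LOSS forces every later flag to move up to the next free bit. A hypothetical compile-time guard for that single-bit invariant, not part of the commit:

	/* Hypothetical: a UA value must be exactly one bit so it can be OR'd into ua_pending. */
	_Static_assert((CTL_UA_I_T_NEXUS_LOSS & (CTL_UA_I_T_NEXUS_LOSS - 1)) == 0,
	    "CTL UA flags must be powers of two");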

View File

@@ -401,6 +401,11 @@ ctl_build_ua(ctl_ua_type ua_type, struct scsi_sense_data *sense,
asc = 0x29;
ascq = 0x03;
break;
+case CTL_UA_I_T_NEXUS_LOSS:
+/* 29h/07h I_T NEXUS LOSS OCCURRED */
+asc = 0x29;
+ascq = 0x07;
+break;
case CTL_UA_LUN_RESET:
/* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
/*

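On the wire the new unit attention surfaces as sense key UNIT ATTENTION with ASC/ASCQ 29h/07h, as built above. A hypothetical initiator-side check, assuming only standard fixed-format sense data (nothing here is part of the commit):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical: does this fixed-format sense buffer report I_T NEXUS LOSS OCCURRED? */
	static bool
	is_i_t_nexus_loss(const uint8_t *sense, size_t len)
	{
		if (len < 14)
			return (false);
		/* Fixed format only: response code 70h/71h, key in byte 2, ASC/ASCQ in bytes 12/13. */
		if ((sense[0] & 0x7f) != 0x70 && (sense[0] & 0x7f) != 0x71)
			return (false);
		return ((sense[2] & 0x0f) == 0x06 &&	/* UNIT ATTENTION */
		    sense[12] == 0x29 && sense[13] == 0x07);
	}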
View File

@@ -631,6 +631,12 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
io->taskio.task_action = CTL_TASK_ABORT_TASK;
io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag;
break;
+case BHSTMR_FUNCTION_ABORT_TASK_SET:
+#if 0
+CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET");
+#endif
+io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
+break;
case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET");
@@ -1033,64 +1039,36 @@ cfiscsi_callout(void *context)
static void
cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
{
-struct cfiscsi_data_wait *cdw, *tmpcdw;
+struct cfiscsi_data_wait *cdw;
union ctl_io *io;
int error, last;
-#ifdef notyet
io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
if (io == NULL) {
CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io");
return;
}
ctl_zero_io(io);
-io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
+io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs;
io->io_hdr.io_type = CTL_IO_TASK;
io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
io->io_hdr.nexus.targ_target.id = 0;
-io->io_hdr.nexus.targ_lun = lun;
+io->io_hdr.nexus.targ_lun = 0;
io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
-io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
+io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
+refcount_acquire(&cs->cs_outstanding_ctl_pdus);
error = ctl_queue(io);
if (error != CTL_RETVAL_COMPLETE) {
CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error);
+refcount_release(&cs->cs_outstanding_ctl_pdus);
ctl_free_io(io);
}
-#else
-/*
-* CTL doesn't currently support CTL_TASK_ABORT_TASK_SET, so instead
-* just iterate over tasks that are waiting for something - data - and
-* terminate those.
-*/
CFISCSI_SESSION_LOCK(cs);
-TAILQ_FOREACH_SAFE(cdw,
-&cs->cs_waiting_for_data_out, cdw_next, tmpcdw) {
-io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
-if (io == NULL) {
-CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io");
-return;
-}
-ctl_zero_io(io);
-io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
-io->io_hdr.io_type = CTL_IO_TASK;
-io->io_hdr.nexus.initid.id = cs->cs_ctl_initid;
-io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port;
-io->io_hdr.nexus.targ_target.id = 0;
-//io->io_hdr.nexus.targ_lun = lun; /* Not needed? */
-io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */
-io->taskio.task_action = CTL_TASK_ABORT_TASK;
-io->taskio.tag_num = cdw->cdw_initiator_task_tag;
-error = ctl_queue(io);
-if (error != CTL_RETVAL_COMPLETE) {
-CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error);
-ctl_free_io(io);
-return;
-}
-#if 0
-CFISCSI_SESSION_DEBUG(cs, "removing csw for initiator task tag "
-"0x%x", cdw->cdw_initiator_task_tag);
-#endif
+while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) {
+TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next);
+CFISCSI_SESSION_UNLOCK(cs);
/*
* Set nonzero port status; this prevents backends from
* assuming that the data transfer actually succeeded
@@ -1098,11 +1076,10 @@ cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
*/
cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42;
cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io);
-TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next);
uma_zfree(cfiscsi_data_wait_zone, cdw);
+CFISCSI_SESSION_LOCK(cs);
}
CFISCSI_SESSION_UNLOCK(cs);
-#endif
/*
* Wait for CTL to terminate all the tasks.
@@ -1114,7 +1091,7 @@ cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
break;
CFISCSI_SESSION_WARN(cs, "waiting for CTL to terminate tasks, "
"%d remaining", cs->cs_outstanding_ctl_pdus);
-pause("cfiscsi_terminate", 1);
+pause("cfiscsi_terminate", hz / 100);
}
}
@@ -2864,14 +2841,18 @@ cfiscsi_done(union ctl_io *io)
KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE),
("invalid CTL status %#x", io->io_hdr.status));
-request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
-if (request == NULL) {
+if (io->io_hdr.io_type == CTL_IO_TASK &&
+io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) {
/*
* Implicit task termination has just completed; nothing to do.
*/
+cs = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
+refcount_release(&cs->cs_outstanding_ctl_pdus);
ctl_free_io(io);
return;
}
+request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
cs = PDU_SESSION(request);
refcount_release(&cs->cs_outstanding_ctl_pdus);

View File

@@ -364,6 +364,7 @@ typedef enum {
CTL_TASK_ABORT_TASK_SET,
CTL_TASK_CLEAR_ACA,
CTL_TASK_CLEAR_TASK_SET,
+CTL_TASK_I_T_NEXUS_RESET,
CTL_TASK_LUN_RESET,
CTL_TASK_TARGET_RESET,
CTL_TASK_BUS_RESET,

View File

@@ -84,6 +84,7 @@ static struct ctl_task_desc ctl_task_table[] = {
{CTL_TASK_ABORT_TASK_SET, "Abort Task Set"},
{CTL_TASK_CLEAR_ACA, "Clear ACA"},
{CTL_TASK_CLEAR_TASK_SET, "Clear Task Set"},
+{CTL_TASK_I_T_NEXUS_RESET, "I_T Nexus Reset"},
{CTL_TASK_LUN_RESET, "LUN Reset"},
{CTL_TASK_TARGET_RESET, "Target Reset"},
{CTL_TASK_BUS_RESET, "Bus Reset"},
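
The ctl_util.c table above only adds a human-readable name for the new function. A hypothetical lookup over that table, with the struct field names (task_action, description) and the enum typedef name (ctl_task_type) assumed rather than taken from this diff:

	/* Hypothetical helper: translate a task action into the table's display name. */
	static const char *
	example_task_name(ctl_task_type task)
	{
		u_int i;

		for (i = 0; i < nitems(ctl_task_table); i++) {
			if (ctl_task_table[i].task_action == task)
				return (ctl_task_table[i].description);
		}
		return ("Unknown");
	}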