Implement the QUERY TASK, QUERY TASK SET, and QUERY ASYNC EVENT task management functions.

With this, CTL supports most of SAM-5 task management.
Alexander Motin 2015-09-14 08:01:05 +00:00
parent 6e17d3a7fc
commit ceff31dc0c
6 changed files with 318 additions and 176 deletions
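
In SAM-5 terms, the new functions reduce to a simple rule: QUERY TASK reports FUNCTION SUCCEEDED when the referenced task is still present in the task set for the addressed LUN, QUERY TASK SET when any task from the issuing I_T nexus is present, and QUERY ASYNC EVENT when a unit attention condition is pending for that nexus; otherwise they report FUNCTION COMPLETE. The standalone sketch below restates that decision logic only for illustration; it is not the driver code, and the query_status enum and query_result() helper are hypothetical simplifications of the ctl_task_status values and handlers added in the diff that follows. The REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS change also advertises the new functions via the RST_QTS, RST_QTSS and RST_QAES bits.

#include <stdio.h>

/*
 * Hypothetical, simplified mirror of the new ctl_task_status values;
 * only the names track the driver, the numeric values are arbitrary.
 */
enum query_status {
	QUERY_FUNCTION_COMPLETE,	/* nothing matching was found */
	QUERY_FUNCTION_SUCCEEDED,	/* the queried task/event exists */
	QUERY_LUN_DOES_NOT_EXIST	/* addressed LUN is not configured */
};

/*
 * QUERY TASK: "found" means a command with the referenced tag from the
 * same I_T nexus is still queued.  QUERY TASK SET: any command from that
 * nexus is queued.  QUERY ASYNC EVENT: a unit attention is pending.
 */
static enum query_status
query_result(int lun_exists, int found)
{
	if (!lun_exists)
		return (QUERY_LUN_DOES_NOT_EXIST);
	return (found ? QUERY_FUNCTION_SUCCEEDED : QUERY_FUNCTION_COMPLETE);
}

int
main(void)
{
	printf("%d %d %d\n", query_result(1, 1), query_result(1, 0),
	    query_result(0, 0));
	return (0);
}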

View File

@@ -412,11 +412,14 @@ static int ctl_scsiio(struct ctl_scsiio *ctsio);
static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
@@ -7362,8 +7365,9 @@ ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
ctsio->kern_rel_offset = 0;
data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
data->byte2 |= RST_ITNRS;
data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
RST_TRS;
data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
ctl_set_success(ctsio);
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -11373,10 +11377,10 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
if (port != NULL &&
ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
continue;
retval += ctl_lun_reset(lun, io, ua_type);
retval += ctl_do_lun_reset(lun, io, ua_type);
}
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (retval);
}
@@ -11402,7 +11406,7 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
* XXX KDM for now, we're setting unit attention for all initiators.
*/
static int
ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
union ctl_io *xio;
#if 0
@@ -11450,6 +11454,39 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
return (0);
}
static int
ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
{
struct ctl_lun *lun;
uint32_t targ_lun;
int retval;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET);
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
union ctl_ha_msg msg_info;
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.nexus = io->io_hdr.nexus;
msg_info.task.task_action = CTL_TASK_LUN_RESET;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.task), M_WAITOK);
}
return (retval);
}
static void
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
int other_sc)
@@ -11505,10 +11542,10 @@ ctl_abort_task_set(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
lun = softc->ctl_luns[targ_lun];
else {
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
@@ -11523,6 +11560,7 @@ ctl_abort_task_set(union ctl_io *io)
(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
}
mtx_unlock(&lun->lun_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
@@ -11560,6 +11598,7 @@ ctl_i_t_nexus_reset(union ctl_io *io)
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
@@ -11584,11 +11623,10 @@ ctl_abort_task(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS)
&& (softc->ctl_luns[targ_lun] != NULL))
lun = softc->ctl_luns[targ_lun];
else {
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
@@ -11694,6 +11732,77 @@ ctl_abort_task(union ctl_io *io)
io->taskio.tag_type);
#endif
}
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
static int
ctl_query_task(union ctl_io *io, int task_set)
{
union ctl_io *xio;
struct ctl_lun *lun;
struct ctl_softc *softc;
int found = 0;
uint32_t targ_lun;
softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
|| (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
|| (xio->io_hdr.flags & CTL_FLAG_ABORT))
continue;
if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
found = 1;
break;
}
}
mtx_unlock(&lun->lun_lock);
if (found)
io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
else
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
static int
ctl_query_async_event(union ctl_io *io)
{
struct ctl_lun *lun;
struct ctl_softc *softc;
ctl_ua_type ua;
uint32_t targ_lun, initidx;
softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
initidx = ctl_get_initindex(&io->io_hdr.nexus);
ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
mtx_unlock(&lun->lun_lock);
if (ua != CTL_UA_NONE)
io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
else
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
@@ -11702,41 +11811,12 @@ ctl_run_task(union ctl_io *io)
{
struct ctl_softc *softc = control_softc;
int retval = 1;
const char *task_desc;
CTL_DEBUG_PRINT(("ctl_run_task\n"));
KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
("ctl_run_task: Unextected io_type %d\n",
io->io_hdr.io_type));
task_desc = ctl_scsi_task_string(&io->taskio);
if (task_desc != NULL) {
#ifdef NEEDTOPORT
csevent_log(CSC_CTL | CSC_SHELF_SW |
CTL_TASK_REPORT,
csevent_LogType_Trace,
csevent_Severity_Information,
csevent_AlertLevel_Green,
csevent_FRU_Firmware,
csevent_FRU_Unknown,
"CTL: received task: %s",task_desc);
#endif
} else {
#ifdef NEEDTOPORT
csevent_log(CSC_CTL | CSC_SHELF_SW |
CTL_TASK_REPORT,
csevent_LogType_Trace,
csevent_Severity_Information,
csevent_AlertLevel_Green,
csevent_FRU_Firmware,
csevent_FRU_Unknown,
"CTL: received unknown task "
"type: %d (%#x)",
io->taskio.task_action,
io->taskio.task_action);
#endif
}
("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type));
io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
switch (io->taskio.task_action) {
case CTL_TASK_ABORT_TASK:
retval = ctl_abort_task(io);
@@ -11750,36 +11830,9 @@ ctl_run_task(union ctl_io *io)
case CTL_TASK_I_T_NEXUS_RESET:
retval = ctl_i_t_nexus_reset(io);
break;
case CTL_TASK_LUN_RESET: {
struct ctl_lun *lun;
uint32_t targ_lun;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS)
&& (softc->ctl_luns[targ_lun] != NULL))
lun = softc->ctl_luns[targ_lun];
else {
mtx_unlock(&softc->ctl_lock);
retval = 1;
break;
}
retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET);
mtx_unlock(&softc->ctl_lock);
if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
union ctl_ha_msg msg_info;
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.nexus = io->io_hdr.nexus;
msg_info.task.task_action = CTL_TASK_LUN_RESET;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.task), M_WAITOK);
}
case CTL_TASK_LUN_RESET:
retval = ctl_lun_reset(softc, io);
break;
}
case CTL_TASK_TARGET_RESET:
retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
break;
@@ -11790,9 +11843,18 @@ ctl_run_task(union ctl_io *io)
break;
case CTL_TASK_PORT_LOGOUT:
break;
case CTL_TASK_QUERY_TASK:
retval = ctl_query_task(io, 0);
break;
case CTL_TASK_QUERY_TASK_SET:
retval = ctl_query_task(io, 1);
break;
case CTL_TASK_QUERY_ASYNC_EVENT:
retval = ctl_query_async_event(io);
break;
default:
printf("ctl_run_task: got unknown task management event %d\n",
io->taskio.task_action);
printf("%s: got unknown task management event %d\n",
__func__, io->taskio.task_action);
break;
}
if (retval == 0)

View File

@@ -365,6 +365,132 @@ ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq)
SSD_ELEM_NONE);
}
static void
ctl_ua_to_acsq(ctl_ua_type ua_to_build, int *asc, int *ascq,
ctl_ua_type *ua_to_clear)
{
switch (ua_to_build) {
case CTL_UA_POWERON:
/* 29h/01h POWER ON OCCURRED */
*asc = 0x29;
*ascq = 0x01;
*ua_to_clear = ~0;
break;
case CTL_UA_BUS_RESET:
/* 29h/02h SCSI BUS RESET OCCURRED */
*asc = 0x29;
*ascq = 0x02;
*ua_to_clear = ~0;
break;
case CTL_UA_TARG_RESET:
/* 29h/03h BUS DEVICE RESET FUNCTION OCCURRED*/
*asc = 0x29;
*ascq = 0x03;
*ua_to_clear = ~0;
break;
case CTL_UA_I_T_NEXUS_LOSS:
/* 29h/07h I_T NEXUS LOSS OCCURRED */
*asc = 0x29;
*ascq = 0x07;
*ua_to_clear = ~0;
break;
case CTL_UA_LUN_RESET:
/* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
/*
* Since we don't have a specific ASC/ASCQ pair for a LUN
* reset, just return the generic reset code.
*/
*asc = 0x29;
*ascq = 0x00;
break;
case CTL_UA_LUN_CHANGE:
/* 3Fh/0Eh REPORTED LUNS DATA HAS CHANGED */
*asc = 0x3F;
*ascq = 0x0E;
break;
case CTL_UA_MODE_CHANGE:
/* 2Ah/01h MODE PARAMETERS CHANGED */
*asc = 0x2A;
*ascq = 0x01;
break;
case CTL_UA_LOG_CHANGE:
/* 2Ah/02h LOG PARAMETERS CHANGED */
*asc = 0x2A;
*ascq = 0x02;
break;
case CTL_UA_INQ_CHANGE:
/* 3Fh/03h INQUIRY DATA HAS CHANGED */
*asc = 0x3F;
*ascq = 0x03;
break;
case CTL_UA_RES_PREEMPT:
/* 2Ah/03h RESERVATIONS PREEMPTED */
*asc = 0x2A;
*ascq = 0x03;
break;
case CTL_UA_RES_RELEASE:
/* 2Ah/04h RESERVATIONS RELEASED */
*asc = 0x2A;
*ascq = 0x04;
break;
case CTL_UA_REG_PREEMPT:
/* 2Ah/05h REGISTRATIONS PREEMPTED */
*asc = 0x2A;
*ascq = 0x05;
break;
case CTL_UA_ASYM_ACC_CHANGE:
/* 2Ah/06h ASYMMETRIC ACCESS STATE CHANGED */
*asc = 0x2A;
*ascq = 0x06;
break;
case CTL_UA_CAPACITY_CHANGED:
/* 2Ah/09h CAPACITY DATA HAS CHANGED */
*asc = 0x2A;
*ascq = 0x09;
break;
case CTL_UA_THIN_PROV_THRES:
/* 38h/07h THIN PROVISIONING SOFT THRESHOLD REACHED */
*asc = 0x38;
*ascq = 0x07;
break;
default:
panic("%s: Unknown UA %x", __func__, ua_to_build);
}
}
ctl_ua_type
ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp)
{
ctl_ua_type ua;
ctl_ua_type ua_to_build, ua_to_clear;
int asc, ascq;
uint32_t p, i;
mtx_assert(&lun->lun_lock, MA_OWNED);
p = initidx / CTL_MAX_INIT_PER_PORT;
i = initidx % CTL_MAX_INIT_PER_PORT;
if (lun->pending_ua[p] == NULL)
ua = CTL_UA_POWERON;
else
ua = lun->pending_ua[p][i];
if (ua == CTL_UA_NONE)
return (CTL_UA_NONE);
ua_to_build = (1 << (ffs(ua) - 1));
ua_to_clear = ua_to_build;
ctl_ua_to_acsq(ua_to_build, &asc, &ascq, &ua_to_clear);
resp[0] = SSD_KEY_UNIT_ATTENTION;
if (ua_to_build == ua)
resp[0] |= 0x10;
else
resp[0] |= 0x20;
resp[1] = asc;
resp[2] = ascq;
return (ua);
}
ctl_ua_type
ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
struct scsi_sense_data *sense, scsi_sense_data_type sense_format)
@@ -396,94 +522,7 @@ ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
ua_to_build = (1 << (ffs(ua[i]) - 1));
ua_to_clear = ua_to_build;
switch (ua_to_build) {
case CTL_UA_POWERON:
/* 29h/01h POWER ON OCCURRED */
asc = 0x29;
ascq = 0x01;
ua_to_clear = ~0;
break;
case CTL_UA_BUS_RESET:
/* 29h/02h SCSI BUS RESET OCCURRED */
asc = 0x29;
ascq = 0x02;
ua_to_clear = ~0;
break;
case CTL_UA_TARG_RESET:
/* 29h/03h BUS DEVICE RESET FUNCTION OCCURRED*/
asc = 0x29;
ascq = 0x03;
ua_to_clear = ~0;
break;
case CTL_UA_I_T_NEXUS_LOSS:
/* 29h/07h I_T NEXUS LOSS OCCURRED */
asc = 0x29;
ascq = 0x07;
ua_to_clear = ~0;
break;
case CTL_UA_LUN_RESET:
/* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
/*
* Since we don't have a specific ASC/ASCQ pair for a LUN
* reset, just return the generic reset code.
*/
asc = 0x29;
ascq = 0x00;
break;
case CTL_UA_LUN_CHANGE:
/* 3Fh/0Eh REPORTED LUNS DATA HAS CHANGED */
asc = 0x3F;
ascq = 0x0E;
break;
case CTL_UA_MODE_CHANGE:
/* 2Ah/01h MODE PARAMETERS CHANGED */
asc = 0x2A;
ascq = 0x01;
break;
case CTL_UA_LOG_CHANGE:
/* 2Ah/02h LOG PARAMETERS CHANGED */
asc = 0x2A;
ascq = 0x02;
break;
case CTL_UA_INQ_CHANGE:
/* 3Fh/03h INQUIRY DATA HAS CHANGED */
asc = 0x3F;
ascq = 0x03;
break;
case CTL_UA_RES_PREEMPT:
/* 2Ah/03h RESERVATIONS PREEMPTED */
asc = 0x2A;
ascq = 0x03;
break;
case CTL_UA_RES_RELEASE:
/* 2Ah/04h RESERVATIONS RELEASED */
asc = 0x2A;
ascq = 0x04;
break;
case CTL_UA_REG_PREEMPT:
/* 2Ah/05h REGISTRATIONS PREEMPTED */
asc = 0x2A;
ascq = 0x05;
break;
case CTL_UA_ASYM_ACC_CHANGE:
/* 2Ah/06n ASYMMETRIC ACCESS STATE CHANGED */
asc = 0x2A;
ascq = 0x06;
break;
case CTL_UA_CAPACITY_CHANGED:
/* 2Ah/09n CAPACITY DATA HAS CHANGED */
asc = 0x2A;
ascq = 0x09;
break;
case CTL_UA_THIN_PROV_THRES:
/* 38h/07n THIN PROVISIONING SOFT THRESHOLD REACHED */
asc = 0x38;
ascq = 0x07;
break;
default:
panic("ctl_build_ua: Unknown UA %x", ua_to_build);
}
ctl_ua_to_acsq(ua_to_build, &asc, &ascq, &ua_to_clear);
ctl_set_sense_data(sense,
/*lun*/ NULL,
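
The three-byte buffer that ctl_build_qae() fills above (and that ctl_query_async_event() stores in task_resp[] for the frontend to return as additional response information) is packed as sketched below. This is a minimal restatement, with a hypothetical pack_qae_resp() helper mirroring the code above; as implemented in this commit, the 0x10 flag means the reported unit attention is the only one pending for the nexus, while 0x20 means further unit attentions remain.

#include <stdint.h>

/*
 * Hypothetical restatement of the packing performed by ctl_build_qae():
 * byte 0 carries the sense key (UNIT ATTENTION) plus 0x10 when this is
 * the only pending UA or 0x20 when more remain; bytes 1 and 2 carry the
 * ASC/ASCQ pair chosen by ctl_ua_to_acsq().
 */
static void
pack_qae_resp(uint8_t resp[3], uint8_t sense_key, uint8_t asc, uint8_t ascq,
    int more_pending)
{
	resp[0] = sense_key | (more_pending ? 0x20 : 0x10);
	resp[1] = asc;
	resp[2] = ascq;
}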

View File

@@ -57,6 +57,7 @@ void ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src,
void ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src,
struct scsi_sense_data_fixed *sense_dest);
void ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq);
ctl_ua_type ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp);
ctl_ua_type ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
struct scsi_sense_data *sense, scsi_sense_data_type sense_format);
void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);

View File

@@ -663,12 +663,31 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
#endif
io->taskio.task_action = CTL_TASK_TARGET_RESET;
break;
case BHSTMR_FUNCTION_QUERY_TASK:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK");
#endif
io->taskio.task_action = CTL_TASK_QUERY_TASK;
io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag;
break;
case BHSTMR_FUNCTION_QUERY_TASK_SET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK_SET");
#endif
io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
break;
case BHSTMR_FUNCTION_I_T_NEXUS_RESET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_I_T_NEXUS_RESET");
#endif
io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
break;
case BHSTMR_FUNCTION_QUERY_ASYNC_EVENT:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_ASYNC_EVENT");
#endif
io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT;
break;
default:
CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x",
bhstmr->bhstmr_function & ~0x80);
@@ -2910,18 +2929,23 @@ cfiscsi_task_management_done(union ctl_io *io)
response->ip_bhs;
bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE;
bhstmr2->bhstmr_flags = 0x80;
if (io->io_hdr.status == CTL_SUCCESS) {
switch (io->taskio.task_status) {
case CTL_TASK_FUNCTION_COMPLETE:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE;
} else {
/*
* XXX: How to figure out what exactly went wrong? iSCSI spec
* expects us to provide detailed error, e.g. "Task does
* not exist" or "LUN does not exist".
*/
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED");
bhstmr2->bhstmr_response =
BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED;
break;
case CTL_TASK_FUNCTION_SUCCEEDED:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_SUCCEEDED;
break;
case CTL_TASK_LUN_DOES_NOT_EXIST:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_LUN_DOES_NOT_EXIST;
break;
case CTL_TASK_FUNCTION_NOT_SUPPORTED:
default:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED;
break;
}
memcpy(bhstmr2->bhstmr_additional_reponse_information,
io->taskio.task_resp, sizeof(io->taskio.task_resp));
bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag;
ctl_free_io(io);

View File

@@ -328,9 +328,20 @@ typedef enum {
CTL_TASK_TARGET_RESET,
CTL_TASK_BUS_RESET,
CTL_TASK_PORT_LOGIN,
CTL_TASK_PORT_LOGOUT
CTL_TASK_PORT_LOGOUT,
CTL_TASK_QUERY_TASK,
CTL_TASK_QUERY_TASK_SET,
CTL_TASK_QUERY_ASYNC_EVENT
} ctl_task_type;
typedef enum {
CTL_TASK_FUNCTION_COMPLETE,
CTL_TASK_FUNCTION_SUCCEEDED,
CTL_TASK_FUNCTION_REJECTED,
CTL_TASK_LUN_DOES_NOT_EXIST,
CTL_TASK_FUNCTION_NOT_SUPPORTED
} ctl_task_status;
/*
* Task management I/O structure. Aborts, bus resets, etc., are sent using
* this structure.
@@ -343,6 +354,8 @@ struct ctl_taskio {
ctl_task_type task_action; /* Target Reset, Abort, etc. */
uint32_t tag_num; /* tag number */
ctl_tag_type tag_type; /* simple, ordered, etc. */
uint8_t task_status; /* Complete, Succeeded, etc. */
uint8_t task_resp[3];/* Response information */
};
typedef enum {
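
Taken together with the ctl_frontend_iscsi.c changes earlier in this commit, the two new ctl_taskio fields above are all a frontend needs to drive the query functions. The outline below is a sketch only; the io pointer and referenced_task_tag variable are assumed to be in scope, and the submission and completion plumbing is elided.

/* Request side, cf. cfiscsi_pdu_handle_task_request(): */
io->io_hdr.io_type = CTL_IO_TASK;
io->taskio.task_action = CTL_TASK_QUERY_TASK;
io->taskio.tag_num = referenced_task_tag;	/* tag being queried */
/* ...submit the I/O to CTL; plumbing elided... */

/*
 * Completion side, cf. cfiscsi_task_management_done():
 *   CTL_TASK_FUNCTION_SUCCEEDED  - the queried task (or task set entry,
 *                                  or async event) exists
 *   CTL_TASK_FUNCTION_COMPLETE   - nothing matching was found
 *   CTL_TASK_LUN_DOES_NOT_EXIST  - report "LUN does not exist"
 * For QUERY ASYNC EVENT, io->taskio.task_resp[] additionally carries the
 * sense key/ASC/ASCQ triplet built by ctl_build_qae().
 */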

View File

@@ -89,7 +89,10 @@ static struct ctl_task_desc ctl_task_table[] = {
{CTL_TASK_TARGET_RESET, "Target Reset"},
{CTL_TASK_BUS_RESET, "Bus Reset"},
{CTL_TASK_PORT_LOGIN, "Port Login"},
{CTL_TASK_PORT_LOGOUT, "Port Logout"}
{CTL_TASK_PORT_LOGOUT, "Port Logout"},
{CTL_TASK_QUERY_TASK, "Query Task"},
{CTL_TASK_QUERY_TASK_SET, "Query Task Set"},
{CTL_TASK_QUERY_ASYNC_EVENT, "Query Async Event"}
};
void