From b1d86a4bdc5aa94695e936fe711bc485604fec1c Mon Sep 17 00:00:00 2001
From: mav
Date: Mon, 16 Jan 2017 16:19:55 +0000
Subject: [PATCH] Make CTL frontends report kern_data_resid for under-/overruns.

It seems kern_data_resid was never really implemented.  This change
finally does that.  Frontends now update this field while transferring
data, so CTL and the backends receiving it can handle the result more
flexibly.  At this point behavior should not change significantly:
write overruns are still reported as errors, but that may change later
if we decide so.

The CAM target frontend still does not handle overruns properly due to
CAM API limitations.  We may need to add some fields to struct
ccb_accept_tio to pass information about the initiator-requested
transfer size(s).

MFC after:	2 weeks
---
 sys/cam/ctl/ctl.c                  |  56 +++-------------
 sys/cam/ctl/ctl_backend_block.c    |  26 +++-----
 sys/cam/ctl/ctl_backend_ramdisk.c  |  26 +++-----
 sys/cam/ctl/ctl_error.c            |  12 ++++
 sys/cam/ctl/ctl_error.h            |   1 +
 sys/cam/ctl/ctl_frontend_cam_sim.c |  22 ++----
 sys/cam/ctl/ctl_frontend_ioctl.c   |  25 ++-----
 sys/cam/ctl/ctl_frontend_iscsi.c   |  19 +++---
 sys/cam/ctl/ctl_tpc.c              |  11 ---
 sys/cam/ctl/ctl_tpc_local.c        |  14 ++--
 sys/cam/ctl/scsi_ctl.c             | 104 ++++++++++++++---------------
 sys/cam/scsi/scsi_all.c            |   2 +-
 12 files changed, 122 insertions(+), 196 deletions(-)

diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 2efad7c7b7ad..8c6e39adc051 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -5053,18 +5053,13 @@ ctl_config_move_done(union ctl_io *io)
 	if ((io->io_hdr.port_status != 0) &&
 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
 	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
-		/*
-		 * For hardware error sense keys, the sense key
-		 * specific value is defined to be a retry count,
-		 * but we use it to pass back an internal FETD
-		 * error code.  XXX KDM Hopefully the FETD is only
-		 * using 16 bits for an error code, since that's
-		 * all the space we have in the sks field.
-		 */
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 1,
-					 /*retry_count*/
-					 io->io_hdr.port_status);
+		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+		    /*retry_count*/ io->io_hdr.port_status);
+	} else if (io->scsiio.kern_data_resid != 0 &&
+	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_invalid_field_ciu(&io->scsiio);
 	}
 
 	if (ctl_debug & CTL_DEBUG_CDB_DATA)
@@ -5462,7 +5457,6 @@ ctl_format(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = length;
 	ctsio->kern_total_len = length;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5588,7 +5582,6 @@ ctl_read_buffer(struct ctl_scsiio *ctsio)
 	}
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctl_set_success(ctsio);
@@ -5634,7 +5627,6 @@ ctl_write_buffer(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5742,7 +5734,6 @@ ctl_write_same(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -5788,7 +5779,6 @@ ctl_unmap(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -6278,7 +6268,6 @@ ctl_mode_select(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = param_len;
 	ctsio->kern_total_len = param_len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -6508,7 +6497,6 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
 
 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	if (total_len < alloc_len) {
 		ctsio->residual = alloc_len - total_len;
@@ -6861,7 +6849,6 @@ ctl_log_sense(struct ctl_scsiio *ctsio)
 
 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	if (total_len < alloc_len) {
 		ctsio->residual = alloc_len - total_len;
@@ -6929,7 +6916,6 @@ ctl_read_capacity(struct ctl_scsiio *ctsio)
 	ctsio->residual = 0;
 	ctsio->kern_data_len = sizeof(*data);
 	ctsio->kern_total_len = sizeof(*data);
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -6995,7 +6981,6 @@ ctl_read_capacity_16(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -7050,7 +7035,6 @@ ctl_get_lba_status(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -7112,7 +7096,6 @@ ctl_read_defect(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -7211,7 +7194,6 @@ ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	if (ext) {
@@ -7412,7 +7394,6 @@ ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	switch (cdb->options & RSO_OPTIONS_MASK) {
@@ -7526,7 +7507,6 @@ ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
@@ -7574,7 +7554,6 @@ ctl_report_timestamp(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
@@ -7647,7 +7626,6 @@ ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
 		ctsio->kern_total_len = alloc_len;
 	}
 
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -8225,7 +8203,6 @@ ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = param_len;
 	ctsio->kern_total_len = param_len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -9207,7 +9184,6 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9270,7 +9246,6 @@ ctl_request_sense(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_len = cdb->length;
 	ctsio->kern_total_len = cdb->length;
 
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9409,7 +9384,6 @@ ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9478,7 +9452,6 @@ ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9537,7 +9510,6 @@ ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9613,7 +9585,6 @@ ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9678,7 +9649,6 @@ ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9807,7 +9777,6 @@ ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9883,7 +9852,6 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -9959,7 +9927,6 @@ ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -10016,7 +9983,6 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -10151,7 +10117,6 @@ ctl_inquiry_std(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	if (data_len < alloc_len) {
@@ -10379,7 +10344,6 @@ ctl_get_config(struct ctl_scsiio *ctsio)
 	    sizeof(struct scsi_get_config_feature) + 4;
 	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
@@ -10585,7 +10549,6 @@ ctl_get_event_status(struct ctl_scsiio *ctsio)
 	data_len = sizeof(struct scsi_get_event_status_header);
 	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	if (data_len < alloc_len) {
@@ -10623,7 +10586,6 @@ ctl_mechanism_status(struct ctl_scsiio *ctsio)
 	data_len = sizeof(struct scsi_mechanism_status_header);
 	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	if (data_len < alloc_len) {
@@ -10683,7 +10645,6 @@ ctl_read_toc(struct ctl_scsiio *ctsio)
 		data_len += sizeof(struct scsi_read_toc_type01_descr);
 	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
 	ctsio->kern_sg_entries = 0;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	if (data_len < alloc_len) {
@@ -12585,6 +12546,9 @@ ctl_datamove(union ctl_io *io)
 
 	CTL_DEBUG_PRINT(("ctl_datamove\n"));
 
+	/* No data transferred yet.  Frontend must update this when done. */
+	io->scsiio.kern_data_resid = io->scsiio.kern_data_len;
+
 #ifdef CTL_TIME_IO
 	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
 		char str[256];
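
To make the new contract concrete: ctl_datamove() above now presets
kern_data_resid to the full kern_data_len before calling the frontend,
the frontend subtracts whatever it actually moves, and
ctl_config_move_done() turns a leftover residual on a write into
"Invalid field in command information unit" sense data.  A minimal
sketch of a frontend datamove handler following that contract might
look as below.  This is illustrative only and not part of the patch;
frontend_copy_segment() stands in for hypothetical transport glue.

    /* Hypothetical transport glue: returns the byte count it moved. */
    static size_t frontend_copy_segment(union ctl_io *io, void *addr,
        size_t len);

    static void
    example_fe_datamove(union ctl_io *io)
    {
    	struct ctl_sg_entry sg, *sglist;
    	size_t moved;
    	int i, nseg;

    	/* Use the S/G list if present, else treat the buffer as one entry. */
    	if (io->scsiio.kern_sg_entries > 0) {
    		sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
    		nseg = io->scsiio.kern_sg_entries;
    	} else {
    		sg.addr = io->scsiio.kern_data_ptr;
    		sg.len = io->scsiio.kern_data_len;
    		sglist = &sg;
    		nseg = 1;
    	}

    	for (i = 0; i < nseg; i++) {
    		moved = frontend_copy_segment(io, sglist[i].addr,
    		    sglist[i].len);

    		/* The new contract: account for every byte actually moved. */
    		io->scsiio.ext_data_filled += moved;
    		io->scsiio.kern_data_resid -= moved;
    		if (moved < sglist[i].len)
    			break;	/* Short transfer: leave the residual set. */
    	}

    	/* CTL and the backend now interpret kern_data_resid themselves. */
    	io->scsiio.be_move_done(io);
    }
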
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index 7d836f5afaf6..2bc8d697cd18 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -419,6 +419,16 @@ ctl_be_block_move_done(union ctl_io *io)
 	 */
 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
 		;
+	} else if ((io->io_hdr.port_status != 0) &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+		    /*retry_count*/ io->io_hdr.port_status);
+	} else if (io->scsiio.kern_data_resid != 0 &&
+	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_invalid_field_ciu(&io->scsiio);
 	} else if ((io->io_hdr.port_status == 0) &&
 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
 		lbalen = ARGS(beio->io);
@@ -428,21 +438,6 @@ ctl_be_block_move_done(union ctl_io *io)
 			/* We have two data blocks ready for comparison. */
 			ctl_be_block_compare(io);
 		}
-	} else if ((io->io_hdr.port_status != 0) &&
-	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
-	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
-		/*
-		 * For hardware error sense keys, the sense key
-		 * specific value is defined to be a retry count,
-		 * but we use it to pass back an internal FETD
-		 * error code.  XXX KDM Hopefully the FETD is only
-		 * using 16 bits for an error code, since that's
-		 * all the space we have in the sks field.
-		 */
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 1,
-					 /*retry_count*/
-					 io->io_hdr.port_status);
 	}
 
 	/*
@@ -1634,7 +1629,6 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 	else
 		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
 	io->scsiio.kern_data_len = beio->io_len;
-	io->scsiio.kern_data_resid = 0;
 	io->scsiio.kern_sg_entries = beio->num_segs;
 	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c
index dad9f20f9e97..71ed07ad909a 100644
--- a/sys/cam/ctl/ctl_backend_ramdisk.c
+++ b/sys/cam/ctl/ctl_backend_ramdisk.c
@@ -231,6 +231,16 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
 	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
 		;
+	} else if (io->io_hdr.port_status != 0 &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
+		    /*retry_count*/ io->io_hdr.port_status);
+	} else if (io->scsiio.kern_data_resid != 0 &&
+	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
+	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
+	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
+		ctl_set_invalid_field_ciu(&io->scsiio);
 	} else if ((io->io_hdr.port_status == 0) &&
 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
 		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
@@ -243,21 +253,6 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
 			return (0);
 		}
 		ctl_set_success(&io->scsiio);
-	} else if ((io->io_hdr.port_status != 0) &&
-	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
-	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
-		/*
-		 * For hardware error sense keys, the sense key
-		 * specific value is defined to be a retry count,
-		 * but we use it to pass back an internal FETD
-		 * error code.  XXX KDM Hopefully the FETD is only
-		 * using 16 bits for an error code, since that's
-		 * all the space we have in the sks field.
-		 */
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 1,
-					 /*retry_count*/
-					 io->io_hdr.port_status);
 	}
 	ctl_data_submit_done(io);
 	return(0);
@@ -318,7 +313,6 @@ ctl_backend_ramdisk_continue(union ctl_io *io)
 #endif /* CTL_RAMDISK_PAGES */
 
 	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
-	io->scsiio.kern_data_resid = 0;
 	io->scsiio.kern_data_len = len_filled;
 	io->scsiio.kern_sg_entries = sg_filled;
 	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
diff --git a/sys/cam/ctl/ctl_error.c b/sys/cam/ctl/ctl_error.c
index 409584ffbd89..9c222fa4286c 100644
--- a/sys/cam/ctl/ctl_error.c
+++ b/sys/cam/ctl/ctl_error.c
@@ -641,6 +641,18 @@ ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
 		      /*data*/ sks,
 		      SSD_ELEM_NONE);
 }
+void
+ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio)
+{
+
+	/* "Invalid field in command information unit" */
+	ctl_set_sense(ctsio,
+		      /*current_error*/ 1,
+		      /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
+		      /*asc*/ 0x0E,
+		      /*ascq*/ 0x03,
+		      SSD_ELEM_NONE);
+}
 
 void
 ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)
diff --git a/sys/cam/ctl/ctl_error.h b/sys/cam/ctl/ctl_error.h
index d4cdbb370e25..75c948c439e6 100644
--- a/sys/cam/ctl/ctl_error.h
+++ b/sys/cam/ctl/ctl_error.h
@@ -66,6 +66,7 @@ void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);
 void ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag);
 void ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
 			   int field, int bit_valid, int bit);
+void ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio);
 void ctl_set_invalid_opcode(struct ctl_scsiio *ctsio);
 void ctl_set_param_len_error(struct ctl_scsiio *ctsio);
 void ctl_set_already_locked(struct ctl_scsiio *ctsio);
diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c
index 544b669afdc1..406adde9d3f8 100644
--- a/sys/cam/ctl/ctl_frontend_cam_sim.c
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -300,7 +300,7 @@ cfcs_datamove(union ctl_io *io)
 	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
 	int cam_sg_count, ctl_sg_count, cam_sg_start;
 	int cam_sg_offset;
-	int len_to_copy, len_copied;
+	int len_to_copy;
 	int ctl_watermark, cam_watermark;
 	int i, j;
 
@@ -365,7 +365,6 @@ cfcs_datamove(union ctl_io *io)
 
 	ctl_watermark = 0;
 	cam_watermark = cam_sg_offset;
-	len_copied = 0;
 	for (i = cam_sg_start, j = 0;
 	     i < cam_sg_count && j < ctl_sg_count;) {
 		uint8_t *cam_ptr, *ctl_ptr;
@@ -387,9 +386,6 @@ cfcs_datamove(union ctl_io *io)
 			ctl_ptr = (uint8_t *)ctl_sglist[j].addr;
 		ctl_ptr = ctl_ptr + ctl_watermark;
 
-		ctl_watermark += len_to_copy;
-		cam_watermark += len_to_copy;
-
 		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
 		     CTL_FLAG_DATA_IN) {
 			CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n",
@@ -405,30 +401,22 @@ cfcs_datamove(union ctl_io *io)
 			bcopy(cam_ptr, ctl_ptr, len_to_copy);
 		}
 
-		len_copied += len_to_copy;
+		io->scsiio.ext_data_filled += len_to_copy;
+		io->scsiio.kern_data_resid -= len_to_copy;
 
+		cam_watermark += len_to_copy;
 		if (cam_sglist[i].ds_len == cam_watermark) {
 			i++;
 			cam_watermark = 0;
 		}
 
+		ctl_watermark += len_to_copy;
 		if (ctl_sglist[j].len == ctl_watermark) {
 			j++;
 			ctl_watermark = 0;
 		}
 	}
 
-	io->scsiio.ext_data_filled += len_copied;
-
-	/*
-	 * Report write underflow as error, since CTL and backends don't
-	 * really support it.
-	 */
-	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
-	    j < ctl_sg_count) {
-		io->io_hdr.port_status = 43;
-	} else
-
 	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
 		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
 		io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
diff --git a/sys/cam/ctl/ctl_frontend_ioctl.c b/sys/cam/ctl/ctl_frontend_ioctl.c
index 6f90fe3a42bc..3f5c23c0c55e 100644
--- a/sys/cam/ctl/ctl_frontend_ioctl.c
+++ b/sys/cam/ctl/ctl_frontend_ioctl.c
@@ -138,7 +138,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
 	struct ctl_sg_entry ext_entry, kern_entry;
 	int ext_sglen, ext_sg_entries, kern_sg_entries;
 	int ext_sg_start, ext_offset;
-	int len_to_copy, len_copied;
+	int len_to_copy;
 	int kern_watermark, ext_watermark;
 	int ext_sglist_malloced;
 	int i, j;
@@ -150,7 +150,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
 	 */
 	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
 		ext_sglist_malloced = 0;
-		ctsio->ext_data_filled = ctsio->ext_data_len;
+		ctsio->ext_data_filled += ctsio->kern_data_len;
+		ctsio->kern_data_resid = 0;
 		goto bailout;
 	}
 
@@ -204,7 +205,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
 
 	kern_watermark = 0;
 	ext_watermark = ext_offset;
-	len_copied = 0;
 	for (i = ext_sg_start, j = 0;
 	     i < ext_sg_entries && j < kern_sg_entries;) {
 		uint8_t *ext_ptr, *kern_ptr;
@@ -226,9 +226,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
 			kern_ptr = (uint8_t *)kern_sglist[j].addr;
 		kern_ptr = kern_ptr + kern_watermark;
 
-		kern_watermark += len_to_copy;
-		ext_watermark += len_to_copy;
-
 		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
 		     CTL_FLAG_DATA_IN) {
 			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
@@ -250,21 +247,22 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
 			}
 		}
 
-		len_copied += len_to_copy;
+		ctsio->ext_data_filled += len_to_copy;
+		ctsio->kern_data_resid -= len_to_copy;
 
+		ext_watermark += len_to_copy;
 		if (ext_sglist[i].len == ext_watermark) {
 			i++;
 			ext_watermark = 0;
 		}
 
+		kern_watermark += len_to_copy;
 		if (kern_sglist[j].len == kern_watermark) {
 			j++;
 			kern_watermark = 0;
 		}
 	}
 
-	ctsio->ext_data_filled += len_copied;
-
 	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
 			 "kern_sg_entries: %d\n", ext_sg_entries,
 			 kern_sg_entries));
@@ -272,15 +270,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
 			 "kern_data_len = %d\n", ctsio->ext_data_len,
 			 ctsio->kern_data_len));
 
-	/*
-	 * Report write underflow as error, since CTL and backends don't
-	 * really support it.
-	 */
-	if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
-	    j < kern_sg_entries) {
-		ctsio->io_hdr.port_status = 43;
-	}
-
 bailout:
 	if (ext_sglist_malloced != 0)
 		free(ext_sglist, M_CTL);
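
The copy loops above (cfcs_datamove(), ctl_ioctl_do_datamove() and,
further below, tpcl_datamove()) all switch to the same accounting
pattern: instead of flagging a write underflow with port_status 43,
the loop simply stops when either S/G list runs out and leaves the
unmoved byte count in kern_data_resid for move_done to judge.  Reduced
to its core, the pattern looks like the following sketch.  This is an
illustrative condensation, not code from the patch; the function name
and the DATA_IN-only direction are simplifications.

    /*
     * Sketch of the shared residual accounting.  "ext" is the
     * initiator-side S/G list, "kern" the CTL-side one.
     */
    static void
    example_copy_with_resid(union ctl_io *io,
        struct ctl_sg_entry *ext, int ext_cnt,
        struct ctl_sg_entry *kern, int kern_cnt)
    {
    	size_t ext_off = 0, kern_off = 0, len;
    	int i = 0, j = 0;

    	while (i < ext_cnt && j < kern_cnt) {
    		len = MIN(ext[i].len - ext_off, kern[j].len - kern_off);

    		/* Direction depends on CTL_FLAG_DATA_*; DATA_IN shown. */
    		bcopy((uint8_t *)kern[j].addr + kern_off,
    		    (uint8_t *)ext[i].addr + ext_off, len);

    		/* The point of the patch: account per chunk copied. */
    		io->scsiio.ext_data_filled += len;
    		io->scsiio.kern_data_resid -= len;

    		ext_off += len;
    		if (ext_off == ext[i].len) {
    			i++;
    			ext_off = 0;
    		}
    		kern_off += len;
    		if (kern_off == kern[j].len) {
    			j++;
    			kern_off = 0;
    		}
    	}
    	/*
    	 * If the external list ran out first on a write, kern_data_resid
    	 * stays nonzero here and the backend's move_done callback reports
    	 * the underrun.
    	 */
    }
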
diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c
index 443a4790a126..1171eb0876d6 100644
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -769,6 +769,7 @@ cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *c
 	cdw->cdw_sg_len -= copy_len;
 	off += copy_len;
 	io->scsiio.ext_data_filled += copy_len;
+	io->scsiio.kern_data_resid -= copy_len;
 
 	if (cdw->cdw_sg_len == 0) {
 		/*
@@ -2514,6 +2515,7 @@ cfiscsi_datamove_in(union ctl_io *io)
 		}
 		sg_addr += len;
 		sg_len -= len;
+		io->scsiio.kern_data_resid -= len;
 
 		KASSERT(buffer_offset + response->ip_data_len <= expected_len,
 		    ("buffer_offset %zd + ip_data_len %zd > expected_len %zd",
@@ -2599,7 +2601,7 @@ cfiscsi_datamove_out(union ctl_io *io)
 	struct iscsi_bhs_r2t *bhsr2t;
 	struct cfiscsi_data_wait *cdw;
 	struct ctl_sg_entry ctl_sg_entry, *ctl_sglist;
-	uint32_t expected_len, r2t_off, r2t_len;
+	uint32_t expected_len, datamove_len, r2t_off, r2t_len;
 	uint32_t target_transfer_tag;
 	bool done;
 
@@ -2618,16 +2620,15 @@ cfiscsi_datamove_out(union ctl_io *io)
 	PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len;
 
 	/*
-	 * Report write underflow as error since CTL and backends don't
-	 * really support it, and SCSI does not tell how to do it right.
+	 * Complete write underflow.  Not a single byte to read.  Return.
 	 */
 	expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length);
-	if (io->scsiio.kern_rel_offset + io->scsiio.kern_data_len >
-	    expected_len) {
-		io->scsiio.io_hdr.port_status = 43;
+	if (io->scsiio.kern_rel_offset > expected_len) {
 		io->scsiio.be_move_done(io);
 		return;
 	}
+	datamove_len = MIN(io->scsiio.kern_data_len,
+	    expected_len - io->scsiio.kern_rel_offset);
 
 	target_transfer_tag =
 	    atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1);
@@ -2650,7 +2651,7 @@ cfiscsi_datamove_out(union ctl_io *io)
 	cdw->cdw_ctl_io = io;
 	cdw->cdw_target_transfer_tag = target_transfer_tag;
 	cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag;
-	cdw->cdw_r2t_end = io->scsiio.kern_data_len;
+	cdw->cdw_r2t_end = datamove_len;
 	cdw->cdw_datasn = 0;
 
 	/* Set initial data pointer for the CDW respecting ext_data_filled. */
@@ -2659,7 +2660,7 @@ cfiscsi_datamove_out(union ctl_io *io)
 	} else {
 		ctl_sglist = &ctl_sg_entry;
 		ctl_sglist->addr = io->scsiio.kern_data_ptr;
-		ctl_sglist->len = io->scsiio.kern_data_len;
+		ctl_sglist->len = datamove_len;
 	}
 	cdw->cdw_sg_index = 0;
 	cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr;
@@ -2690,7 +2691,7 @@ cfiscsi_datamove_out(union ctl_io *io)
 	}
 
 	r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled;
-	r2t_len = MIN(io->scsiio.kern_data_len - io->scsiio.ext_data_filled,
+	r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled,
 	    cs->cs_max_burst_length);
 	cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len;
diff --git a/sys/cam/ctl/ctl_tpc.c b/sys/cam/ctl/ctl_tpc.c
index ee9bd7393920..9e1cfa852edb 100644
--- a/sys/cam/ctl/ctl_tpc.c
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -293,7 +293,6 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 
@@ -470,7 +469,6 @@ ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
@@ -568,7 +566,6 @@ ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
@@ -646,7 +643,6 @@ ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
@@ -718,7 +714,6 @@ ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
@@ -1730,7 +1725,6 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -1885,7 +1879,6 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2083,7 +2076,6 @@ ctl_populate_token(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2247,7 +2239,6 @@ ctl_write_using_token(struct ctl_scsiio *ctsio)
 	ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
 	ctsio->kern_data_len = len;
 	ctsio->kern_total_len = len;
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 	ctsio->kern_sg_entries = 0;
 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@@ -2423,7 +2414,6 @@ ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
@@ -2504,7 +2494,6 @@ ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
 		ctsio->kern_data_len = alloc_len;
 		ctsio->kern_total_len = alloc_len;
 	}
-	ctsio->kern_data_resid = 0;
 	ctsio->kern_rel_offset = 0;
 
 	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
diff --git a/sys/cam/ctl/ctl_tpc_local.c b/sys/cam/ctl/ctl_tpc_local.c
index f726aa59e70e..47b7ed6c9e34 100644
--- a/sys/cam/ctl/ctl_tpc_local.c
+++ b/sys/cam/ctl/ctl_tpc_local.c
@@ -137,7 +137,7 @@ tpcl_datamove(union ctl_io *io)
 	struct ctl_sg_entry ext_entry, kern_entry;
 	int ext_sg_entries, kern_sg_entries;
 	int ext_sg_start, ext_offset;
-	int len_to_copy, len_copied;
+	int len_to_copy;
 	int kern_watermark, ext_watermark;
 	struct ctl_scsiio *ctsio;
 	int i, j;
@@ -196,7 +196,6 @@ tpcl_datamove(union ctl_io *io)
 
 	kern_watermark = 0;
 	ext_watermark = ext_offset;
-	len_copied = 0;
 	for (i = ext_sg_start, j = 0;
 	     i < ext_sg_entries && j < kern_sg_entries;) {
 		uint8_t *ext_ptr, *kern_ptr;
@@ -218,9 +217,6 @@ tpcl_datamove(union ctl_io *io)
 			kern_ptr = (uint8_t *)kern_sglist[j].addr;
 		kern_ptr = kern_ptr + kern_watermark;
 
-		kern_watermark += len_to_copy;
-		ext_watermark += len_to_copy;
-
 		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
 		     CTL_FLAG_DATA_IN) {
 			CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
@@ -236,27 +232,27 @@ tpcl_datamove(union ctl_io *io)
 			memcpy(kern_ptr, ext_ptr, len_to_copy);
 		}
 
-		len_copied += len_to_copy;
+		ctsio->ext_data_filled += len_to_copy;
+		ctsio->kern_data_resid -= len_to_copy;
 
+		ext_watermark += len_to_copy;
 		if (ext_sglist[i].len == ext_watermark) {
 			i++;
 			ext_watermark = 0;
 		}
 
+		kern_watermark += len_to_copy;
 		if (kern_sglist[j].len == kern_watermark) {
 			j++;
 			kern_watermark = 0;
 		}
 	}
 
-	ctsio->ext_data_filled += len_copied;
-
 	CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
 			 __func__, ext_sg_entries, kern_sg_entries));
 	CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
 			 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
 
-	/* XXX KDM set residual?? */
 bailout:
 	io->scsiio.be_move_done(io);
 }
diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c
index d03966ee82f3..a32a67200c9b 100644
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -721,15 +721,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
 	idx = cmd_info->cur_transfer_index;
 	off = cmd_info->cur_transfer_off;
 	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
-	if (io->scsiio.kern_sg_entries == 0) {
-		/* No S/G list. */
+	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */
+
+		/* One time shift for SRR offset. */
+		off += io->scsiio.ext_data_filled;
+		io->scsiio.ext_data_filled = 0;
+
 		*data_ptr = io->scsiio.kern_data_ptr + off;
 		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
 			*dxfer_len = io->scsiio.kern_data_len - off;
 		} else {
 			*dxfer_len = bus_softc->maxio;
-			cmd_info->cur_transfer_index = -1;
-			cmd_info->cur_transfer_off = bus_softc->maxio;
+			cmd_info->cur_transfer_off += bus_softc->maxio;
 			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
 		}
 		*sglist_cnt = 0;
@@ -738,9 +741,18 @@ ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
 			*flags |= CAM_DATA_PADDR;
 		else
 			*flags |= CAM_DATA_VADDR;
-	} else {
-		/* S/G list with physical or virtual pointers. */
+	} else {	/* S/G list with physical or virtual pointers. */
 		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
+
+		/* One time shift for SRR offset. */
+		while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
+			io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
+			idx++;
+			off = 0;
+		}
+		off += io->scsiio.ext_data_filled;
+		io->scsiio.ext_data_filled = 0;
+
 		cam_sglist = cmd_info->cam_sglist;
 		*dxfer_len = 0;
 		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
@@ -818,18 +830,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
 			/*
 			 * Datamove call, we need to setup the S/G list.
 			 */
-			scsi_status = 0;
-			csio->cdb_len = atio->cdb_len;
 			ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
 			    &csio->sglist_cnt);
-			io->scsiio.ext_data_filled += dxfer_len;
-			if (io->scsiio.ext_data_filled >
-			    io->scsiio.kern_total_len) {
-				xpt_print(periph->path, "%s: tag 0x%04x "
-				    "fill len %u > total %u\n",
-				    __func__, io->scsiio.tag_num,
-				    io->scsiio.ext_data_filled,
-				    io->scsiio.kern_total_len);
-			}
 		} else {
 			/*
 			 * We're done, send status back.
@@ -891,8 +893,8 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
 			data_ptr = NULL;
 			dxfer_len = 0;
 			csio->sglist_cnt = 0;
-			scsi_status = 0;
 		}
+		scsi_status = 0;
 		if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
 		    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
 		    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
@@ -1246,13 +1248,36 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
 				  | (done_ccb->csio.msg_ptr[6]);
 			}
 
+			/*
+			 * If we have an SRR and we're still sending data, we
+			 * should be able to adjust offsets and cycle again.
+			 * It is possible only if offset is from this datamove.
+			 */
+			if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) &&
+			    srr_off >= io->scsiio.kern_rel_offset &&
+			    srr_off < io->scsiio.kern_rel_offset +
+			     io->scsiio.kern_data_len) {
+				io->scsiio.kern_data_resid =
+				    io->scsiio.kern_rel_offset +
+				    io->scsiio.kern_data_len - srr_off;
+				io->scsiio.ext_data_filled = srr_off;
+				io->scsiio.io_hdr.status = CTL_STATUS_NONE;
+				io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
+				softc->ccbs_freed++;
+				xpt_release_ccb(done_ccb);
+				TAILQ_INSERT_HEAD(&softc->work_queue,
+				    &atio->ccb_h, periph_links.tqe);
+				xpt_schedule(periph, /*priority*/ 1);
+				break;
+			}
+
+			/*
+			 * If status was being sent, the back end data is now
+			 * history.  Hack it up and resubmit a new command with
+			 * the CDB adjusted.  If the SIM does the right thing,
+			 * all of the resid math should work.
+			 */
 			if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) {
-				/*
-				 * If status was being sent, the back end data is now
-				 * history. Hack it up and resubmit a new command with
-				 * the CDB adjusted. If the SIM does the right thing,
-				 * all of the resid math should work.
-				 */
 				softc->ccbs_freed++;
 				xpt_release_ccb(done_ccb);
 				if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
@@ -1262,22 +1287,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
 				/*
 				 * Fall through to doom....
 				 */
-			} else if (srr) {
-				/*
-				 * If we have an srr and we're still sending data, we
-				 * should be able to adjust offsets and cycle again.
-				 */
-				io->scsiio.kern_rel_offset =
-				    io->scsiio.ext_data_filled = srr_off;
-				io->scsiio.ext_data_len = io->scsiio.kern_total_len -
-				    io->scsiio.kern_rel_offset;
-				softc->ccbs_freed++;
-				io->scsiio.io_hdr.status = CTL_STATUS_NONE;
-				xpt_release_ccb(done_ccb);
-				TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
-				    periph_links.tqe);
-				xpt_schedule(periph, /*priority*/ 1);
-				break;
 			}
 
 			if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) &&
@@ -1320,16 +1329,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
 
 			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
 
-			io->scsiio.ext_data_len += csio->dxfer_len;
-			if (io->scsiio.ext_data_len >
-			    io->scsiio.kern_total_len) {
-				xpt_print(periph->path, "%s: tag 0x%04x "
-				    "done len %u > total %u sent %u\n",
-				    __func__, io->scsiio.tag_num,
-				    io->scsiio.ext_data_len,
-				    io->scsiio.kern_total_len,
-				    io->scsiio.ext_data_filled);
-			}
 			/*
 			 * Translate CAM status to CTL status.  Success
 			 * does not change the overall, ctl_io status.  In
@@ -1339,6 +1338,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
 			 */
 			switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) {
 			case CAM_REQ_CMP:
+				io->scsiio.kern_data_resid -= csio->dxfer_len;
 				io->io_hdr.port_status = 0;
 				break;
 			default:
@@ -1368,7 +1368,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
 			if ((cmd_info->flags & CTLFE_CMD_PIECEWISE)
 			 && (io->io_hdr.port_status == 0)) {
 				ccb_flags flags;
-				uint8_t scsi_status;
 				uint8_t *data_ptr;
 				uint32_t dxfer_len;
 
@@ -1379,8 +1378,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
 				ctlfedata(softc, io, &flags, &data_ptr,
 				    &dxfer_len, &csio->sglist_cnt);
 
-				scsi_status = 0;
-
 				if (((flags & CAM_SEND_STATUS) == 0)
 				 && (dxfer_len == 0)) {
 					printf("%s: tag %04x no status or "
@@ -1400,7 +1397,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
 					 MSG_SIMPLE_Q_TAG : 0,
 					 atio->tag_id,
 					 atio->init_id,
-					 scsi_status,
+					 0,
 					 /*data_ptr*/ data_ptr,
 					 /*dxfer_len*/ dxfer_len,
 					 /*timeout*/ 5 * 1000);
@@ -2003,6 +2000,7 @@ ctlfe_datamove(union ctl_io *io)
 	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
 	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));
 
+	io->scsiio.ext_data_filled = 0;
 	ccb = PRIV_CCB(io);
 	periph = xpt_path_periph(ccb->ccb_h.path);
 	cam_periph_lock(periph);
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index bf2ee2f69e9c..d7989dcf5067 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -1371,7 +1371,7 @@ static struct asc_table_entry asc_table[] = {
 	{ SST(0x0E, 0x02, SS_RDEF,	/* XXX TBD */
 	    "Information unit too long") },
 	/* DT P R MAEBK F */
-	{ SST(0x0E, 0x03, SS_RDEF,	/* XXX TBD */
+	{ SST(0x0E, 0x03, SS_FATAL | EINVAL,
 	    "Invalid field in command information unit") },
 	/* D W O BK */
 	{ SST(0x10, 0x00, SS_RDEF,
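
One closing observation on the scsi_ctl.c changes: an SRR can now be
replayed only when the requested offset falls inside the window of the
datamove currently in progress; otherwise the code falls back to
resubmitting the command with an adjusted CDB.  The window check and
accounting rewind, pulled out of ctlfedone() above into a hypothetical
helper purely for illustration:

    /*
     * Illustrative helper, not part of the patch: can an SRR at
     * srr_off be served by re-running the current datamove?
     */
    static int
    example_srr_in_window(struct ctl_scsiio *ctsio, uint32_t srr_off)
    {
    	uint32_t start = ctsio->kern_rel_offset;
    	uint32_t end = start + ctsio->kern_data_len;

    	if (srr_off < start || srr_off >= end)
    		return (0);	/* No: fall back to ctlfe_adjust_cdb(). */

    	/*
    	 * Yes: everything from srr_off to the end of this datamove is
    	 * "not transferred" again, and the one-time shift in
    	 * ctlfedata() will skip the already-acknowledged bytes on the
    	 * next pass.
    	 */
    	ctsio->kern_data_resid = end - srr_off;
    	ctsio->ext_data_filled = srr_off;
    	return (1);
    }
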