Restructure camdd(8) slightly to make it easier to add support for
additional protocols.

Submitted by:	Chuck Tuffli <chuck@tuffli.net>
MFC after:	1 week
Differential Revision:	D11230
This commit is contained in:
Kenneth D. Merry 2017-08-22 13:08:22 +00:00
parent e6a4cfc37b
commit 988bd0d72b

View File

@ -260,6 +260,7 @@ struct camdd_buf {
struct camdd_dev_pass { struct camdd_dev_pass {
int scsi_dev_type; int scsi_dev_type;
int protocol;
struct cam_device *dev; struct cam_device *dev;
uint64_t max_sector; uint64_t max_sector;
uint32_t block_len; uint32_t block_len;
@ -477,6 +478,9 @@ uint32_t camdd_buf_get_len(struct camdd_buf *buf);
void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf); void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf);
int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize,
uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran); uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran);
int camdd_probe_pass_scsi(struct cam_device *cam_dev, union ccb *ccb,
camdd_argmask arglist, int probe_retry_count,
int probe_timeout, uint64_t *maxsector, uint32_t *block_len);
struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts, struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts,
int retry_count, int timeout); int retry_count, int timeout);
struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev, struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev,
@ -485,7 +489,8 @@ struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev,
int probe_timeout, int io_retry_count, int probe_timeout, int io_retry_count,
int io_timeout); int io_timeout);
void *camdd_file_worker(void *arg); void *camdd_file_worker(void *arg);
camdd_buf_status camdd_ccb_status(union ccb *ccb); camdd_buf_status camdd_ccb_status(union ccb *ccb, int protocol);
int camdd_get_cgd(struct cam_device *device, struct ccb_getdev *cgd);
int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf); int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf);
int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf); int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf);
void camdd_peer_done(struct camdd_buf *buf); void camdd_peer_done(struct camdd_buf *buf);
@ -1248,56 +1253,59 @@ camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count,
} }
/* /*
* Need to implement this. Do a basic probe: * Get a get device CCB for the specified device.
* - Check the inquiry data, make sure we're talking to a device that we
* can reasonably expect to talk to -- direct, RBC, CD, WORM.
* - Send a test unit ready, make sure the device is available.
* - Get the capacity and block size.
*/ */
struct camdd_dev * int
camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, camdd_get_cgd(struct cam_device *device, struct ccb_getdev *cgd)
camdd_argmask arglist, int probe_retry_count,
int probe_timeout, int io_retry_count, int io_timeout)
{ {
union ccb *ccb; union ccb *ccb;
uint64_t maxsector; int retval = 0;
uint32_t cpi_maxio, max_iosize, pass_numblocks;
uint32_t block_len;
struct scsi_read_capacity_data rcap;
struct scsi_read_capacity_data_long rcaplong;
struct camdd_dev *dev;
struct camdd_dev_pass *pass_dev;
struct kevent ke;
int scsi_dev_type;
dev = NULL; ccb = cam_getccb(device);
scsi_dev_type = SID_TYPE(&cam_dev->inq_data); if (ccb == NULL) {
maxsector = 0; warnx("%s: couldn't allocate CCB", __func__);
block_len = 0; return -1;
/*
* For devices that support READ CAPACITY, we'll attempt to get the
* capacity. Otherwise, we really don't support tape or other
* devices via SCSI passthrough, so just return an error in that case.
*/
switch (scsi_dev_type) {
case T_DIRECT:
case T_WORM:
case T_CDROM:
case T_OPTICAL:
case T_RBC:
case T_ZBC_HM:
break;
default:
errx(1, "Unsupported SCSI device type %d", scsi_dev_type);
break; /*NOTREACHED*/
} }
ccb = cam_getccb(cam_dev); CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cgd);
ccb->ccb_h.func_code = XPT_GDEV_TYPE;
if (cam_send_ccb(device, ccb) < 0) {
warn("%s: error sending Get Device Information CCB", __func__);
cam_error_print(device, ccb, CAM_ESF_ALL,
CAM_EPF_ALL, stderr);
retval = -1;
goto bailout;
}
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
cam_error_print(device, ccb, CAM_ESF_ALL,
CAM_EPF_ALL, stderr);
retval = -1;
goto bailout;
}
bcopy(&ccb->cgd, cgd, sizeof(struct ccb_getdev));
bailout:
cam_freeccb(ccb);
return retval;
}
int
camdd_probe_pass_scsi(struct cam_device *cam_dev, union ccb *ccb,
camdd_argmask arglist, int probe_retry_count,
int probe_timeout, uint64_t *maxsector, uint32_t *block_len)
{
struct scsi_read_capacity_data rcap;
struct scsi_read_capacity_data_long rcaplong;
int retval = -1;
if (ccb == NULL) { if (ccb == NULL) {
warnx("%s: error allocating ccb", __func__); warnx("%s: error passed ccb is NULL", __func__);
goto bailout; goto bailout;
} }
@ -1331,16 +1339,18 @@ camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts,
goto bailout; goto bailout;
} }
maxsector = scsi_4btoul(rcap.addr); *maxsector = scsi_4btoul(rcap.addr);
block_len = scsi_4btoul(rcap.length); *block_len = scsi_4btoul(rcap.length);
/* /*
* A last block of 2^32-1 means that the true capacity is over 2TB, * A last block of 2^32-1 means that the true capacity is over 2TB,
* and we need to issue the long READ CAPACITY to get the real * and we need to issue the long READ CAPACITY to get the real
* capacity. Otherwise, we're all set. * capacity. Otherwise, we're all set.
*/ */
if (maxsector != 0xffffffff) if (*maxsector != 0xffffffff) {
goto rcap_done; retval = 0;
goto bailout;
}
scsi_read_capacity_16(&ccb->csio, scsi_read_capacity_16(&ccb->csio,
/*retries*/ probe_retry_count, /*retries*/ probe_retry_count,
@ -1372,10 +1382,83 @@ camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts,
goto bailout; goto bailout;
} }
maxsector = scsi_8btou64(rcaplong.addr); *maxsector = scsi_8btou64(rcaplong.addr);
block_len = scsi_4btoul(rcaplong.length); *block_len = scsi_4btoul(rcaplong.length);
retval = 0;
bailout:
return retval;
}
/*
* Need to implement this. Do a basic probe:
* - Check the inquiry data, make sure we're talking to a device that we
* can reasonably expect to talk to -- direct, RBC, CD, WORM.
* - Send a test unit ready, make sure the device is available.
* - Get the capacity and block size.
*/
struct camdd_dev *
camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts,
camdd_argmask arglist, int probe_retry_count,
int probe_timeout, int io_retry_count, int io_timeout)
{
union ccb *ccb;
uint64_t maxsector = 0;
uint32_t cpi_maxio, max_iosize, pass_numblocks;
uint32_t block_len = 0;
struct camdd_dev *dev = NULL;
struct camdd_dev_pass *pass_dev;
struct kevent ke;
struct ccb_getdev cgd;
int retval;
int scsi_dev_type;
if ((retval = camdd_get_cgd(cam_dev, &cgd)) != 0) {
warnx("%s: error retrieving CGD", __func__);
return NULL;
}
ccb = cam_getccb(cam_dev);
if (ccb == NULL) {
warnx("%s: error allocating ccb", __func__);
goto bailout;
}
switch (cgd.protocol) {
case PROTO_SCSI:
scsi_dev_type = SID_TYPE(&cam_dev->inq_data);
/*
* For devices that support READ CAPACITY, we'll attempt to get the
* capacity. Otherwise, we really don't support tape or other
* devices via SCSI passthrough, so just return an error in that case.
*/
switch (scsi_dev_type) {
case T_DIRECT:
case T_WORM:
case T_CDROM:
case T_OPTICAL:
case T_RBC:
case T_ZBC_HM:
break;
default:
errx(1, "Unsupported SCSI device type %d", scsi_dev_type);
break; /*NOTREACHED*/
}
	if ((retval = camdd_probe_pass_scsi(cam_dev, ccb, arglist,
					    probe_retry_count, probe_timeout,
					    &maxsector, &block_len))) {
goto bailout;
}
break;
default:
errx(1, "Unsupported PROTO type %d", cgd.protocol);
break; /*NOTREACHED*/
}
rcap_done:
if (block_len == 0) { if (block_len == 0) {
warnx("Sector size for %s%u is 0, cannot continue", warnx("Sector size for %s%u is 0, cannot continue",
cam_dev->device_name, cam_dev->dev_unit_num); cam_dev->device_name, cam_dev->dev_unit_num);
@ -1405,6 +1488,7 @@ camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts,
pass_dev = &dev->dev_spec.pass; pass_dev = &dev->dev_spec.pass;
pass_dev->scsi_dev_type = scsi_dev_type; pass_dev->scsi_dev_type = scsi_dev_type;
pass_dev->protocol = cgd.protocol;
pass_dev->dev = cam_dev; pass_dev->dev = cam_dev;
pass_dev->max_sector = maxsector; pass_dev->max_sector = maxsector;
pass_dev->block_len = block_len; pass_dev->block_len = block_len;
@ -1715,43 +1799,50 @@ camdd_worker(void *arg)
* Simplistic translation of CCB status to our local status. * Simplistic translation of CCB status to our local status.
*/ */
camdd_buf_status camdd_buf_status
camdd_ccb_status(union ccb *ccb) camdd_ccb_status(union ccb *ccb, int protocol)
{ {
camdd_buf_status status = CAMDD_STATUS_NONE; camdd_buf_status status = CAMDD_STATUS_NONE;
cam_status ccb_status; cam_status ccb_status;
ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK; ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK;
switch (ccb_status) { switch (protocol) {
case CAM_REQ_CMP: { case PROTO_SCSI:
if (ccb->csio.resid == 0) { switch (ccb_status) {
status = CAMDD_STATUS_OK; case CAM_REQ_CMP: {
} else if (ccb->csio.dxfer_len > ccb->csio.resid) { if (ccb->csio.resid == 0) {
status = CAMDD_STATUS_SHORT_IO; status = CAMDD_STATUS_OK;
} else { } else if (ccb->csio.dxfer_len > ccb->csio.resid) {
status = CAMDD_STATUS_EOF; status = CAMDD_STATUS_SHORT_IO;
} } else {
break; status = CAMDD_STATUS_EOF;
} }
case CAM_SCSI_STATUS_ERROR: {
switch (ccb->csio.scsi_status) {
case SCSI_STATUS_OK:
case SCSI_STATUS_COND_MET:
case SCSI_STATUS_INTERMED:
case SCSI_STATUS_INTERMED_COND_MET:
status = CAMDD_STATUS_OK;
break; break;
case SCSI_STATUS_CMD_TERMINATED: }
case SCSI_STATUS_CHECK_COND: case CAM_SCSI_STATUS_ERROR: {
case SCSI_STATUS_QUEUE_FULL: switch (ccb->csio.scsi_status) {
case SCSI_STATUS_BUSY: case SCSI_STATUS_OK:
case SCSI_STATUS_RESERV_CONFLICT: case SCSI_STATUS_COND_MET:
case SCSI_STATUS_INTERMED:
case SCSI_STATUS_INTERMED_COND_MET:
status = CAMDD_STATUS_OK;
break;
case SCSI_STATUS_CMD_TERMINATED:
case SCSI_STATUS_CHECK_COND:
case SCSI_STATUS_QUEUE_FULL:
case SCSI_STATUS_BUSY:
case SCSI_STATUS_RESERV_CONFLICT:
default:
status = CAMDD_STATUS_ERROR;
break;
}
break;
}
default: default:
status = CAMDD_STATUS_ERROR; status = CAMDD_STATUS_ERROR;
break; break;
} }
break; break;
}
default: default:
status = CAMDD_STATUS_ERROR; status = CAMDD_STATUS_ERROR;
break; break;
@ -2149,11 +2240,18 @@ camdd_pass_fetch(struct camdd_dev *dev)
CAM_EPF_ALL, stderr); CAM_EPF_ALL, stderr);
} }
data->resid = ccb.csio.resid; switch (pass_dev->protocol) {
dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid); case PROTO_SCSI:
data->resid = ccb.csio.resid;
dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid);
break;
default:
return -1;
break;
}
if (buf->status == CAMDD_STATUS_NONE) if (buf->status == CAMDD_STATUS_NONE)
buf->status = camdd_ccb_status(&ccb); buf->status = camdd_ccb_status(&ccb, pass_dev->protocol);
if (buf->status == CAMDD_STATUS_ERROR) if (buf->status == CAMDD_STATUS_ERROR)
error_count++; error_count++;
else if (buf->status == CAMDD_STATUS_EOF) { else if (buf->status == CAMDD_STATUS_EOF) {
@ -2433,9 +2531,6 @@ camdd_pass_run(struct camdd_dev *dev)
data = &buf->buf_type_spec.data; data = &buf->buf_type_spec.data;
ccb = &data->ccb;
CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);
/* /*
* In almost every case the number of blocks should be the device * In almost every case the number of blocks should be the device
* block size. The exception may be at the end of an I/O stream * block size. The exception may be at the end of an I/O stream
@ -2446,21 +2541,36 @@ camdd_pass_run(struct camdd_dev *dev)
else else
num_blocks = data->fill_len / pass_dev->block_len; num_blocks = data->fill_len / pass_dev->block_len;
scsi_read_write(&ccb->csio, ccb = &data->ccb;
/*retries*/ dev->retry_count,
/*cbfcnp*/ NULL, switch (pass_dev->protocol) {
/*tag_action*/ MSG_SIMPLE_Q_TAG, case PROTO_SCSI:
/*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ : CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);
SCSI_RW_WRITE,
/*byte2*/ 0, scsi_read_write(&ccb->csio,
/*minimum_cmd_size*/ dev->min_cmd_size, /*retries*/ dev->retry_count,
/*lba*/ buf->lba, /*cbfcnp*/ NULL,
/*block_count*/ num_blocks, /*tag_action*/ MSG_SIMPLE_Q_TAG,
/*data_ptr*/ (data->sg_count != 0) ? /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ :
(uint8_t *)data->segs : data->buf, SCSI_RW_WRITE,
/*dxfer_len*/ (num_blocks * pass_dev->block_len), /*byte2*/ 0,
/*sense_len*/ SSD_FULL_SIZE, /*minimum_cmd_size*/ dev->min_cmd_size,
/*timeout*/ dev->io_timeout); /*lba*/ buf->lba,
/*block_count*/ num_blocks,
/*data_ptr*/ (data->sg_count != 0) ?
(uint8_t *)data->segs : data->buf,
/*dxfer_len*/ (num_blocks * pass_dev->block_len),
/*sense_len*/ SSD_FULL_SIZE,
/*timeout*/ dev->io_timeout);
if (data->sg_count != 0) {
ccb->csio.sglist_cnt = data->sg_count;
}
break;
default:
retval = -1;
goto bailout;
}
/* Disable freezing the device queue */ /* Disable freezing the device queue */
ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
@ -2469,7 +2579,6 @@ camdd_pass_run(struct camdd_dev *dev)
ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
if (data->sg_count != 0) { if (data->sg_count != 0) {
ccb->csio.sglist_cnt = data->sg_count;
ccb->ccb_h.flags |= CAM_DATA_SG; ccb->ccb_h.flags |= CAM_DATA_SG;
} }