bdev_virtio: added virtio_scsi_io_ctx
I/O requests are now allocated from bdev_io memory. virtio_req->iov now points to the raw payload; the request and response iovecs are available as separate fields. This solution should apply to both vhost-scsi and vhost-blk.

Change-Id: I588fbdd7fc5442329aadbcb3e31b2f4a7118ec8f
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/375264
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent d0a502bc03
commit 157969b60e
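For readers skimming the diff below: the change relies on the bdev layer's per-I/O context mechanism. A module reports its context size through its get_ctx_size() callback (registered via SPDK_BDEV_MODULE_REGISTER), and the bdev layer reserves that many bytes inside every spdk_bdev_io, reachable as bdev_io->driver_ctx, so the module no longer has to allocate request/response buffers on every request. The sketch below mimics that pattern outside of SPDK; everything named fake_* or module_* is invented for illustration, and only the idea mirrors bdev_virtio_get_ctx_size() and driver_ctx from the diff.

/*
 * Minimal stand-in for the bdev per-I/O "driver context" pattern.
 * Not SPDK code: fake_io, module_io_ctx, module_get_ctx_size() and main()
 * are invented here; only the idea (context size reported up front, context
 * embedded in each I/O) mirrors bdev_virtio_get_ctx_size() and
 * bdev_io->driver_ctx in the diff below.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct module_io_ctx {                  /* plays the role of virtio_scsi_io_ctx  */
        unsigned char scsi_req[32];     /* placeholder, not the real req layout  */
        unsigned char scsi_resp[96];    /* placeholder, not the real resp layout */
};

/* Analogous to bdev_virtio_get_ctx_size(): tells the framework how much
 * scratch space to reserve in every I/O it hands to this module. */
static size_t module_get_ctx_size(void)
{
        return sizeof(struct module_io_ctx);
}

struct fake_io {                        /* plays the role of spdk_bdev_io        */
        int type;
        unsigned char driver_ctx[];     /* flexible tail sized by get_ctx_size() */
};

int main(void)
{
        /* The "bdev layer" allocates the I/O and the module context in one shot. */
        struct fake_io *io = calloc(1, sizeof(*io) + module_get_ctx_size());
        if (io == NULL) {
                return 1;
        }

        /* The "module" reinterprets driver_ctx as its own per-I/O struct,
         * like the (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx cast below. */
        struct module_io_ctx *ctx = (struct module_io_ctx *)io->driver_ctx;
        memset(ctx->scsi_req, 0, sizeof(ctx->scsi_req));

        printf("%zu bytes of per-I/O context travel inside the I/O object\n",
               module_get_ctx_size());
        free(io);
        return 0;
}

Any C99 compiler will build this; it simply shows that the module's scratch space travels with the I/O object instead of being allocated and freed per request, which is exactly what the diff switches bdev_virtio to.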
lib/bdev/virtio
@@ -64,10 +64,16 @@ struct virtio_scsi_disk {
         uint32_t block_size;
 };
 
+struct virtio_scsi_io_ctx {
+        struct virtio_req vreq;
+        struct virtio_scsi_cmd_req req;
+        struct virtio_scsi_cmd_resp resp;
+};
+
 static int
 bdev_virtio_get_ctx_size(void)
 {
-        return 0;
+        return sizeof(struct virtio_scsi_io_ctx);
 }
 
 SPDK_BDEV_MODULE_REGISTER(virtio_scsi, bdev_virtio_initialize, bdev_virtio_finish,
@@ -76,60 +82,52 @@ SPDK_BDEV_MODULE_REGISTER(virtio_scsi, bdev_virtio_initialize, bdev_virtio_finish,
 static void
 bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-        struct iovec iov[128];
-        struct virtio_req vreq;
+        struct virtio_req *vreq;
         struct virtio_scsi_cmd_req *req;
         struct virtio_scsi_cmd_resp *resp;
         uint16_t cnt;
         struct virtio_req *complete;
         struct virtio_scsi_disk *disk = (struct virtio_scsi_disk *)bdev_io->bdev;
+        struct virtio_scsi_io_ctx *io_ctx = (struct virtio_scsi_io_ctx *)bdev_io->driver_ctx;
         bool is_read = (bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
 
-        vreq.iov = iov;
+        vreq = &io_ctx->vreq;
+        req = &io_ctx->req;
+        resp = &io_ctx->resp;
 
-        req = spdk_dma_malloc(4096, 64, NULL);
-        resp = spdk_dma_malloc(4096, 64, NULL);
+        vreq->iov_req.iov_base = (void *)req;
+        vreq->iov_req.iov_len = sizeof(*req);
 
-        iov[0].iov_base = (void *)req;
-        iov[0].iov_len = sizeof(*req);
+        vreq->iov_resp.iov_base = (void *)resp;
+        vreq->iov_resp.iov_len = sizeof(*resp);
 
-        if (is_read) {
-                iov[1].iov_base = (void *)resp;
-                iov[1].iov_len = sizeof(struct virtio_scsi_cmd_resp);
-                memcpy(&iov[2], bdev_io->u.read.iovs, sizeof(struct iovec) * bdev_io->u.read.iovcnt);
-                vreq.iovcnt = 2 + bdev_io->u.read.iovcnt;
-                vreq.start_write = 1;
-        } else {
-                memcpy(&iov[1], bdev_io->u.write.iovs, sizeof(struct iovec) * bdev_io->u.write.iovcnt);
-                iov[1 + bdev_io->u.write.iovcnt].iov_base = (void *)resp;
-                iov[1 + bdev_io->u.write.iovcnt].iov_len = sizeof(struct virtio_scsi_cmd_resp);
-                vreq.iovcnt = 2 + bdev_io->u.write.iovcnt;
-                vreq.start_write = vreq.iovcnt - 1;
-        }
+        vreq->is_write = !is_read;
 
         memset(req, 0, sizeof(*req));
         req->lun[0] = 1;
         req->lun[1] = 0;
 
         if (is_read) {
+                vreq->iov = bdev_io->u.read.iovs;
+                vreq->iovcnt = bdev_io->u.read.iovcnt;
                 req->cdb[0] = SPDK_SBC_READ_10;
                 to_be32(&req->cdb[2], bdev_io->u.read.offset / disk->block_size);
                 to_be16(&req->cdb[7], bdev_io->u.read.len / disk->block_size);
         } else {
+                vreq->iov = bdev_io->u.write.iovs;
+                vreq->iovcnt = bdev_io->u.write.iovcnt;
                 req->cdb[0] = SPDK_SBC_WRITE_10;
                 to_be32(&req->cdb[2], bdev_io->u.write.offset / disk->block_size);
                 to_be16(&req->cdb[7], bdev_io->u.write.len / disk->block_size);
         }
 
-        virtio_xmit_pkts(disk->hw->vqs[2], &vreq);
+        virtio_xmit_pkts(disk->hw->vqs[2], vreq);
 
         do {
                 cnt = virtio_recv_pkts(disk->hw->vqs[2], &complete, 1);
         } while (cnt == 0);
 
         spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
-        spdk_dma_free(req);
-        spdk_dma_free(resp);
 }
 
 static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
@@ -210,7 +208,7 @@ bdev_virtio_destroy_cb(void *io_device, void *ctx_buf)
 static void
 scan_target(struct virtio_hw *hw, uint8_t target)
 {
-        struct iovec iov[3];
+        struct iovec iov;
         struct virtio_req vreq;
         struct virtio_scsi_cmd_req *req;
         struct virtio_scsi_cmd_resp *resp;
@@ -220,29 +218,29 @@ scan_target(struct virtio_hw *hw, uint8_t target)
         struct virtio_scsi_disk *disk;
         struct spdk_bdev *bdev;
 
-        vreq.iov = iov;
-        vreq.iovcnt = 3;
-        vreq.start_write = 1;
+        vreq.iov = &iov;
+        vreq.iovcnt = 1;
+        vreq.is_write = 0;
 
-        iov[0].iov_base = spdk_dma_malloc(4096, 64, NULL);
-        iov[1].iov_base = spdk_dma_malloc(4096, 64, NULL);
-        iov[2].iov_base = spdk_dma_malloc(4096, 64, NULL);
+        req = spdk_dma_zmalloc(sizeof(*req), 64, NULL);
+        resp = spdk_dma_malloc(sizeof(*resp), 64, NULL);
 
-        req = iov[0].iov_base;
-        resp = iov[1].iov_base;
+        vreq.iov_req.iov_base = (void *)req;
+        vreq.iov_req.iov_len = sizeof(*req);
+
+        vreq.iov_resp.iov_base = (void *)resp;
+        vreq.iov_resp.iov_len = sizeof(*resp);
+
+        iov.iov_base = spdk_dma_malloc(4096, 64, NULL);
+        iov.iov_len = 255;
 
         memset(req, 0, sizeof(*req));
         req->lun[0] = 1;
         req->lun[1] = target;
-        iov[0].iov_len = sizeof(*req);
 
         cdb = (struct spdk_scsi_cdb_inquiry *)req->cdb;
         cdb->opcode = SPDK_SPC_INQUIRY;
         cdb->alloc_len[1] = 255;
 
-        iov[1].iov_len = sizeof(struct virtio_scsi_cmd_resp);
-        iov[2].iov_len = 255;
-
         virtio_xmit_pkts(hw->vqs[2], &vreq);
 
         do {
@@ -256,14 +254,12 @@ scan_target(struct virtio_hw *hw, uint8_t target)
         memset(req, 0, sizeof(*req));
         req->lun[0] = 1;
         req->lun[1] = target;
-        iov[0].iov_len = sizeof(*req);
 
         req->cdb[0] = SPDK_SPC_SERVICE_ACTION_IN_16;
         req->cdb[1] = SPDK_SBC_SAI_READ_CAPACITY_16;
 
-        iov[1].iov_len = sizeof(struct virtio_scsi_cmd_resp);
-        iov[2].iov_len = 32;
-        to_be32(&req->cdb[10], iov[2].iov_len);
+        iov.iov_len = 32;
+        to_be32(&req->cdb[10], iov.iov_len);
 
         virtio_xmit_pkts(hw->vqs[2], &vreq);
 
@@ -277,8 +273,8 @@ scan_target(struct virtio_hw *hw, uint8_t target)
                 return;
         }
 
-        disk->num_blocks = from_be64((uint64_t *)(iov[2].iov_base)) + 1;
-        disk->block_size = from_be32((uint32_t *)(iov[2].iov_base + 8));
+        disk->num_blocks = from_be64((uint64_t *)(iov.iov_base)) + 1;
+        disk->block_size = from_be32((uint32_t *)(iov.iov_base + 8));
 
         disk->hw = hw;
 
@@ -45,8 +45,10 @@
 
 struct virtio_req {
         struct iovec *iov;
+        struct iovec iov_req;
+        struct iovec iov_resp;
         uint32_t iovcnt;
-        uint32_t start_write;
+        int is_write;
         uint32_t data_transferred;
 };
 
@@ -127,11 +127,23 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct virtio_req **rx_pkts,
                 (var) = (val); \
         } while (0)
 
+static inline void
+virtqueue_iov_to_desc(struct virtqueue *vq, uint16_t desc_idx, struct iovec *iov)
+{
+        if (vq->hw->virtio_user_dev) {
+                vq->vq_ring.desc[desc_idx].addr = (uintptr_t)iov->iov_base;
+        } else {
+                vq->vq_ring.desc[desc_idx].addr = spdk_vtophys(iov->iov_base);
+        }
+
+        vq->vq_ring.desc[desc_idx].len = iov->iov_len;
+}
+
 static inline void
 virtqueue_enqueue_xmit(struct virtqueue *vq, struct virtio_req *req)
 {
         struct vq_desc_extra *dxp;
-        struct vring_desc *start_dp;
+        struct vring_desc *descs;
         uint32_t i;
         uint16_t head_idx, idx;
         struct iovec *iov = req->iov;
@@ -142,20 +154,33 @@ virtqueue_enqueue_xmit(struct virtqueue *vq, struct virtio_req *req)
         dxp->cookie = (void *)req;
         dxp->ndescs = req->iovcnt;
 
-        start_dp = vq->vq_ring.desc;
+        descs = vq->vq_ring.desc;
 
-        for (i = 0; i < req->iovcnt; i++) {
-                if (vq->hw->virtio_user_dev) {
-                        start_dp[idx].addr = (uintptr_t)iov[i].iov_base;
-                } else {
-                        start_dp[idx].addr = spdk_vtophys(iov[i].iov_base);
-                }
-                start_dp[idx].len = iov[i].iov_len;
-                start_dp[idx].flags = (i >= req->start_write ? VRING_DESC_F_WRITE : 0);
-                if ((i + 1) != req->iovcnt) {
-                        start_dp[idx].flags |= VRING_DESC_F_NEXT;
-                }
-                idx = start_dp[idx].next;
-        }
+        virtqueue_iov_to_desc(vq, idx, &req->iov_req);
+        descs[idx].flags = VRING_DESC_F_NEXT;
+        idx = descs[idx].next;
+
+        if (req->is_write) {
+                for (i = 0; i < req->iovcnt; i++) {
+                        virtqueue_iov_to_desc(vq, idx, &iov[i]);
+                        descs[idx].flags = VRING_DESC_F_NEXT;
+                        idx = descs[idx].next;
+                }
+
+                virtqueue_iov_to_desc(vq, idx, &req->iov_resp);
+                descs[idx].flags = VRING_DESC_F_WRITE;
+                idx = descs[idx].next;
+        } else {
+                virtqueue_iov_to_desc(vq, idx, &req->iov_resp);
+                descs[idx].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+                idx = descs[idx].next;
+
+                for (i = 0; i < req->iovcnt; i++) {
+                        virtqueue_iov_to_desc(vq, idx, &iov[i]);
+                        descs[idx].flags = VRING_DESC_F_WRITE;
+                        descs[idx].flags |= (i + 1) != req->iovcnt ? VRING_DESC_F_NEXT : 0;
+                        idx = descs[idx].next;
+                }
+        }
 
         vq->vq_desc_head_idx = idx;
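One non-obvious part of the hunk above is the descriptor ordering the rewritten virtqueue_enqueue_xmit() produces: the command header is always the first, device-readable descriptor; for a SCSI write the payload iovecs follow as device-readable descriptors and the response is the single device-writable descriptor at the end, while for a SCSI read the device-writable section starts right after the header, response first, then the data-in buffers. The toy program below just prints that ordering; the VRING_DESC_F_* values are the standard virtio ones, and everything else (flags_str, print_chain, the payload counts) is made up for illustration and is not SPDK code.

/*
 * Toy illustration (not SPDK code) of the descriptor chain built by the new
 * virtqueue_enqueue_xmit(): header first, then payload + response for a
 * write, or response + payload for a read.  VRING_DESC_F_* match the virtio
 * spec; flags_str(), print_chain() and main() are invented for this demo.
 */
#include <stdio.h>

#define VRING_DESC_F_NEXT  1u   /* chain continues through the 'next' field */
#define VRING_DESC_F_WRITE 2u   /* buffer is written by the device          */

static const char *flags_str(unsigned int f)
{
        switch (f) {
        case VRING_DESC_F_NEXT:                      return "NEXT";
        case VRING_DESC_F_WRITE:                     return "WRITE";
        case VRING_DESC_F_WRITE | VRING_DESC_F_NEXT: return "WRITE|NEXT";
        default:                                     return "(none)";
        }
}

/* Prints the same flag sequence that the new enqueue path assigns. */
static void print_chain(int is_write, unsigned int iovcnt)
{
        unsigned int i, d = 0;

        printf("%s request:\n", is_write ? "write" : "read");
        printf("  desc %u: cmd_req     %s\n", d++, flags_str(VRING_DESC_F_NEXT));

        if (is_write) {
                /* Device-readable payload first, device-writable response last. */
                for (i = 0; i < iovcnt; i++) {
                        printf("  desc %u: payload[%u]  %s\n", d++, i,
                               flags_str(VRING_DESC_F_NEXT));
                }
                printf("  desc %u: cmd_resp    %s\n", d++, flags_str(VRING_DESC_F_WRITE));
        } else {
                /* Device-writable section starts with the response, then data-in. */
                printf("  desc %u: cmd_resp    %s\n", d++,
                       flags_str(VRING_DESC_F_WRITE | VRING_DESC_F_NEXT));
                for (i = 0; i < iovcnt; i++) {
                        printf("  desc %u: payload[%u]  %s\n", d++, i,
                               flags_str(VRING_DESC_F_WRITE |
                                         ((i + 1) != iovcnt ? VRING_DESC_F_NEXT : 0)));
                }
        }
}

int main(void)
{
        print_chain(1, 2);      /* SCSI WRITE with a 2-element payload iovec */
        print_chain(0, 2);      /* SCSI READ with a 2-element payload iovec  */
        return 0;
}

Running it for a 2-element payload makes the read/write asymmetry of the chain easy to see at a glance.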