vhost: wrap rte_vhost_vring into spdk_vhost_virtqueue

This enables storing SPDK-specific state per queue. The first use of
this will be the event index feature.

Change-Id: Ieca6fa47a6f2e23bec73d2cda8b0ed8b9185bd28
Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-on: https://review.gerrithub.io/376636
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Pawel Wodkowski 2017-08-31 16:38:35 +02:00 committed by Daniel Verkamp
parent 598ba73f5f
commit 3a33129a6d
6 changed files with 65 additions and 57 deletions
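
For context, the new wrapper introduced by this change (defined in the header file changed below) is minimal for now; the point is that SPDK-specific per-queue state can later sit next to the DPDK vring. A rough sketch of the idea, where the extra field is purely illustrative of what something like event index support might add and is not part of this patch:

#include <stdint.h>
#include <rte_vhost.h>	/* struct rte_vhost_vring (DPDK) */

/* The DPDK vring is embedded in an SPDK-owned, cache-line-aligned wrapper,
 * so SPDK-specific per-queue state can live alongside it. */
struct spdk_vhost_virtqueue {
	struct rte_vhost_vring vring;
	uint16_t used_event_shadow;	/* hypothetical future per-queue state */
} __attribute__((aligned(64)));		/* stands in for SPDK_CACHE_LINE_SIZE */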


@@ -90,11 +90,13 @@ void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr)
* Get available requests from avail ring.
*/
uint16_t
spdk_vhost_vq_avail_ring_get(struct rte_vhost_vring *vq, uint16_t *reqs, uint16_t reqs_len)
spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *virtqueue, uint16_t *reqs,
uint16_t reqs_len)
{
struct vring_avail *avail = vq->avail;
uint16_t size_mask = vq->size - 1;
uint16_t last_idx = vq->last_avail_idx, avail_idx = avail->idx;
struct rte_vhost_vring *vring = &virtqueue->vring;
struct vring_avail *avail = vring->avail;
uint16_t size_mask = vring->size - 1;
uint16_t last_idx = vring->last_avail_idx, avail_idx = avail->idx;
uint16_t count = RTE_MIN((avail_idx - last_idx) & size_mask, reqs_len);
uint16_t i;
@@ -102,9 +104,9 @@ spdk_vhost_vq_avail_ring_get(struct rte_vhost_vring *vq, uint16_t *reqs, uint16_
return 0;
}
vq->last_avail_idx += count;
vring->last_avail_idx += count;
for (i = 0; i < count; i++) {
reqs[i] = vq->avail->ring[(last_idx + i) & size_mask];
reqs[i] = vring->avail->ring[(last_idx + i) & size_mask];
}
SPDK_DEBUGLOG(SPDK_TRACE_VHOST_RING,
@@ -121,14 +123,15 @@ spdk_vhost_vring_desc_is_indirect(struct vring_desc *cur_desc)
}
int
spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq, uint16_t req_idx,
struct vring_desc **desc, struct vring_desc **desc_table, uint32_t *desc_table_size)
spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue,
uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
uint32_t *desc_table_size)
{
if (spdk_unlikely(req_idx >= vq->size)) {
if (spdk_unlikely(req_idx >= virtqueue->vring.size)) {
return -1;
}
*desc = &vq->desc[req_idx];
*desc = &virtqueue->vring.desc[req_idx];
if (spdk_vhost_vring_desc_is_indirect(*desc)) {
assert(spdk_vhost_dev_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC));
@@ -138,8 +141,8 @@ spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq,
return 0;
}
*desc_table = vq->desc;
*desc_table_size = vq->size;
*desc_table = virtqueue->vring.desc;
*desc_table_size = virtqueue->vring.size;
return 0;
}
@@ -148,34 +151,34 @@ spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq,
* Enqueue id and len to used ring.
*/
void
spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq,
uint16_t id,
uint32_t len)
spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue,
uint16_t id, uint32_t len)
{
int need_event = 0;
struct vring_used *used = vq->used;
uint16_t last_idx = vq->last_used_idx & (vq->size - 1);
struct rte_vhost_vring *vring = &virtqueue->vring;
struct vring_used *used = vring->used;
uint16_t last_idx = vring->last_used_idx & (vring->size - 1);
SPDK_DEBUGLOG(SPDK_TRACE_VHOST_RING, "USED: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n",
vq->last_used_idx, id, len);
vring->last_used_idx, id, len);
vq->last_used_idx++;
vring->last_used_idx++;
used->ring[last_idx].id = id;
used->ring[last_idx].len = len;
spdk_wmb();
* (volatile uint16_t *) &used->idx = vq->last_used_idx;
* (volatile uint16_t *) &used->idx = vring->last_used_idx;
if (spdk_vhost_dev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
spdk_unlikely(vq->avail->idx == vq->last_avail_idx)) {
spdk_unlikely(vring->avail->idx == vring->last_avail_idx)) {
need_event = 1;
} else {
spdk_mb();
need_event = !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
need_event = !(vring->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
}
if (need_event) {
eventfd_write(vq->callfd, (eventfd_t)1);
eventfd_write(vring->callfd, (eventfd_t)1);
}
}
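
The notify decision above (VIRTIO_F_NOTIFY_ON_EMPTY plus the VRING_AVAIL_F_NO_INTERRUPT flag) is the code path the planned event index feature would refine. A minimal sketch of the spec-defined test, assuming VIRTIO_RING_F_EVENT_IDX is negotiated; the helper mirrors the canonical vring_need_event() from the virtio spec and the surrounding names are illustrative:

/* With VIRTIO_RING_F_EVENT_IDX, the guest publishes a used_event index and
 * the device interrupts only when the new used index crosses it. */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
	/* True iff event_idx lies in the half-open window (old_idx, new_idx]. */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}

An enqueue path using it would compute need_event = vring_need_event(used_event, vring->last_used_idx, old_used_idx) instead of checking VRING_AVAIL_F_NO_INTERRUPT; tracking old_used_idx and the guest's used_event is exactly the kind of per-queue bookkeeping the new wrapper is meant to hold.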
@@ -733,7 +736,7 @@ stop_device(int vid)
}
for (i = 0; i < vdev->num_queues; i++) {
q = &vdev->virtqueue[i];
q = &vdev->virtqueue[i].vring;
rte_vhost_set_vhost_vring_last_idx(vdev->vid, i, q->last_avail_idx, q->last_used_idx);
}
@@ -771,8 +774,9 @@ start_device(int vid)
goto out;
}
memset(vdev->virtqueue, 0, sizeof(vdev->virtqueue));
for (i = 0; i < num_queues; i++) {
if (rte_vhost_get_vhost_vring(vid, i, &vdev->virtqueue[i])) {
if (rte_vhost_get_vhost_vring(vid, i, &vdev->virtqueue[i].vring)) {
SPDK_ERRLOG("vhost device %d: Failed to get information of queue %"PRIu16"\n", vid, i);
goto out;
}


@@ -47,7 +47,7 @@
struct spdk_vhost_blk_task {
struct spdk_bdev_io *bdev_io;
struct spdk_vhost_blk_dev *bvdev;
struct rte_vhost_vring *vq;
struct spdk_vhost_virtqueue *vq;
volatile uint8_t *status;
@@ -122,7 +122,7 @@ invalid_blk_request(struct spdk_vhost_blk_task *task, uint8_t status)
* FIXME: Make this function return to rd_cnt and wr_cnt
*/
static int
blk_iovs_setup(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq, uint16_t req_idx,
blk_iovs_setup(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
struct iovec *iovs, uint16_t *iovs_cnt, uint32_t *length)
{
struct vring_desc *desc, *desc_table;
@@ -203,7 +203,7 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
static int
process_blk_request(struct spdk_vhost_blk_task *task, struct spdk_vhost_blk_dev *bvdev,
struct rte_vhost_vring *vq,
struct spdk_vhost_virtqueue *vq,
uint16_t req_idx)
{
const struct virtio_blk_outhdr *req;
@@ -301,7 +301,7 @@ process_blk_request(struct spdk_vhost_blk_task *task, struct spdk_vhost_blk_dev
}
static void
process_vq(struct spdk_vhost_blk_dev *bvdev, struct rte_vhost_vring *vq)
process_vq(struct spdk_vhost_blk_dev *bvdev, struct spdk_vhost_virtqueue *vq)
{
struct spdk_vhost_blk_task *tasks[32] = {0};
int rc;
@@ -339,7 +339,7 @@ vdev_worker(void *arg)
}
static void
no_bdev_process_vq(struct spdk_vhost_blk_dev *bvdev, struct rte_vhost_vring *vq)
no_bdev_process_vq(struct spdk_vhost_blk_dev *bvdev, struct spdk_vhost_virtqueue *vq)
{
struct iovec iovs[SPDK_VHOST_IOVS_MAX];
uint32_t length;
@@ -454,7 +454,7 @@ alloc_task_pool(struct spdk_vhost_blk_dev *bvdev)
* Limit the pool size to 1024 * num_queues. This should be enough as QEMU have the
* same hard limit for queue size.
*/
task_cnt += spdk_min(bvdev->vdev.virtqueue[i].size, 1024);
task_cnt += spdk_min(bvdev->vdev.virtqueue[i].vring.size, 1024);
}
ring_size = spdk_align32pow2(task_cnt + 1);
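
The block backend's process_vq(), partially shown above, is the typical consumer of these helpers; with this change callers pass the wrapper type and only reach into ->vring for raw DPDK fields. A compressed sketch of such a poll loop using only the signatures visible in this diff (request parsing and error handling are elided, and this is not the actual process_vq() implementation):

static void
poll_virtqueue_sketch(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq)
{
	uint16_t reqs[32];
	uint16_t count, i;
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size;

	/* Pull up to 32 available request indexes from the avail ring. */
	count = spdk_vhost_vq_avail_ring_get(vq, reqs, 32);
	for (i = 0; i < count; i++) {
		if (spdk_vhost_vq_get_desc(vdev, vq, reqs[i], &desc,
					   &desc_table, &desc_table_size) != 0) {
			continue;
		}
		/* ...map descriptors to iovecs and submit the I/O here... */
		spdk_vhost_vq_used_ring_enqueue(vdev, vq, reqs[i], 0);
	}
}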


@@ -82,6 +82,10 @@ enum spdk_vhost_dev_type {
SPDK_VHOST_DEV_T_BLK,
};
struct spdk_vhost_virtqueue {
struct rte_vhost_vring vring;
} __attribute((aligned(SPDK_CACHE_LINE_SIZE)));
struct spdk_vhost_dev_backend {
uint64_t virtio_features;
uint64_t disabled_features;
@@ -114,8 +118,7 @@ struct spdk_vhost_dev {
uint16_t num_queues;
uint64_t negotiated_features;
struct rte_vhost_vring virtqueue[SPDK_VHOST_MAX_VQUEUES] __attribute((aligned(
SPDK_CACHE_LINE_SIZE)));
struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];
};
struct spdk_vhost_dev *spdk_vhost_dev_find(const char *ctrlr_name);
@@ -124,9 +127,9 @@ void spdk_vhost_dev_mem_unregister(struct spdk_vhost_dev *vdev);
void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr);
uint16_t spdk_vhost_vq_avail_ring_get(struct rte_vhost_vring *vq, uint16_t *reqs,
uint16_t spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
uint16_t reqs_len);
bool spdk_vhost_vq_should_notify(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq);
bool spdk_vhost_vq_should_notify(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq);
/**
* Get a virtio descriptor at given index in given virtqueue.
@@ -145,10 +148,10 @@ bool spdk_vhost_vq_should_notify(struct spdk_vhost_dev *vdev, struct rte_vhost_v
* \return 0 on success, -1 if given index is invalid.
* If -1 is returned, the params won't be changed.
*/
int spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq,
uint16_t req_idx,
struct vring_desc **desc, struct vring_desc **desc_table, uint32_t *desc_table_size);
void spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq,
int spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq,
uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
uint32_t *desc_table_size);
void spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq,
uint16_t id, uint32_t len);
/**


@@ -96,7 +96,7 @@ struct spdk_vhost_scsi_task {
int req_idx;
struct rte_vhost_vring *vq;
struct spdk_vhost_virtqueue *vq;
};
static int spdk_vhost_scsi_start(struct spdk_vhost_dev *, void *);
@@ -173,7 +173,7 @@ static void
eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
uint32_t reason)
{
struct rte_vhost_vring *vq;
struct spdk_vhost_virtqueue *vq;
struct vring_desc *desc, *desc_table;
struct virtio_scsi_event *desc_ev;
uint32_t desc_table_size, req_size = 0;
@@ -332,8 +332,8 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
SPDK_DEBUGLOG(SPDK_TRACE_VHOST_SCSI_QUEUE,
"Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->last_used_idx,
task->vq->kickfd, task->vq->size);
task->req_idx, desc, (void *)desc->addr, desc->len, desc->flags, task->vq->vring.last_used_idx,
task->vq->vring.kickfd, task->vq->vring.size);
SPDK_TRACEDUMP(SPDK_TRACE_VHOST_SCSI_QUEUE, "Request descriptor", (uint8_t *)ctrl_req,
desc->len);
@@ -544,7 +544,7 @@ process_request(struct spdk_vhost_scsi_task *task)
}
static void
process_controlq(struct spdk_vhost_scsi_dev *svdev, struct rte_vhost_vring *vq)
process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
struct spdk_vhost_scsi_task *tasks[32];
struct spdk_vhost_scsi_task *task;
@@ -565,7 +565,7 @@ process_controlq(struct spdk_vhost_scsi_dev *svdev, struct rte_vhost_vring *vq)
}
static void
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct rte_vhost_vring *vq)
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
struct spdk_vhost_scsi_task *tasks[32];
struct spdk_vhost_scsi_task *task;
@@ -958,7 +958,7 @@ alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
* Limit the pool size to 1024 * num_queues. This should be enough as QEMU have the
* same hard limit for queue size.
*/
task_cnt += spdk_min(svdev->vdev.virtqueue[i].size, 1024);
task_cnt += spdk_min(svdev->vdev.virtqueue[i].vring.size, 1024);
}
ring_size = spdk_align32pow2(task_cnt + 1);


@@ -62,10 +62,10 @@ struct spdk_io_channel {
DEFINE_STUB(spdk_ring_enqueue, size_t, (struct spdk_ring *ring, void **objs, size_t count), 0);
DEFINE_STUB(spdk_ring_dequeue, size_t, (struct spdk_ring *ring, void **objs, size_t count), 0);
DEFINE_STUB_V(spdk_vhost_vq_used_ring_enqueue, (struct spdk_vhost_dev *vdev,
struct rte_vhost_vring *vq, uint16_t id, uint32_t len));
DEFINE_STUB(spdk_vhost_vq_get_desc, int, (struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq,
uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
uint32_t *desc_table_size), 0);
struct spdk_vhost_virtqueue *vq, uint16_t id, uint32_t len));
DEFINE_STUB(spdk_vhost_vq_get_desc, int, (struct spdk_vhost_dev *vdev,
struct spdk_vhost_virtqueue *vq, uint16_t req_idx, struct vring_desc **desc,
struct vring_desc **desc_table, uint32_t *desc_table_size), 0);
DEFINE_STUB(spdk_vhost_vring_desc_is_wr, bool, (struct vring_desc *cur_desc), false);
DEFINE_STUB(spdk_vhost_vring_desc_to_iov, int, (struct spdk_vhost_dev *vdev, struct iovec *iov,
uint16_t *iov_index, const struct vring_desc *desc), 0);
@@ -81,7 +81,8 @@ DEFINE_STUB(spdk_bdev_writev, int, (struct spdk_bdev_desc *desc, struct spdk_io_
uint64_t offset, uint64_t len,
spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_P(spdk_bdev_get_product_name, const char, (const struct spdk_bdev *bdev), {0});
DEFINE_STUB(spdk_vhost_vq_avail_ring_get, uint16_t, (struct rte_vhost_vring *vq, uint16_t *reqs,
DEFINE_STUB(spdk_vhost_vq_avail_ring_get, uint16_t, (struct spdk_vhost_virtqueue *vq,
uint16_t *reqs,
uint16_t reqs_len), 0);
DEFINE_STUB_V(spdk_vhost_dev_mem_register, (struct spdk_vhost_dev *vdev));
DEFINE_STUB_V(spdk_poller_register, (struct spdk_poller **ppoller, spdk_poller_fn fn, void *arg,


@@ -57,14 +57,14 @@ DEFINE_STUB(spdk_ring_dequeue, size_t, (struct spdk_ring *ring, void **objs, siz
DEFINE_STUB(spdk_scsi_dev_allocate_io_channels, int, (struct spdk_scsi_dev *dev), 0);
DEFINE_STUB_P(spdk_scsi_lun_get_name, const char, (const struct spdk_scsi_lun *lun), {0});
DEFINE_STUB(spdk_scsi_lun_get_id, int, (const struct spdk_scsi_lun *lun), 0);
DEFINE_STUB(spdk_vhost_vq_avail_ring_get, uint16_t, (struct rte_vhost_vring *vq, uint16_t *reqs,
uint16_t reqs_len), 0);
DEFINE_STUB(spdk_vhost_vq_get_desc, int, (struct spdk_vhost_dev *vdev, struct rte_vhost_vring *vq,
uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
uint32_t *desc_table_size), 0);
DEFINE_STUB(spdk_vhost_vq_avail_ring_get, uint16_t, (struct spdk_vhost_virtqueue *vq,
uint16_t *reqs, uint16_t reqs_len), 0);
DEFINE_STUB(spdk_vhost_vq_get_desc, int, (struct spdk_vhost_dev *vdev,
struct spdk_vhost_virtqueue *vq, uint16_t req_idx, struct vring_desc **desc,
struct vring_desc **desc_table, uint32_t *desc_table_size), 0);
DEFINE_STUB_VP(spdk_vhost_gpa_to_vva, (struct spdk_vhost_dev *vdev, uint64_t addr), {0});
DEFINE_STUB_V(spdk_vhost_vq_used_ring_enqueue, (struct spdk_vhost_dev *vdev,
struct rte_vhost_vring *vq, uint16_t id, uint32_t len));
struct spdk_vhost_virtqueue *vq, uint16_t id, uint32_t len));
DEFINE_STUB(spdk_scsi_dev_has_pending_tasks, bool, (const struct spdk_scsi_dev *dev), false);
DEFINE_STUB_V(spdk_scsi_dev_free_io_channels, (struct spdk_scsi_dev *dev));
DEFINE_STUB_V(spdk_scsi_dev_destruct, (struct spdk_scsi_dev *dev));