vhost: remove vDPA available ring relay helper

We don't need to relay the available ring and validate its descriptors in software: the vDPA device can access the available ring in guest memory directly (see the address-translation sketch below). With this patch, we achieve better throughput and lower CPU usage.

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Xiao Wang, 2019-01-23 21:02:59 +08:00, committed by Ferruh Yigit
parent 8b90e43581
commit b172129583
4 changed files with 12 additions and 161 deletions
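
The key change is in the first hunk below: instead of pointing the hardware at a host-allocated mediated copy of the available ring and having a relay thread copy entries into it, the driver now translates the guest's own available ring address into a guest physical address (GPA) and programs the device with that, so the hardware fetches avail entries directly. A minimal sketch of how an HVA-to-GPA lookup such as hva_to_gpa() can be implemented, assuming a simplified region table (struct mem_region here is a hypothetical stand-in for the vhost memory map that the real driver obtains via rte_vhost_get_mem_table()):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical, simplified stand-in for a vhost memory region. */
struct mem_region {
	uint64_t host_user_addr;   /* HVA where the region is mapped */
	uint64_t guest_phys_addr;  /* GPA of the region in the guest */
	uint64_t size;
};

/* Translate a host virtual address into a guest physical address by
 * scanning the region table. Returns 0 when the HVA is not backed by
 * guest memory, which is why the patch treats gpa == 0 as an error.
 */
static uint64_t
hva_to_gpa(const struct mem_region *regs, size_t n, uint64_t hva)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (hva >= regs[i].host_user_addr &&
		    hva < regs[i].host_user_addr + regs[i].size)
			return regs[i].guest_phys_addr +
				(hva - regs[i].host_user_addr);
	}
	return 0; /* not backed by guest memory */
}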

drivers/net/ifc/ifcvf_vdpa.c

@@ -584,9 +584,12 @@ m_ifcvf_start(struct ifcvf_internal *internal)
 		}
 		hw->vring[i].desc = gpa;
 
-		hw->vring[i].avail = m_vring_iova +
-			(char *)internal->m_vring[i].avail -
-			(char *)internal->m_vring[i].desc;
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for available ring.");
+			return -1;
+		}
+		hw->vring[i].avail = gpa;
 
 		hw->vring[i].used = m_vring_iova +
 			(char *)internal->m_vring[i].used -
@@ -673,13 +676,6 @@ m_disable_vfio_intr(struct ifcvf_internal *internal)
 	rte_intr_disable(intr_handle);
 }
 
-static void
-update_avail_ring(struct ifcvf_internal *internal, uint16_t qid)
-{
-	rte_vdpa_relay_vring_avail(internal->vid, qid, &internal->m_vring[qid]);
-	ifcvf_notify_queue(&internal->hw, qid);
-}
-
 static void
 update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
 {
@@ -703,12 +699,10 @@ vring_relay(void *arg)
 	vid = internal->vid;
 	q_num = rte_vhost_get_vring_num(vid);
 
 	/* prepare the mediated vring */
-	for (qid = 0; qid < q_num; qid++) {
+	for (qid = 0; qid < q_num; qid++)
 		rte_vhost_get_vring_base(vid, qid,
 				&internal->m_vring[qid].avail->idx,
 				&internal->m_vring[qid].used->idx);
-		rte_vdpa_relay_vring_avail(vid, qid, &internal->m_vring[qid]);
-	}
 
 	/* add notify fd and interrupt fd to epoll */
 	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
@@ -775,7 +769,7 @@
 			if (events[i].data.u32 & 1)
 				update_used_ring(internal, qid);
 			else
-				update_avail_ring(internal, qid);
+				ifcvf_notify_queue(&internal->hw, qid);
 		}
 	}
@@ -818,13 +812,14 @@ static int
 ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
 {
 	int ret;
+	int vid = internal->vid;
 
 	/* stop the direct IO data path */
 	unset_notify_relay(internal);
 	vdpa_ifcvf_stop(internal);
 	vdpa_disable_vfio_intr(internal);
 
-	ret = rte_vhost_host_notifier_ctrl(internal->vid, false);
+	ret = rte_vhost_host_notifier_ctrl(vid, false);
 	if (ret && ret != -ENOTSUP)
 		goto error;
@@ -843,6 +838,8 @@ ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
 	if (ret)
 		goto stop_vf;
 
+	rte_vhost_host_notifier_ctrl(vid, true);
+
 	internal->sw_fallback_running = true;
 	return 0;

lib/librte_vhost/rte_vdpa.h

@@ -174,25 +174,6 @@ rte_vdpa_get_device_num(void);
 int __rte_experimental
 rte_vhost_host_notifier_ctrl(int vid, bool enable);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
- * Synchronize the available ring from guest to mediated ring, help to
- * check desc validity to protect against malicious guest driver.
- *
- * @param vid
- *  vhost device id
- * @param qid
- *  vhost queue id
- * @param vring_m
- *  mediated virtio ring pointer
- * @return
- *  number of synced available entries on success, -1 on failure
- */
-int __rte_experimental
-rte_vdpa_relay_vring_avail(int vid, uint16_t qid, void *vring_m);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice
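
The removed declaration above pairs with the driver-side deletion of update_avail_ring(). For reference, a caller of this helper looked roughly like the following hedged sketch; kick_device() is a hypothetical stand-in for the driver's queue-notify hook (the ifcvf driver used ifcvf_notify_queue()):

#include <stdint.h>
#include <rte_vdpa.h>	/* declared the (now removed) relay helper */

static void kick_device(uint16_t qid);	/* hypothetical notify hook */

/* Sketch of a pre-patch caller, modeled on the deleted
 * update_avail_ring(): sync the guest avail ring into the mediated
 * ring, then forward the guest's kick to the device.
 */
static void
relay_avail_and_kick(int vid, uint16_t qid, void *m_vring)
{
	/* Returns the number of avail entries synced into m_vring,
	 * or -1 when the guest published an invalid descriptor chain.
	 */
	if (rte_vdpa_relay_vring_avail(vid, qid, m_vring) < 0)
		return;	/* bad ring from the guest: drop the kick */

	kick_device(qid);
}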

lib/librte_vhost/rte_vhost_version.map

@@ -84,6 +84,5 @@ EXPERIMENTAL {
 	rte_vhost_crypto_set_zero_copy;
 	rte_vhost_va_from_guest_pa;
 	rte_vhost_host_notifier_ctrl;
-	rte_vdpa_relay_vring_avail;
 	rte_vdpa_relay_vring_used;
 };

lib/librte_vhost/vdpa.c

@@ -123,132 +123,6 @@ rte_vdpa_get_device_num(void)
 	return vdpa_device_num;
 }
 
-static bool
-invalid_desc_check(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
-{
-	uint64_t desc_addr, desc_chunck_len;
-
-	while (desc_len) {
-		desc_chunck_len = desc_len;
-		desc_addr = vhost_iova_to_vva(dev, vq,
-				desc_iova,
-				&desc_chunck_len,
-				perm);
-		if (!desc_addr)
-			return true;
-
-		desc_len -= desc_chunck_len;
-		desc_iova += desc_chunck_len;
-	}
-
-	return false;
-}
-
-int __rte_experimental
-rte_vdpa_relay_vring_avail(int vid, uint16_t qid, void *vring_m)
-{
-	struct virtio_net *dev = get_device(vid);
-	uint16_t idx, idx_m, desc_id;
-	struct vring_desc desc;
-	struct vhost_virtqueue *vq;
-	struct vring_desc *desc_ring;
-	struct vring_desc *idesc = NULL;
-	struct vring *s_vring;
-	uint64_t dlen;
-	uint32_t nr_descs;
-	int ret;
-	uint8_t perm;
-
-	if (!dev || !vring_m)
-		return -1;
-
-	if (qid >= dev->nr_vring)
-		return -1;
-
-	if (vq_is_packed(dev))
-		return -1;
-
-	s_vring = (struct vring *)vring_m;
-	vq = dev->virtqueue[qid];
-	idx = vq->avail->idx;
-	idx_m = s_vring->avail->idx;
-	ret = (uint16_t)(idx - idx_m);
-
-	while (idx_m != idx) {
-		/* avail entry copy */
-		desc_id = vq->avail->ring[idx_m & (vq->size - 1)];
-		if (unlikely(desc_id >= vq->size))
-			return -1;
-
-		s_vring->avail->ring[idx_m & (vq->size - 1)] = desc_id;
-		desc_ring = vq->desc;
-		nr_descs = vq->size;
-
-		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
-			dlen = vq->desc[desc_id].len;
-			nr_descs = dlen / sizeof(struct vring_desc);
-			if (unlikely(nr_descs > vq->size))
-				return -1;
-
-			desc_ring = (struct vring_desc *)(uintptr_t)
-				vhost_iova_to_vva(dev, vq,
-						vq->desc[desc_id].addr, &dlen,
-						VHOST_ACCESS_RO);
-			if (unlikely(!desc_ring))
-				return -1;
-
-			if (unlikely(dlen < vq->desc[desc_id].len)) {
-				idesc = alloc_copy_ind_table(dev, vq,
-						vq->desc[desc_id].addr,
-						vq->desc[desc_id].len);
-				if (unlikely(!idesc))
-					return -1;
-
-				desc_ring = idesc;
-			}
-
-			desc_id = 0;
-		}
-
-		/* check if the buf addr is within the guest memory */
-		do {
-			if (unlikely(desc_id >= vq->size))
-				goto fail;
-			if (unlikely(nr_descs-- == 0))
-				goto fail;
-			desc = desc_ring[desc_id];
-			perm = desc.flags & VRING_DESC_F_WRITE ?
-				VHOST_ACCESS_WO : VHOST_ACCESS_RO;
-			if (invalid_desc_check(dev, vq, desc.addr, desc.len,
-						perm))
-				goto fail;
-			desc_id = desc.next;
-		} while (desc.flags & VRING_DESC_F_NEXT);
-
-		if (unlikely(idesc)) {
-			free_ind_table(idesc);
-			idesc = NULL;
-		}
-
-		idx_m++;
-	}
-
-	rte_smp_wmb();
-	s_vring->avail->idx = idx;
-
-	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
-		vhost_avail_event(vq) = idx;
-
-	return ret;
-
-fail:
-	if (unlikely(idesc))
-		free_ind_table(idesc);
-	return -1;
-}
-
 int __rte_experimental
 rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
 {