net/virtio: improve perf via one-way barriers on used flag

If VIRTIO_F_ORDER_PLATFORM(36) is not negotiated, the frontend and
backend are assumed to be implemented in software, i.e. they can run
on identical CPUs in an SMP configuration.
Thus a weak form of memory barriers, rte_smp_r/wmb rather than
rte_cio_r/wmb, is sufficient for this case (vq->hw->weak_barriers == 1)
and yields better performance.
For that case, this patch improves performance further by replacing
the two-way barriers with C11 one-way barriers on the used flags in
the packed ring.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Authored by Joyce Kong, 2019-09-17 13:28:26 +08:00; committed by Ferruh Yigit
parent 6094557de0
commit 2c661d418e
4 changed files with 40 additions and 9 deletions
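A minimal, self-contained sketch of the ordering pattern the patch relies on (illustration only, not code from the patch; struct pkt_desc, publish() and consume() are hypothetical stand-ins for the packed-ring descriptor and its producer/consumer paths): the side producing a used descriptor publishes it with a C11 release store on the flags word, the side consuming it polls with an acquire load, and neither path needs a standalone two-way barrier.

/* barrier_sketch.c - hypothetical illustration, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define F_USED (1u << 15)

struct pkt_desc {               /* stand-in for a packed-ring descriptor */
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

/* Producer side: fill the descriptor, then flip the used flag with a
 * one-way release store; the payload writes above cannot move past it.
 * This stands in for the old rte_smp_wmb() + plain store pair.
 */
static void
publish(struct pkt_desc *d, uint32_t len, uint16_t id)
{
        d->len = len;
        d->id = id;
        __atomic_store_n(&d->flags, (uint16_t)F_USED, __ATOMIC_RELEASE);
}

/* Consumer side: poll the flag with a one-way acquire load; once F_USED
 * is observed, the reads of len/id below are guaranteed to see the values
 * written before the flag.  This stands in for the old plain load followed
 * by a two-way rte_smp_rmb().
 */
static int
consume(struct pkt_desc *d, uint32_t *len, uint16_t *id)
{
        uint16_t flags = __atomic_load_n(&d->flags, __ATOMIC_ACQUIRE);

        if (!(flags & F_USED))
                return 0;
        *len = d->len;
        *id = d->id;
        return 1;
}

int
main(void)
{
        struct pkt_desc d = {0};
        uint32_t len;
        uint16_t id;

        publish(&d, 64, 3);
        if (consume(&d, &len, &id))
                printf("used: len=%u id=%u\n", len, id);
        return 0;
}

In the diffs below, the acquire side is folded into desc_is_used() via the new virtqueue_fetch_flags_packed() helper, and the release side replaces the rte_smp_wmb() + plain store pairs in virtio_user_handle_cq_packed() and flush_shadow_used_ring_packed().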


@@ -164,9 +164,11 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
 	for (i = 0; i < num; i++) {
 		used_idx = vq->vq_used_cons_idx;
+		/* desc_is_used has a load-acquire or rte_cio_rmb inside
+		 * and waits for the used desc in virtqueue.
+		 */
 		if (!desc_is_used(&desc[used_idx], vq))
 			return i;
-		virtio_rmb(vq->hw->weak_barriers);
 		len[i] = desc[used_idx].len;
 		id = desc[used_idx].id;
 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
@@ -275,8 +277,10 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 	struct vq_desc_extra *dxp;
 	used_idx = vq->vq_used_cons_idx;
+	/* desc_is_used has a load-acquire or rte_cio_rmb inside
+	 * and waits for the used desc in virtqueue.
+	 */
 	while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
-		virtio_rmb(vq->hw->weak_barriers);
 		id = desc[used_idx].id;
 		do {
 			curr_id = used_idx;
@@ -307,8 +311,10 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
 	struct vq_desc_extra *dxp;
 	used_idx = vq->vq_used_cons_idx;
+	/* desc_is_used has a load-acquire or rte_cio_rmb inside
+	 * and waits for the used desc in virtqueue.
+	 */
 	while (num-- && desc_is_used(&desc[used_idx], vq)) {
-		virtio_rmb(vq->hw->weak_barriers);
 		id = desc[used_idx].id;
 		dxp = &vq->vq_descx[id];
 		vq->vq_used_cons_idx += dxp->ndescs;


@@ -698,8 +698,8 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
 		if (vq->used_wrap_counter)
 			flags |= VRING_PACKED_DESC_F_AVAIL_USED;
-		rte_smp_wmb();
-		vring->desc[vq->used_idx].flags = flags;
+		__atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
+				 __ATOMIC_RELEASE);
 		vq->used_idx += n_descs;
 		if (vq->used_idx >= dev->queue_size) {


@@ -54,6 +54,32 @@ virtio_wmb(uint8_t weak_barriers)
 		rte_cio_wmb();
 }
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+			      uint8_t weak_barriers)
+{
+	uint16_t flags;
+	if (weak_barriers) {
+/* x86 prefers using rte_smp_rmb over __atomic_load_n as it reports
+ * better perf (~1.5%), which comes from the branch saved by the compiler.
+ * The if and else branches are identical, as the smp and cio barriers
+ * are both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		flags = dp->flags;
+		rte_smp_rmb();
+#else
+		flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#endif
+	} else {
+		flags = dp->flags;
+		rte_cio_rmb();
+	}
+	return flags;
+}
 static inline void
 virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 			      uint16_t flags, uint8_t weak_barriers)
@@ -307,7 +333,7 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
 {
 	uint16_t used, avail, flags;
-	flags = desc->flags;
+	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
 	used = !!(flags & VRING_PACKED_DESC_F_USED);
 	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
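A side note on the x86-specific branch in virtqueue_fetch_flags_packed() above: in DPDK, rte_smp_rmb() on x86 expands to a compiler barrier only (the TSO memory model already keeps loads ordered), so the plain-load form and the acquire-load form give the same guarantee there; with both arms of the if/else reduced to compiler barriers, the compiler can drop the run-time weak_barriers branch, which is the ~1.5% the comment refers to. A hypothetical, standalone way to compare the two forms (fetch_acquire() and fetch_plain_rmb() are illustration-only names):

#include <stdint.h>

/* One-way acquire load: typically a plain MOV on x86-64, LDARH/LDAPRH on arm64. */
uint16_t
fetch_acquire(const uint16_t *flags)
{
        return __atomic_load_n(flags, __ATOMIC_ACQUIRE);
}

/* Plain load followed by what rte_smp_rmb() expands to on x86:
 * a pure compiler barrier, so no fence instruction is emitted.
 */
uint16_t
fetch_plain_rmb(const uint16_t *flags)
{
        uint16_t f = *flags;

        __asm__ __volatile__("" : : : "memory");
        return f;
}

Compiling both with -O2 and inspecting the assembly (e.g. with objdump -d) typically shows identical bodies on x86-64, while on arm64 only fetch_acquire() carries the ordering in the load instruction itself.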


@@ -110,8 +110,6 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
 		used_idx -= vq->size;
 	}
-	rte_smp_wmb();
 	for (i = 0; i < vq->shadow_used_idx; i++) {
 		uint16_t flags;
@@ -147,7 +145,8 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
 		}
 	}
-	vq->desc_packed[head_idx].flags = head_flags;
+	__atomic_store_n(&vq->desc_packed[head_idx].flags, head_flags,
+			 __ATOMIC_RELEASE);
 	vhost_log_cache_used_vring(dev, vq,
 				   head_idx *