vhost: replace SMP with thread fence for control path
Simply replace the SMP barriers with atomic thread fences on the vhost control path, as there are no synchronization points there.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
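The conversion is mechanical: rte_smp_wmb() becomes a release fence, rte_smp_rmb() an acquire fence, and rte_smp_mb() a sequentially consistent fence. As a reference for reviewers, a minimal sketch of the release/acquire pairing on DPDK's rte_atomic_thread_fence(); the publish()/consume() helpers and the data/ready variables are hypothetical, not code from this patch:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_pause.h>

/* Hypothetical shared state, for illustration only. */
static uint32_t data;
static uint32_t ready;

/* Writer: make the store to data visible before the flag is raised. */
static void
publish(void)
{
	__atomic_store_n(&data, 42, __ATOMIC_RELAXED);
	/* Was rte_smp_wmb(): orders the store above before the
	 * store below. */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	__atomic_store_n(&ready, 1, __ATOMIC_RELAXED);
}

/* Reader: observe the flag, then read data safely. */
static uint32_t
consume(void)
{
	while (__atomic_load_n(&ready, __ATOMIC_RELAXED) == 0)
		rte_pause();
	/* Was rte_smp_rmb(): orders the load above before the
	 * load below. */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
	return __atomic_load_n(&data, __ATOMIC_RELAXED);
}

The release fence keeps every earlier store visible before the flag store; the matching acquire fence keeps the flag load ordered before the data load, which is exactly what the rte_smp_wmb()/rte_smp_rmb() pairs guaranteed.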
commit a33c3584f3
parent 5faf0a9c54
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -106,7 +106,7 @@ __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
 		return;

 	/* To make sure guest memory updates are committed before logging */
-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);

 	page = addr / VHOST_LOG_PAGE;
 	while (page * VHOST_LOG_PAGE < addr + len) {
@@ -144,7 +144,7 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!dev->log_base))
 		return;

-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);

 	log_base = (unsigned long *)(uintptr_t)dev->log_base;

@@ -163,7 +163,7 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 #endif
 	}

-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);

 	vq->log_cache_nb_elem = 0;
 }
@@ -190,7 +190,7 @@ vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		 * No more room for a new log cache entry,
 		 * so write the dirty log map directly.
 		 */
-		rte_smp_wmb();
+		rte_atomic_thread_fence(__ATOMIC_RELEASE);
 		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

 		return;
@@ -1097,11 +1097,11 @@ rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
 	if (unlikely(idx >= vq->size))
 		return -1;

-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

 	vq->inflight_split->desc[idx].inflight = 0;

-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

 	vq->inflight_split->used_idx = last_used_idx;
 	return 0;
@@ -1140,11 +1140,11 @@ rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
 	if (unlikely(head >= vq->size))
 		return -1;

-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

 	inflight_info->desc[head].inflight = 0;

-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

 	inflight_info->old_free_head = inflight_info->free_head;
 	inflight_info->old_used_idx = inflight_info->used_idx;
@@ -1330,7 +1330,7 @@ vhost_enable_notify_packed(struct virtio_net *dev,
 			vq->avail_wrap_counter << 15;
 	}

-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);

 	vq->device_event->flags = flags;
 	return 0;
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -728,7 +728,7 @@ static __rte_always_inline void
 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	/* Flush used->idx update before we read avail->flags. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

 	/* Don't kick guest if we don't reach index specified by guest. */
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
@@ -770,7 +770,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	bool signalled_used_valid, kick = false;

 	/* Flush used desc update. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

 	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
 		if (vq->driver_event->flags !=
@@ -796,7 +796,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		goto kick;
 	}

-	rte_smp_rmb();
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

 	off_wrap = vq->driver_event->off_wrap;
 	off = off_wrap & ~(1 << 15);
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1690,7 +1690,7 @@ vhost_check_queue_inflights_split(struct virtio_net *dev,

 	if (inflight_split->used_idx != used->idx) {
 		inflight_split->desc[last_io].inflight = 0;
-		rte_smp_mb();
+		rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 		inflight_split->used_idx = used->idx;
 	}

--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1663,7 +1663,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 			queue_id, 0, count - vq->async_last_pkts_n);
 	n_pkts_cpl += vq->async_last_pkts_n;

-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);

 	while (likely((n_pkts_put < count) && n_inflight)) {
 		uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1);
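The rte_smp_mb() sites keep full ordering because the pattern there is store-then-load, which release/acquire fences cannot order. A sketch of the shape of vhost_vring_call_split() above, with a hypothetical simplified ring; only rte_atomic_thread_fence() is real DPDK API here:

#include <stdint.h>
#include <rte_atomic.h>

#define RING_NO_NOTIFY 1	/* hypothetical flag bit */

struct toy_ring {		/* hypothetical simplified ring */
	uint16_t used_idx;
	uint16_t avail_flags;
};

static void
notify_guest(struct toy_ring *r, uint16_t new_used, void (*kick)(void))
{
	/* Publish the new used index ... */
	__atomic_store_n(&r->used_idx, new_used, __ATOMIC_RELAXED);

	/* ... and order that store before the load of avail_flags
	 * below. A release fence would not suffice: release/acquire
	 * never orders an earlier store against a later load, hence
	 * rte_smp_mb() maps to a __ATOMIC_SEQ_CST fence. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	if (!(__atomic_load_n(&r->avail_flags, __ATOMIC_RELAXED)
			& RING_NO_NOTIFY))
		kick();
}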