net/virtio: fix control VQ

This patch mainly fixes the following issues in the packed ring based
control vq support in the virtio driver:

1. When parsing the used descriptors, we have to track the
   number of descs that we need to skip;
2. vq->vq_free_cnt was decreased twice for the same desc;

Meanwhile, make the function name consistent with other parts.

Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command")
Fixes: a4270ea4ff79 ("net/virtio: check head desc with correct wrap counter")

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
This commit is contained in:
Tiwei Bie 2019-01-23 01:01:40 +08:00 committed by Ferruh Yigit
parent 5ffbd70b17
commit 2923b8f9c4
2 changed files with 31 additions and 43 deletions

View File

@ -142,16 +142,17 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
static struct virtio_pmd_ctrl *
virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
virtio_send_command_packed(struct virtnet_ctl *cvq,
struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
{
struct virtqueue *vq = cvq->vq;
int head;
struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
struct virtio_pmd_ctrl *result;
bool avail_wrap_counter, used_wrap_counter;
uint16_t flags;
bool avail_wrap_counter;
int sum = 0;
int nb_descs = 0;
int k;
/*
@ -162,11 +163,10 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
*/
head = vq->vq_avail_idx;
avail_wrap_counter = vq->avail_wrap_counter;
used_wrap_counter = vq->used_wrap_counter;
desc[head].flags = VRING_DESC_F_NEXT;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
@ -177,55 +177,51 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
desc[vq->vq_avail_idx].len = dlen[k];
flags = VRING_DESC_F_NEXT;
desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
VRING_DESC_F_USED(!vq->avail_wrap_counter);
sum += dlen[k];
vq->vq_free_cnt--;
flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
VRING_DESC_F_USED(!vq->avail_wrap_counter);
desc[vq->vq_avail_idx].flags = flags;
rte_smp_wmb();
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
}
}
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
flags = VRING_DESC_F_WRITE;
flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
VRING_DESC_F_USED(!vq->avail_wrap_counter);
desc[vq->vq_avail_idx].flags = flags;
flags = VRING_DESC_F_NEXT;
flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
VRING_DESC_F_USED(!avail_wrap_counter);
desc[head].flags = flags;
rte_smp_wmb();
desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
VRING_DESC_F_USED(!vq->avail_wrap_counter);
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
}
virtio_wmb(vq->hw->weak_barriers);
desc[head].flags = VRING_DESC_F_NEXT |
VRING_DESC_F_AVAIL(avail_wrap_counter) |
VRING_DESC_F_USED(!avail_wrap_counter);
virtio_wmb(vq->hw->weak_barriers);
virtqueue_notify(vq);
/* wait for used descriptors in virtqueue */
do {
rte_rmb();
while (!desc_is_used(&desc[head], vq))
usleep(100);
} while (!__desc_is_used(&desc[head], used_wrap_counter));
virtio_rmb(vq->hw->weak_barriers);
/* now get used descriptors */
while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
vq->vq_free_cnt++;
if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
vq->vq_used_cons_idx -= vq->vq_nentries;
vq->used_wrap_counter ^= 1;
}
vq->vq_free_cnt += nb_descs;
vq->vq_used_cons_idx += nb_descs;
if (vq->vq_used_cons_idx >= vq->vq_nentries) {
vq->vq_used_cons_idx -= vq->vq_nentries;
vq->used_wrap_counter ^= 1;
}
result = cvq->virtio_net_hdr_mz->addr;
@ -266,7 +262,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
sizeof(struct virtio_pmd_ctrl));
if (vtpci_packed_queue(vq->hw)) {
result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
goto out_unlock;
}

View File

@ -281,7 +281,7 @@ struct virtio_tx_region {
};
static inline int
__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
uint16_t used, avail, flags;
@ -289,16 +289,9 @@ __desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
used = !!(flags & VRING_DESC_F_USED(1));
avail = !!(flags & VRING_DESC_F_AVAIL(1));
return avail == used && used == wrap_counter;
return avail == used && used == vq->used_wrap_counter;
}
static inline int
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
return __desc_is_used(desc, vq->used_wrap_counter);
}
static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
@ -354,7 +347,6 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
{
uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
virtio_wmb(vq->hw->weak_barriers);
vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;