net/virtio: split Rx/Tx queue
We keep a common vq structure, containing only vq related fields, and
then split others into RX, TX and control queue respectively.

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
[Jianfeng Tan: found and fixed 2 bugs]
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
parent 88c107840d
commit 01ad44fd37
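The core of the change is visible in virtio_dev_queue_setup() below: the shared virtqueue, its per-descriptor vq_descx[] array, and the type-specific RX/TX/control struct are carved out of a single allocation. A minimal sketch of that layout, reconstructed from the hunks below (vq_size, vq_name and socket_id are assumed to be in scope, as in the real function):

/* Sketch only -- mirrors the allocation scheme of virtio_dev_queue_setup(). */
size_t sz_vq = RTE_ALIGN_CEIL(sizeof(struct virtqueue) +
		vq_size * sizeof(struct vq_desc_extra),
		RTE_CACHE_LINE_SIZE);
size_t sz_q = sz_vq + sizeof(struct virtnet_rx);	/* RX queue case */

struct virtqueue *vq = rte_zmalloc_socket(vq_name, sz_q,
		RTE_CACHE_LINE_SIZE, socket_id);
/* The RX-specific part sits right after the cache-aligned vq region. */
struct virtnet_rx *rxvq = RTE_PTR_ADD(vq, sz_vq);
rxvq->vq = vq;	/* back-pointer used everywhere in the datapath */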
@@ -117,40 +117,61 @@ struct rte_virtio_xstats_name_off {
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = {
{"good_packets", offsetof(struct virtqueue, packets)},
{"good_bytes", offsetof(struct virtqueue, bytes)},
{"errors", offsetof(struct virtqueue, errors)},
{"multicast_packets", offsetof(struct virtqueue, multicast)},
{"broadcast_packets", offsetof(struct virtqueue, broadcast)},
{"undersize_packets", offsetof(struct virtqueue, size_bins[0])},
{"size_64_packets", offsetof(struct virtqueue, size_bins[1])},
{"size_65_127_packets", offsetof(struct virtqueue, size_bins[2])},
{"size_128_255_packets", offsetof(struct virtqueue, size_bins[3])},
{"size_256_511_packets", offsetof(struct virtqueue, size_bins[4])},
{"size_512_1023_packets", offsetof(struct virtqueue, size_bins[5])},
{"size_1024_1517_packets", offsetof(struct virtqueue, size_bins[6])},
{"size_1518_max_packets", offsetof(struct virtqueue, size_bins[7])},
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
{"good_packets", offsetof(struct virtnet_rx, stats.packets)},
{"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
{"errors", offsetof(struct virtnet_rx, stats.errors)},
{"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
{"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
{"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
{"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
{"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
{"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
{"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
{"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
{"size_1024_1517_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
{"size_1518_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
};

#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \
sizeof(rte_virtio_q_stat_strings[0]))
/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
{"good_packets", offsetof(struct virtnet_tx, stats.packets)},
{"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
{"errors", offsetof(struct virtnet_tx, stats.errors)},
{"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
{"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
{"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
{"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
{"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
{"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
{"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
{"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
{"size_1024_1517_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
{"size_1518_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
};

#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
sizeof(rte_virtio_txq_stat_strings[0]))

static int
virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
{
uint32_t head, i;
int k, sum = 0;
virtio_net_ctrl_ack status = ~0;
struct virtio_pmd_ctrl result;
struct virtqueue *vq;

ctrl->status = status;

if (!(vq && vq->hw->cvq)) {
if (!cvq && !cvq->vq) {
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
vq = cvq->vq;
head = vq->vq_desc_head_idx;

PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
@@ -160,7 +181,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
return -1;

memcpy(vq->virtio_net_hdr_mz->addr, ctrl,
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));

/*
@@ -170,14 +191,14 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
* One RX packet for ACK.
*/
vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
vq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr;
vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mz->phys_addr;
vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
i = vq->vq_ring.desc[head].next;

for (k = 0; k < pkt_num; k++) {
vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mz->phys_addr
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
vq->vq_ring.desc[i].len = dlen[k];
@@ -187,7 +208,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
}

vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mz->phys_addr
+ sizeof(struct virtio_net_ctrl_hdr);
vq->vq_ring.desc[i].len = sizeof(ctrl->status);
vq->vq_free_cnt--;
@@ -232,7 +253,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
vq->vq_free_cnt, vq->vq_desc_head_idx);

memcpy(&result, vq->virtio_net_hdr_mz->addr,
memcpy(&result, cvq->virtio_net_hdr_mz->addr,
sizeof(struct virtio_pmd_ctrl));

return result.status;
@@ -272,10 +293,6 @@ virtio_dev_queue_release(struct virtqueue *vq)
if (vq->configured)
hw->vtpci_ops->del_queue(hw, vq);

rte_memzone_free(vq->mz);
if (vq->virtio_net_hdr_mz)
rte_memzone_free(vq->virtio_net_hdr_mz);

rte_free(vq->sw_ring);
rte_free(vq);
}
@@ -287,14 +304,21 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
uint16_t vtpci_queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
struct virtqueue **pvq)
void **pvq)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
const struct rte_memzone *mz;
char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
unsigned int vq_size, size;
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = NULL;
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq;
struct virtnet_ctl *cvq;
struct virtqueue *vq;
const char *queue_names[] = {"rvq", "txq", "cvq"};
size_t sz_vq, sz_q = 0, sz_hdr_mz = 0;
void *sw_ring = NULL;
int ret;

PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);

@@ -316,32 +340,31 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,

snprintf(vq_name, sizeof(vq_name), "port%d_%s%d",
dev->data->port_id, queue_names[queue_type], queue_idx);
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
vq_size * sizeof(struct vq_desc_extra),
RTE_CACHE_LINE_SIZE);

sz_vq = RTE_ALIGN_CEIL(sizeof(*vq) +
vq_size * sizeof(struct vq_desc_extra),
RTE_CACHE_LINE_SIZE);
if (queue_type == VTNET_RQ) {
sz_q = sz_vq + sizeof(*rxvq);
} else if (queue_type == VTNET_TQ) {
sz_q = sz_vq + sizeof(*txvq);
/*
* For each xmit packet, allocate a virtio_net_hdr
* and indirect ring elements
*/
sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
} else if (queue_type == VTNET_CQ) {
sz_q = sz_vq + sizeof(*cvq);
/* Allocate a page for control vq command, data and status */
sz_hdr_mz = PAGE_SIZE;
}

vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate virtqueue");
PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
}

if (queue_type == VTNET_RQ) {
size_t sz_sw;

sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
sizeof(vq->sw_ring[0]);
vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring", sz_sw,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!vq->sw_ring) {
PMD_INIT_LOG(ERR, "Can not allocate RX soft ring");
virtio_dev_queue_release(vq);
return -ENOMEM;
}
}

vq->hw = hw;
vq->port_id = dev->data->port_id;
vq->queue_id = queue_idx;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;

@@ -354,16 +377,17 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
*/
size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);

mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,
0, VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
mz = rte_memzone_lookup(vq_name);
if (mz == NULL) {
virtio_dev_queue_release(vq);
return -ENOMEM;
ret = -ENOMEM;
goto fail_q_alloc;
}
}

@@ -374,44 +398,65 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
*/
if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
virtio_dev_queue_release(vq);
return -ENOMEM;
ret = -ENOMEM;
goto fail_q_alloc;
}

memset(mz->addr, 0, sizeof(mz->len));
vq->mz = mz;

vq->vq_ring_mem = mz->phys_addr;
vq->vq_ring_virt_mem = mz->addr;
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, (uint64_t)mz->phys_addr);
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)(uintptr_t)mz->addr);
vq->virtio_net_hdr_mz = NULL;
vq->virtio_net_hdr_mem = 0;
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
(uint64_t)mz->phys_addr);
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
(uint64_t)(uintptr_t)mz->addr);

if (queue_type == VTNET_TQ) {
const struct rte_memzone *hdr_mz;
struct virtio_tx_region *txr;
unsigned int i;

/*
* For each xmit packet, allocate a virtio_net_hdr
* and indirect ring elements
*/
snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
dev->data->port_id, queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_name,
vq_size * sizeof(*txr),
if (sz_hdr_mz) {
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr",
dev->data->port_id, queue_names[queue_type],
queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
socket_id, 0,
RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
hdr_mz = rte_memzone_lookup(vq_name);
hdr_mz = rte_memzone_lookup(vq_hdr_name);
if (hdr_mz == NULL) {
virtio_dev_queue_release(vq);
return -ENOMEM;
ret = -ENOMEM;
goto fail_q_alloc;
}
}
vq->virtio_net_hdr_mz = hdr_mz;
vq->virtio_net_hdr_mem = hdr_mz->phys_addr;
}

if (queue_type == VTNET_RQ) {
size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
sizeof(vq->sw_ring[0]);

sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
RTE_CACHE_LINE_SIZE, socket_id);
if (!sw_ring) {
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
ret = -ENOMEM;
goto fail_q_alloc;
}

vq->sw_ring = sw_ring;
rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq);
rxvq->vq = vq;
rxvq->port_id = dev->data->port_id;
rxvq->queue_id = queue_idx;
rxvq->mz = mz;
*pvq = rxvq;
} else if (queue_type == VTNET_TQ) {
struct virtio_tx_region *txr;
unsigned int i;

txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq);
txvq->vq = vq;
txvq->port_id = dev->data->port_id;
txvq->queue_id = queue_idx;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;

txr = hdr_mz->addr;
memset(txr, 0, vq_size * sizeof(*txr));
@@ -421,58 +466,55 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));

/* first indirect descriptor is always the tx header */
start_dp->addr = vq->virtio_net_hdr_mem
start_dp->addr = txvq->virtio_net_hdr_mem
+ i * sizeof(*txr)
+ offsetof(struct virtio_tx_region, tx_hdr);

start_dp->len = vq->hw->vtnet_hdr_size;
start_dp->len = hw->vtnet_hdr_size;
start_dp->flags = VRING_DESC_F_NEXT;
}

*pvq = txvq;
} else if (queue_type == VTNET_CQ) {
/* Allocate a page for control vq command, data and status */
snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
dev->data->port_id);
vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);
if (vq->virtio_net_hdr_mz == NULL) {
if (rte_errno == EEXIST)
vq->virtio_net_hdr_mz =
rte_memzone_lookup(vq_name);
if (vq->virtio_net_hdr_mz == NULL) {
virtio_dev_queue_release(vq);
return -ENOMEM;
}
}
vq->virtio_net_hdr_mem =
vq->virtio_net_hdr_mz->phys_addr;
memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq);
cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
*pvq = cvq;
}

hw->vtpci_ops->setup_queue(hw, vq);

vq->configured = 1;
*pvq = vq;
return 0;

fail_q_alloc:
rte_free(sw_ring);
rte_memzone_free(hdr_mz);
rte_memzone_free(mz);
rte_free(vq);

return ret;
}

static int
virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
uint32_t socket_id)
{
struct virtqueue *vq;
struct virtnet_ctl *cvq;
int ret;
struct virtio_hw *hw = dev->data->dev_private;

PMD_INIT_FUNC_TRACE();
ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
vtpci_queue_idx, 0, socket_id, &vq);
vtpci_queue_idx, 0, socket_id, (void **)&cvq);
if (ret < 0) {
PMD_INIT_LOG(ERR, "control vq initialization failed");
return ret;
}

hw->cvq = vq;
hw->cvq = cvq;
return 0;
}

@@ -680,32 +722,32 @@ virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
unsigned i;

for (i = 0; i < dev->data->nb_tx_queues; i++) {
const struct virtqueue *txvq = dev->data->tx_queues[i];
const struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;

stats->opackets += txvq->packets;
stats->obytes += txvq->bytes;
stats->oerrors += txvq->errors;
stats->opackets += txvq->stats.packets;
stats->obytes += txvq->stats.bytes;
stats->oerrors += txvq->stats.errors;

if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
stats->q_opackets[i] = txvq->packets;
stats->q_obytes[i] = txvq->bytes;
stats->q_opackets[i] = txvq->stats.packets;
stats->q_obytes[i] = txvq->stats.bytes;
}
}

for (i = 0; i < dev->data->nb_rx_queues; i++) {
const struct virtqueue *rxvq = dev->data->rx_queues[i];
const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;

stats->ipackets += rxvq->packets;
stats->ibytes += rxvq->bytes;
stats->ierrors += rxvq->errors;
stats->ipackets += rxvq->stats.packets;
stats->ibytes += rxvq->stats.bytes;
stats->ierrors += rxvq->stats.errors;

if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
stats->q_ipackets[i] = rxvq->packets;
stats->q_ibytes[i] = rxvq->bytes;
stats->q_ipackets[i] = rxvq->stats.packets;
stats->q_ibytes[i] = rxvq->stats.bytes;
}
}

@@ -720,8 +762,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
unsigned count = 0;
unsigned t;

unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS +
dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS;
unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

if (xstats_names == NULL) {
/* Note: limit checked in rte_eth_xstats_names() */
@@ -730,11 +772,11 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
struct virtqueue *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
"rx_q%u_%s", i,
rte_virtio_q_stat_strings[t].name);
rte_virtio_rxq_stat_strings[t].name);
xstats_names[count].id = count;
count++;
}
@@ -744,11 +786,11 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
struct virtqueue *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
"tx_q%u_%s", i,
rte_virtio_q_stat_strings[t].name);
rte_virtio_txq_stat_strings[t].name);
xstats_names[count].id = count;
count++;
}
@@ -765,40 +807,40 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned i;
unsigned count = 0;

unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS +
dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS;
unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

if (n < nstats)
return nstats;

for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct virtqueue *rxvq = dev->data->rx_queues[i];
struct virtnet_rx *rxvq = dev->data->rx_queues[i];

if (rxvq == NULL)
continue;

unsigned t;

for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
xstats[count].id = count;
xstats[count].value = *(uint64_t *)(((char *)rxvq) +
rte_virtio_q_stat_strings[t].offset);
rte_virtio_rxq_stat_strings[t].offset);
count++;
}
}

for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct virtqueue *txvq = dev->data->tx_queues[i];
struct virtnet_tx *txvq = dev->data->tx_queues[i];

if (txvq == NULL)
continue;

unsigned t;

for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
xstats[count].id = count;
xstats[count].value = *(uint64_t *)(((char *)txvq) +
rte_virtio_q_stat_strings[t].offset);
rte_virtio_txq_stat_strings[t].offset);
count++;
}
}
@@ -818,29 +860,31 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)
unsigned int i;

for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct virtqueue *txvq = dev->data->tx_queues[i];
struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;

txvq->packets = 0;
txvq->bytes = 0;
txvq->errors = 0;
txvq->multicast = 0;
txvq->broadcast = 0;
memset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8);
txvq->stats.packets = 0;
txvq->stats.bytes = 0;
txvq->stats.errors = 0;
txvq->stats.multicast = 0;
txvq->stats.broadcast = 0;
memset(txvq->stats.size_bins, 0,
sizeof(txvq->stats.size_bins[0]) * 8);
}

for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct virtqueue *rxvq = dev->data->rx_queues[i];
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;

rxvq->packets = 0;
rxvq->bytes = 0;
rxvq->errors = 0;
rxvq->multicast = 0;
rxvq->broadcast = 0;
memset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8);
rxvq->stats.packets = 0;
rxvq->stats.bytes = 0;
rxvq->stats.errors = 0;
rxvq->stats.multicast = 0;
rxvq->stats.broadcast = 0;
memset(rxvq->stats.size_bins, 0,
sizeof(rxvq->stats.size_bins[0]) * 8);
}
}

@@ -1233,7 +1277,8 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;

virtio_dev_queue_release(hw->cvq);
if (hw->cvq)
virtio_dev_queue_release(hw->cvq->vq);

rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -1321,6 +1366,8 @@ virtio_dev_start(struct rte_eth_dev *dev)
{
uint16_t nb_queues, i;
struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq __rte_unused;

/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc) {
@@ -1360,16 +1407,22 @@ virtio_dev_start(struct rte_eth_dev *dev)

PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

for (i = 0; i < nb_queues; i++)
virtqueue_notify(dev->data->rx_queues[i]);
for (i = 0; i < nb_queues; i++) {
rxvq = dev->data->rx_queues[i];
virtqueue_notify(rxvq->vq);
}

PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

for (i = 0; i < dev->data->nb_rx_queues; i++)
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxvq = dev->data->rx_queues[i];
VIRTQUEUE_DUMP(rxvq->vq);
}

for (i = 0; i < dev->data->nb_tx_queues; i++)
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txvq = dev->data->tx_queues[i];
VIRTQUEUE_DUMP(txvq->vq);
}

return 0;
}
@@ -1380,14 +1433,14 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
int i, mbuf_num = 0;

for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct virtnet_rx *rxvq = dev->data->rx_queues[i];

PMD_INIT_LOG(DEBUG,
"Before freeing rxq[%d] used and unused buf", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
VIRTQUEUE_DUMP(rxvq->vq);

PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p",
i, dev->data->rx_queues[i]);
while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
dev->data->rx_queues[i])) != NULL) {
PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
rte_pktmbuf_free(buf);
mbuf_num++;
}
@@ -1395,27 +1448,27 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
PMD_INIT_LOG(DEBUG,
"After freeing rxq[%d] used and unused buf", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
VIRTQUEUE_DUMP(rxvq->vq);
}

for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct virtnet_tx *txvq = dev->data->tx_queues[i];

PMD_INIT_LOG(DEBUG,
"Before freeing txq[%d] used and unused bufs",
i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
VIRTQUEUE_DUMP(txvq->vq);

mbuf_num = 0;
while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
dev->data->tx_queues[i])) != NULL) {
while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
rte_pktmbuf_free(buf);

mbuf_num++;
}

PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
PMD_INIT_LOG(DEBUG,
"After freeing txq[%d] used and unused buf", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
VIRTQUEUE_DUMP(txvq->vq);
}
}

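With the counters moved behind a stats sub-struct, the xstats tables above fetch each counter by offset from the rx/tx queue struct rather than from the virtqueue. A short sketch of that lookup, assuming a valid rxvq as in virtio_dev_xstats_get():

unsigned t;
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
	/* e.g. "size_64_packets" resolves to rxvq->stats.size_bins[1] */
	uint64_t value = *(uint64_t *)((char *)rxvq +
			rte_virtio_rxq_stat_strings[t].offset);
	(void)value;	/* copied into xstats[count].value in the real code */
}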
@@ -81,7 +81,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
uint16_t vtpci_queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
struct virtqueue **pvq);
void **pvq);

void virtio_dev_queue_release(struct virtqueue *vq);

@@ -218,7 +218,7 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)

rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
src = vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

@@ -441,7 +441,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
uint64_t desc_addr, avail_addr, used_addr;
uint16_t notify_off;

desc_addr = vq->mz->phys_addr;
desc_addr = vq->vq_ring_mem;
avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
ring[vq->vq_nentries]),

@@ -40,6 +40,7 @@
#include <rte_ethdev.h>

struct virtqueue;
struct virtnet_ctl;

/* VirtIO PCI vendor/device ID. */
#define VIRTIO_PCI_VENDORID 0x1AF4
@@ -242,7 +243,7 @@ struct virtio_pci_ops {
struct virtio_net_config;

struct virtio_hw {
struct virtqueue *cvq;
struct virtnet_ctl *cvq;
struct rte_pci_ioport io;
uint64_t guest_features;
uint32_t max_tx_queues;

@@ -209,23 +209,24 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
}

static inline void
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push)
{
struct vq_desc_extra *dxp;
struct virtqueue *vq = txvq->vq;
struct vring_desc *start_dp;
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
uint16_t head_size = txvq->hw->vtnet_hdr_size;
uint16_t head_size = vq->hw->vtnet_hdr_size;
unsigned long offs;

head_idx = txvq->vq_desc_head_idx;
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
dxp = &txvq->vq_descx[idx];
dxp = &vq->vq_descx[idx];
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;

start_dp = txvq->vq_ring.desc;
start_dp = vq->vq_ring.desc;

if (can_push) {
/* put on zero'd transmit header (no offloads) */
@@ -259,7 +260,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
+ offsetof(struct virtio_tx_region, tx_hdr);

start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs;
start_dp[idx].len = txvq->hw->vtnet_hdr_size;
start_dp[idx].len = vq->hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_NEXT;
idx = start_dp[idx].next;
}
@@ -272,20 +273,19 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
} while ((cookie = cookie->next) != NULL);

if (use_indirect)
idx = txvq->vq_ring.desc[head_idx].next;
idx = vq->vq_ring.desc[head_idx].next;

txvq->vq_desc_head_idx = idx;
if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
txvq->vq_desc_tail_idx = idx;
txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
vq_update_avail_ring(txvq, head_idx);
vq->vq_desc_head_idx = idx;
if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
vq->vq_desc_tail_idx = idx;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
vq_update_avail_ring(vq, head_idx);
}

static void
virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
virtio_dev_vring_start(struct virtqueue *vq)
{
struct rte_mbuf *m;
int i, nbufs, error, size = vq->vq_nentries;
int size = vq->vq_nentries;
struct vring *vr = &vq->vq_ring;
uint8_t *ring_mem = vq->vq_ring_virt_mem;

@@ -309,30 +309,70 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
* Disable device(host) interrupting guest
*/
virtqueue_disable_intr(vq);
}

/* Only rx virtqueue needs mbufs to be allocated at initialization */
if (queue_type == VTNET_RQ) {
if (vq->mpool == NULL)
void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;

if (hw->cvq && hw->cvq->vq) {
virtio_dev_vring_start(hw->cvq->vq);
VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
}
}

void
virtio_dev_rxtx_start(struct rte_eth_dev *dev)
{
/*
* Start receive and transmit vrings
* - Setup vring structure for all queues
* - Initialize descriptor for the rx vring
* - Allocate blank mbufs for the each rx descriptor
*
*/
uint16_t i;
uint16_t desc_idx;

PMD_INIT_FUNC_TRACE();

/* Start rx vring. */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
struct virtqueue *vq = rxvq->vq;
int error, nbufs;
struct rte_mbuf *m;

virtio_dev_vring_start(vq);
if (rxvq->mpool == NULL) {
rte_exit(EXIT_FAILURE,
"Cannot allocate initial mbufs for rx virtqueue");
"Cannot allocate mbufs for rx virtqueue");
}

/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
error = ENOSPC;

#ifdef RTE_MACHINE_CPUFLAG_SSSE3
if (use_simple_rxtx)
for (i = 0; i < vq->vq_nentries; i++) {
vq->vq_ring.avail->ring[i] = i;
vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
if (use_simple_rxtx) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
vq->vq_ring.avail->ring[desc_idx] = desc_idx;
vq->vq_ring.desc[desc_idx].flags =
VRING_DESC_F_WRITE;
}
}
#endif
memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf));
for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++)
vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf;
memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
desc_idx++) {
vq->sw_ring[vq->vq_nentries + desc_idx] =
&rxvq->fake_mbuf;
}

while (!virtqueue_full(vq)) {
m = rte_mbuf_raw_alloc(vq->mpool);
m = rte_mbuf_raw_alloc(rxvq->mpool);
if (m == NULL)
break;

@@ -355,64 +395,40 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
vq_update_avail_idx(vq);

PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
} else if (queue_type == VTNET_TQ) {
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
if (use_simple_rxtx) {
int mid_idx = vq->vq_nentries >> 1;
for (i = 0; i < mid_idx; i++) {
vq->vq_ring.avail->ring[i] = i + mid_idx;
vq->vq_ring.desc[i + mid_idx].next = i;
vq->vq_ring.desc[i + mid_idx].addr =
vq->virtio_net_hdr_mem +
offsetof(struct virtio_tx_region, tx_hdr);
vq->vq_ring.desc[i + mid_idx].len =
vq->hw->vtnet_hdr_size;
vq->vq_ring.desc[i + mid_idx].flags =
VRING_DESC_F_NEXT;
vq->vq_ring.desc[i].flags = 0;
}
for (i = mid_idx; i < vq->vq_nentries; i++)
vq->vq_ring.avail->ring[i] = i;
}
#endif
}
}

void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;

if (hw->cvq) {
virtio_dev_vring_start(hw->cvq, VTNET_CQ);
VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
}
}

void
virtio_dev_rxtx_start(struct rte_eth_dev *dev)
{
/*
* Start receive and transmit vrings
* - Setup vring structure for all queues
* - Initialize descriptor for the rx vring
* - Allocate blank mbufs for the each rx descriptor
*
*/
int i;

PMD_INIT_FUNC_TRACE();

/* Start rx vring. */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
VIRTQUEUE_DUMP(vq);
}

/* Start tx vring. */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
struct virtnet_tx *txvq = dev->data->tx_queues[i];
struct virtqueue *vq = txvq->vq;

virtio_dev_vring_start(vq);
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
if (use_simple_rxtx) {
uint16_t mid_idx = vq->vq_nentries >> 1;

for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
vq->vq_ring.avail->ring[desc_idx] =
desc_idx + mid_idx;
vq->vq_ring.desc[desc_idx + mid_idx].next =
desc_idx;
vq->vq_ring.desc[desc_idx + mid_idx].addr =
txvq->virtio_net_hdr_mem +
offsetof(struct virtio_tx_region, tx_hdr);
vq->vq_ring.desc[desc_idx + mid_idx].len =
vq->hw->vtnet_hdr_size;
vq->vq_ring.desc[desc_idx + mid_idx].flags =
VRING_DESC_F_NEXT;
vq->vq_ring.desc[desc_idx].flags = 0;
}
for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
desc_idx++)
vq->vq_ring.avail->ring[desc_idx] = desc_idx;
}
#endif
VIRTQUEUE_DUMP(vq);
}
}

@@ -425,24 +441,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
struct virtqueue *vq;
struct virtnet_rx *rxvq;
int ret;

PMD_INIT_FUNC_TRACE();
ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
nb_desc, socket_id, &vq);
nb_desc, socket_id, (void **)&rxvq);
if (ret < 0) {
PMD_INIT_LOG(ERR, "rvq initialization failed");
return ret;
}

/* Create mempool for rx mbuf allocation */
vq->mpool = mp;
rxvq->mpool = mp;

dev->data->rx_queues[queue_idx] = vq;
dev->data->rx_queues[queue_idx] = rxvq;

#ifdef RTE_MACHINE_CPUFLAG_SSSE3
virtio_rxq_vec_setup(vq);
virtio_rxq_vec_setup(rxvq);
#endif

return 0;
@@ -451,7 +467,16 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
void
virtio_dev_rx_queue_release(void *rxq)
{
virtio_dev_queue_release(rxq);
struct virtnet_rx *rxvq = rxq;
struct virtqueue *vq = rxvq->vq;
/* rxvq is freed when vq is freed, and as mz should be freed after the
* del_queue, so we reserve the mz pointer first.
*/
const struct rte_memzone *mz = rxvq->mz;

/* no need to free rxq as vq and rxq are allocated together */
virtio_dev_queue_release(vq);
rte_memzone_free(mz);
}

/*
@@ -473,6 +498,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
struct virtio_hw *hw = dev->data->dev_private;
#endif
struct virtnet_tx *txvq;
struct virtqueue *vq;
uint16_t tx_free_thresh;
int ret;
@@ -497,11 +523,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
#endif

ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
nb_desc, socket_id, &vq);
nb_desc, socket_id, (void **)&txvq);
if (ret < 0) {
PMD_INIT_LOG(ERR, "rvq initialization failed");
PMD_INIT_LOG(ERR, "tvq initialization failed");
return ret;
}
vq = txvq->vq;

tx_free_thresh = tx_conf->tx_free_thresh;
if (tx_free_thresh == 0)
@@ -519,14 +546,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,

vq->vq_free_thresh = tx_free_thresh;

dev->data->tx_queues[queue_idx] = vq;
dev->data->tx_queues[queue_idx] = txvq;
return 0;
}

void
virtio_dev_tx_queue_release(void *txq)
{
virtio_dev_queue_release(txq);
struct virtnet_tx *txvq = txq;
struct virtqueue *vq = txvq->vq;
/* txvq is freed when vq is freed, and as mz should be freed after the
* del_queue, so we reserve the mz pointer first.
*/
const struct rte_memzone *hdr_mz = txvq->virtio_net_hdr_mz;
const struct rte_memzone *mz = txvq->mz;

virtio_dev_queue_release(vq);
rte_memzone_free(mz);
rte_memzone_free(hdr_mz);
}

static void
@@ -545,34 +582,34 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
}

static void
virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
uint32_t s = mbuf->pkt_len;
struct ether_addr *ea;

if (s == 64) {
vq->size_bins[1]++;
stats->size_bins[1]++;
} else if (s > 64 && s < 1024) {
uint32_t bin;

/* count zeros, and offset into correct bin */
bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
vq->size_bins[bin]++;
stats->size_bins[bin]++;
} else {
if (s < 64)
vq->size_bins[0]++;
stats->size_bins[0]++;
else if (s < 1519)
vq->size_bins[6]++;
stats->size_bins[6]++;
else if (s >= 1519)
vq->size_bins[7]++;
stats->size_bins[7]++;
}

ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
if (is_multicast_ether_addr(ea)) {
if (is_broadcast_ether_addr(ea))
vq->broadcast++;
stats->broadcast++;
else
vq->multicast++;
stats->multicast++;
}
}

@@ -581,7 +618,8 @@ virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct virtqueue *rxvq = rx_queue;
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
@@ -591,19 +629,19 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint32_t i, nb_enqueued;
uint32_t hdr_size;

nb_used = VIRTQUEUE_NUSED(rxvq);
nb_used = VIRTQUEUE_NUSED(vq);

virtio_rmb();

num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
if (likely(num > DESC_PER_CACHELINE))
num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

hw = rxvq->hw;
hw = vq->hw;
nb_rx = 0;
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
@@ -616,8 +654,8 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(rxvq, rxm);
rxvq->errors++;
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
}

@@ -638,15 +676,15 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)

rx_pkts[nb_rx++] = rxm;

rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
virtio_update_packet_stats(rxvq, rxm);
rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
virtio_update_packet_stats(&rxvq->stats, rxm);
}

rxvq->packets += nb_rx;
rxvq->stats.packets += nb_rx;

/* Allocate new mbuf for the used descriptor */
error = ENOSPC;
while (likely(!virtqueue_full(rxvq))) {
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
struct rte_eth_dev *dev
@@ -654,7 +692,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
dev->data->rx_mbuf_alloc_failed++;
break;
}
error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
if (unlikely(error)) {
rte_pktmbuf_free(new_mbuf);
break;
@@ -663,10 +701,10 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}

if (likely(nb_enqueued)) {
vq_update_avail_idx(rxvq);
vq_update_avail_idx(vq);

if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
if (unlikely(virtqueue_kick_prepare(vq))) {
virtqueue_notify(vq);
PMD_RX_LOG(DEBUG, "Notified");
}
}
@@ -679,7 +717,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct virtqueue *rxvq = rx_queue;
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
@@ -693,13 +732,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint32_t seg_res;
uint32_t hdr_size;

nb_used = VIRTQUEUE_NUSED(rxvq);
nb_used = VIRTQUEUE_NUSED(vq);

virtio_rmb();

PMD_RX_LOG(DEBUG, "used:%d", nb_used);

hw = rxvq->hw;
hw = vq->hw;
nb_rx = 0;
i = 0;
nb_enqueued = 0;
@@ -714,7 +753,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
if (nb_rx == nb_pkts)
break;

num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
if (num != 1)
continue;

@@ -728,8 +767,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(rxvq, rxm);
rxvq->errors++;
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
}

@@ -760,9 +799,9 @@ virtio_recv_mergeable_pkts(void *rx_queue,
*/
uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {
if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
uint32_t rx_num =
virtqueue_dequeue_burst_rx(rxvq,
virtqueue_dequeue_burst_rx(vq,
rcv_pkts, len, rcv_cnt);
i += rx_num;
rcv_cnt = rx_num;
@@ -770,8 +809,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
PMD_RX_LOG(ERR,
"No enough segments for packet.");
nb_enqueued++;
virtio_discard_rxbuf(rxvq, rxm);
rxvq->errors++;
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
break;
}

@@ -801,16 +840,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
rx_pkts[nb_rx]->data_len);

rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]);
rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
nb_rx++;
}

rxvq->packets += nb_rx;
rxvq->stats.packets += nb_rx;

/* Allocate new mbuf for the used descriptor */
error = ENOSPC;
while (likely(!virtqueue_full(rxvq))) {
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
struct rte_eth_dev *dev
@@ -818,7 +857,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
dev->data->rx_mbuf_alloc_failed++;
break;
}
error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
if (unlikely(error)) {
rte_pktmbuf_free(new_mbuf);
break;
@@ -827,10 +866,10 @@ virtio_recv_mergeable_pkts(void *rx_queue,
}

if (likely(nb_enqueued)) {
vq_update_avail_idx(rxvq);
vq_update_avail_idx(vq);

if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
if (unlikely(virtqueue_kick_prepare(vq))) {
virtqueue_notify(vq);
PMD_RX_LOG(DEBUG, "Notified");
}
}
@@ -841,8 +880,9 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct virtqueue *txvq = tx_queue;
struct virtio_hw *hw = txvq->hw;
struct virtnet_tx *txvq = tx_queue;
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx;
int error;
@@ -851,11 +891,11 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkts;

PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
nb_used = VIRTQUEUE_NUSED(vq);

virtio_rmb();
if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh))
virtio_xmit_cleanup(txvq, nb_used);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup(vq, nb_used);

for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
@@ -889,16 +929,16 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* default => number of segments + 1
*/
slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
need = slots - txvq->vq_free_cnt;
need = slots - vq->vq_free_cnt;

/* Positive value indicates it need free vring descriptors */
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(txvq);
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
need = RTE_MIN(need, (int)nb_used);

virtio_xmit_cleanup(txvq, need);
need = slots - txvq->vq_free_cnt;
virtio_xmit_cleanup(vq, need);
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
"No free tx descriptors to transmit");
@@ -909,17 +949,17 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Enqueue Packet buffers */
virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);

txvq->bytes += txm->pkt_len;
virtio_update_packet_stats(txvq, txm);
txvq->stats.bytes += txm->pkt_len;
virtio_update_packet_stats(&txvq->stats, txm);
}

txvq->packets += nb_tx;
txvq->stats.packets += nb_tx;

if (likely(nb_tx)) {
vq_update_avail_idx(txvq);
vq_update_avail_idx(vq);

if (unlikely(virtqueue_kick_prepare(txvq))) {
virtqueue_notify(txvq);
if (unlikely(virtqueue_kick_prepare(vq))) {
virtqueue_notify(vq);
PMD_TX_LOG(DEBUG, "Notified backend after xmit");
}
}
@@ -31,11 +31,65 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef _VIRTIO_RXTX_H_
#define _VIRTIO_RXTX_H_

#define RTE_PMD_VIRTIO_RX_MAX_BURST 64

struct virtnet_stats {
uint64_t packets;
uint64_t bytes;
uint64_t errors;
uint64_t multicast;
uint64_t broadcast;
/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
uint64_t size_bins[8];
};

struct virtnet_rx {
struct virtqueue *vq;
/* dummy mbuf, for wraparound when processing RX ring. */
struct rte_mbuf fake_mbuf;
uint64_t mbuf_initializer; /**< value to init mbufs. */
struct rte_mempool *mpool; /**< mempool for mbuf allocation */

uint16_t queue_id; /**< DPDK queue index. */
uint8_t port_id; /**< Device port identifier. */

/* Statistics */
struct virtnet_stats stats;

const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
};

struct virtnet_tx {
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */

uint16_t queue_id; /**< DPDK queue index. */
uint8_t port_id; /**< Device port identifier. */

/* Statistics */
struct virtnet_stats stats;

const struct rte_memzone *mz; /**< mem zone to populate TX ring. */
};

struct virtnet_ctl {
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
uint8_t port_id; /**< Device port identifier. */
const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
};

#ifdef RTE_MACHINE_CPUFLAG_SSSE3
int virtio_rxq_vec_setup(struct virtqueue *rxq);
int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);

int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
struct rte_mbuf *m);
#endif
#endif /* _VIRTIO_RXTX_H_ */

@@ -92,17 +92,18 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
}

static inline void
virtio_rxq_rearm_vec(struct virtqueue *rxvq)
virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
{
int i;
uint16_t desc_idx;
struct rte_mbuf **sw_ring;
struct vring_desc *start_dp;
int ret;
struct virtqueue *vq = rxvq->vq;

desc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1);
sw_ring = &rxvq->sw_ring[desc_idx];
start_dp = &rxvq->vq_ring.desc[desc_idx];
desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
sw_ring = &vq->sw_ring[desc_idx];
start_dp = &vq->vq_ring.desc[desc_idx];

ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
RTE_VIRTIO_VPMD_RX_REARM_THRESH);
@@ -120,14 +121,14 @@ virtio_rxq_rearm_vec(struct virtqueue *rxvq)

start_dp[i].addr =
(uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr +
RTE_PKTMBUF_HEADROOM - rxvq->hw->vtnet_hdr_size);
RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size);
start_dp[i].len = sw_ring[i]->buf_len -
RTE_PKTMBUF_HEADROOM + rxvq->hw->vtnet_hdr_size;
RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
}

rxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
rxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
vq_update_avail_idx(rxvq);
vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
vq_update_avail_idx(vq);
}

/* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
@@ -143,7 +144,8 @@ uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct virtqueue *rxvq = rx_queue;
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_used_elem *rused;
@@ -175,15 +177,15 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
len_adjust = _mm_set_epi16(
0, 0,
0,
(uint16_t)-rxvq->hw->vtnet_hdr_size,
0, (uint16_t)-rxvq->hw->vtnet_hdr_size,
(uint16_t)-vq->hw->vtnet_hdr_size,
0, (uint16_t)-vq->hw->vtnet_hdr_size,
0, 0);

if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
return 0;

nb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx -
rxvq->vq_used_cons_idx;
nb_used = *(volatile uint16_t *)&vq->vq_ring.used->idx -
vq->vq_used_cons_idx;

rte_compiler_barrier();

@@ -193,17 +195,17 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
nb_used = RTE_MIN(nb_used, nb_pkts);

desc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1));
rused = &rxvq->vq_ring.used->ring[desc_idx];
sw_ring = &rxvq->sw_ring[desc_idx];
sw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries];
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
rused = &vq->vq_ring.used->ring[desc_idx];
sw_ring = &vq->sw_ring[desc_idx];
sw_ring_end = &vq->sw_ring[vq->vq_nentries];

_mm_prefetch((const void *)rused, _MM_HINT_T0);

if (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
if (unlikely(virtqueue_kick_prepare(rxvq)))
virtqueue_notify(rxvq);
if (unlikely(virtqueue_kick_prepare(vq)))
virtqueue_notify(vq);
}

for (nb_pkts_received = 0;
@@ -286,9 +288,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
}
}

rxvq->vq_used_cons_idx += nb_pkts_received;
rxvq->vq_free_cnt += nb_pkts_received;
rxvq->packets += nb_pkts_received;
vq->vq_used_cons_idx += nb_pkts_received;
vq->vq_free_cnt += nb_pkts_received;
rxvq->stats.packets += nb_pkts_received;
return nb_pkts_received;
}

@@ -342,28 +344,29 @@ uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct virtqueue *txvq = tx_queue;
struct virtnet_tx *txvq = tx_queue;
struct virtqueue *vq = txvq->vq;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_desc *start_dp;
uint16_t nb_tail, nb_commit;
int i;
uint16_t desc_idx_max = (txvq->vq_nentries >> 1) - 1;
uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;

nb_used = VIRTQUEUE_NUSED(txvq);
nb_used = VIRTQUEUE_NUSED(vq);
rte_compiler_barrier();

if (nb_used >= VIRTIO_TX_FREE_THRESH)
virtio_xmit_cleanup(tx_queue);
virtio_xmit_cleanup(vq);

nb_commit = nb_pkts = RTE_MIN((txvq->vq_free_cnt >> 1), nb_pkts);
desc_idx = (uint16_t) (txvq->vq_avail_idx & desc_idx_max);
start_dp = txvq->vq_ring.desc;
nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
start_dp = vq->vq_ring.desc;
nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);

if (nb_commit >= nb_tail) {
for (i = 0; i < nb_tail; i++)
txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
for (i = 0; i < nb_tail; i++) {
start_dp[desc_idx].addr =
rte_mbuf_data_dma_addr(*tx_pkts);
@@ -375,7 +378,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
desc_idx = 0;
}
for (i = 0; i < nb_commit; i++)
txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
for (i = 0; i < nb_commit; i++) {
start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts);
start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
@@ -385,21 +388,21 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,

rte_compiler_barrier();

txvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
txvq->vq_avail_idx += nb_pkts;
txvq->vq_ring.avail->idx = txvq->vq_avail_idx;
txvq->packets += nb_pkts;
vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
vq->vq_avail_idx += nb_pkts;
vq->vq_ring.avail->idx = vq->vq_avail_idx;
txvq->stats.packets += nb_pkts;

if (likely(nb_pkts)) {
if (unlikely(virtqueue_kick_prepare(txvq)))
virtqueue_notify(txvq);
if (unlikely(virtqueue_kick_prepare(vq)))
virtqueue_notify(vq);
}

return nb_pkts;
}

int __attribute__((cold))
virtio_rxq_vec_setup(struct virtqueue *rxq)
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

@@ -153,23 +153,29 @@ struct virtio_pmd_ctrl {
uint8_t data[VIRTIO_MAX_CTRL_DATA];
};

struct vq_desc_extra {
void *cookie;
uint16_t ndescs;
};

struct virtqueue {
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. */
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
uint16_t queue_id; /**< DPDK queue index. */
uint8_t port_id; /**< Device port identifier. */
uint16_t vq_queue_index; /**< PCI queue index */
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
struct vring vq_ring; /**< vring keeping desc, used and avail */
/**
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
*/
uint16_t vq_used_cons_idx;
uint16_t vq_nentries; /**< vring desc numbers */
uint16_t vq_free_cnt; /**< num of desc available */
uint16_t vq_avail_idx; /**< sync until needed */
uint16_t vq_free_thresh; /**< free threshold */

void *vq_ring_virt_mem; /**< linear address of vring*/
void *vq_ring_virt_mem; /**< linear address of vring*/
unsigned int vq_ring_size;
phys_addr_t vq_ring_mem; /**< physical address of vring */

struct vring vq_ring; /**< vring keeping desc, used and avail */
uint16_t vq_free_cnt; /**< num of desc available */
uint16_t vq_nentries; /**< vring desc numbers */
uint16_t vq_free_thresh; /**< free threshold */
phys_addr_t vq_ring_mem; /**< physical address of vring */

/**
* Head of the free chain in the descriptor table. If
* there are no free descriptors, this will be set to
@@ -177,36 +183,11 @@ struct virtqueue {
*/
uint16_t vq_desc_head_idx;
uint16_t vq_desc_tail_idx;
/**
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
*/
uint16_t vq_used_cons_idx;
uint16_t vq_avail_idx;
uint64_t mbuf_initializer; /**< value to init mbufs. */
phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */

struct rte_mbuf **sw_ring; /**< RX software ring. */
/* dummy mbuf, for wraparound when processing RX ring. */
struct rte_mbuf fake_mbuf;

/* Statistics */
uint64_t packets;
uint64_t bytes;
uint64_t errors;
uint64_t multicast;
uint64_t broadcast;
/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
uint64_t size_bins[8];

uint16_t *notify_addr;

int configured;

struct vq_desc_extra {
void *cookie;
uint16_t ndescs;
} vq_descx[0];
uint16_t vq_queue_index; /**< PCI queue index */
uint16_t *notify_addr;
int configured;
struct rte_mbuf **sw_ring; /**< RX software ring. */
struct vq_desc_extra vq_descx[0];
};

/* If multiqueue is provided by host, then we suppport it. */
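One consequence of the combined allocation is the release ordering spelled out in virtio_dev_rx_queue_release()/virtio_dev_tx_queue_release() above: the memzone pointers live inside the rx/tx struct, so they must be saved before virtio_dev_queue_release() frees the vq (and the rx/tx struct embedded behind it). A condensed sketch of that pattern; the function name here is illustrative, not from the patch:

void example_release_rx(void *rxq)	/* hypothetical name */
{
	struct virtnet_rx *rxvq = rxq;
	const struct rte_memzone *mz = rxvq->mz;	/* save first */

	/* one rte_free() releases vq plus the trailing rxvq */
	virtio_dev_queue_release(rxvq->vq);
	rte_memzone_free(mz);	/* ring memory freed only after del_queue */
}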