virtio: code-style cleanup
This patch cleans up some coding-style issues and fixes errors and warnings reported by checkpatch.pl.

Signed-off-by: Ouyang Changchun <changchun.ouyang@intel.com>
Tested-by: Waterman Cao <waterman.cao@intel.com>
Acked-by: Thomas Monjalon <thomas.monjalon@6wind.com>
parent c3dfe188ba
commit 5591a4a913

lib/librte_pmd_virtio
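Most of the diff below applies the same few checkpatch.pl rules repeatedly rather than changing behavior. As orientation, here is a minimal before/after sketch of those rules (illustrative C only, not code taken from the patch; checkpatch.pl is the Linux kernel's style checker and can be run on a patch file directly):

/* Illustrative only: the recurring checkpatch.pl fixes in this patch. */

/* Before: no space after the "if" keyword, two statements on one line,
 * and parenthesized return values. */
static int style_before(int num)
{
	if(num == 0) return (-1);
	return (0);
}

/* After: "if (", one statement per line, bare return values; long lines
 * are wrapped to the 80-column limit elsewhere in the patch. */
static int style_after(int num)
{
	if (num == 0)
		return -1;
	return 0;
}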
@@ -134,7 +134,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,

 	if (queue_type == VTNET_RQ) {
 		rte_snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
-				dev->data->port_id, queue_idx);
+			dev->data->port_id, queue_idx);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
 			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
 		memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
@@ -146,8 +146,8 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 		memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
-	} else if(queue_type == VTNET_CQ) {
+	} else if (queue_type == VTNET_CQ) {
 		rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
-				dev->data->port_id);
+			dev->data->port_id);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue),
 			CACHE_LINE_SIZE);
 		memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
 	}
@@ -155,6 +155,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 		PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue\n", __func__);
 		return (-ENOMEM);
 	}
+
 	vq->hw = hw;
 	vq->port_id = dev->data->port_id;
 	vq->queue_id = queue_idx;
@@ -171,11 +172,12 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d\n", size, vq->vq_ring_size);

 	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
-			socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+		socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
 	if (mz == NULL) {
 		rte_free(vq);
 		return (-ENOMEM);
 	}
+
 	/*
 	 * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
 	 * and only accepts 32 bit page frame number.
@@ -186,6 +188,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 		rte_free(vq);
 		return (-ENOMEM);
 	}
+
 	memset(mz->addr, 0, sizeof(mz->len));
 	vq->mz = mz;
 	vq->vq_ring_mem = mz->phys_addr;
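The VIRTIO_PCI_QUEUE_PF comment above refers to a real constraint of the legacy virtio PCI interface: the VIRTIO_PCI_QUEUE_PFN register is 32 bits wide and holds a page frame number (the ring's physical address shifted right by VIRTIO_PCI_QUEUE_ADDR_SHIFT), so rings whose PFN does not fit in 32 bits must be rejected. A minimal sketch of such a check, with hypothetical names:

#include <stdint.h>

/* Hypothetical sketch: the legacy VIRTIO_PCI_QUEUE_PFN register is 32 bits
 * wide and holds phys_addr >> 12, so an allocation is usable only if the
 * page frame number of its last byte still fits in 32 bits. */
#define QUEUE_ADDR_SHIFT 12	/* mirrors VIRTIO_PCI_QUEUE_ADDR_SHIFT */

static int ring_pfn_fits(uint64_t ring_phys, uint64_t ring_size)
{
	uint64_t last_pfn = (ring_phys + ring_size - 1) >> QUEUE_ADDR_SHIFT;

	return last_pfn <= UINT32_MAX;	/* 0 => reject the allocation */
}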
@@ -197,8 +200,8 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,

 	if (queue_type == VTNET_TQ) {
 		/*
-		* For each xmit packet, allocate a virtio_net_hdr
-		*/
+		 * For each xmit packet, allocate a virtio_net_hdr
+		 */
 		rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
 			dev->data->port_id, queue_idx);
 		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
@@ -206,10 +209,12 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 			socket_id, 0, CACHE_LINE_SIZE);
 		if (vq->virtio_net_hdr_mz == NULL) {
 			rte_free(vq);
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
-		vq->virtio_net_hdr_mem = (void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
-		memset(vq->virtio_net_hdr_mz->addr, 0, vq_size * sizeof(struct virtio_net_hdr));
+		vq->virtio_net_hdr_mem =
+			(void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
+		memset(vq->virtio_net_hdr_mz->addr, 0,
+			vq_size * sizeof(struct virtio_net_hdr));
 	} else if (queue_type == VTNET_CQ) {
 		/* Allocate a page for control vq command, data and status */
 		rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
@@ -218,9 +223,10 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 			PAGE_SIZE, socket_id, 0, CACHE_LINE_SIZE);
 		if (vq->virtio_net_hdr_mz == NULL) {
 			rte_free(vq);
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
-		vq->virtio_net_hdr_mem = (void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
+		vq->virtio_net_hdr_mem =
+			(void *)(uintptr_t)vq->virtio_net_hdr_mz->phys_addr;
 		memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
 	}

@@ -231,7 +237,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 	VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN,
 			mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
 	*pvq = vq;
-	return (0);
+	return 0;
 }

 static int
@@ -253,7 +259,7 @@ virtio_dev_cq_queue_setup(struct rte_eth_dev *dev,
 	}

 	hw->cvq = vq;
-	return (0);
+	return 0;
 }

 static void
@@ -264,26 +270,27 @@ virtio_dev_close(struct rte_eth_dev *dev)
 	virtio_dev_stop(dev);
 }

-
 /*
  * dev_ops for virtio, bare necessities for basic operation
  */
 static struct eth_dev_ops virtio_eth_dev_ops = {
-	.dev_configure          = virtio_dev_configure,
-	.dev_start              = virtio_dev_start,
-	.dev_stop               = virtio_dev_stop,
-	.dev_close              = virtio_dev_close,
+	.dev_configure           = virtio_dev_configure,
+	.dev_start               = virtio_dev_start,
+	.dev_stop                = virtio_dev_stop,
+	.dev_close               = virtio_dev_close,

-	.dev_infos_get          = virtio_dev_info_get,
-	.stats_get              = virtio_dev_stats_get,
-	.stats_reset            = virtio_dev_stats_reset,
-	.link_update            = virtio_dev_link_update,
-	.mac_addr_add           = NULL,
-	.mac_addr_remove        = NULL,
-	.rx_queue_setup         = virtio_dev_rx_queue_setup,
-	.rx_queue_release       = virtio_dev_rx_queue_release, /* meaningfull only to multiple queue */
-	.tx_queue_setup         = virtio_dev_tx_queue_setup,
-	.tx_queue_release       = virtio_dev_tx_queue_release /* meaningfull only to multiple queue */
+	.dev_infos_get           = virtio_dev_info_get,
+	.stats_get               = virtio_dev_stats_get,
+	.stats_reset             = virtio_dev_stats_reset,
+	.link_update             = virtio_dev_link_update,
+	.mac_addr_add            = NULL,
+	.mac_addr_remove         = NULL,
+	.rx_queue_setup          = virtio_dev_rx_queue_setup,
+	/* meaningfull only to multiple queue */
+	.rx_queue_release        = virtio_dev_rx_queue_release,
+	.tx_queue_setup          = virtio_dev_tx_queue_setup,
+	/* meaningfull only to multiple queue */
+	.tx_queue_release        = virtio_dev_tx_queue_release,
 };

 static inline int
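For context, eth_dev_ops is the classic C ops-table pattern: the generic ethdev layer dispatches each public API call through these function pointers, and a NULL entry (as for .mac_addr_add above) marks the operation as unsupported. A generic, self-contained sketch of that pattern, not DPDK's actual dispatch code:

#include <stddef.h>
#include <errno.h>

struct sketch_dev;	/* opaque device handle; all names are illustrative */

struct sketch_dev_ops {
	int (*dev_configure)(struct sketch_dev *dev);
	int (*mac_addr_add)(struct sketch_dev *dev);	/* NULL => unsupported */
};

struct sketch_dev {
	const struct sketch_dev_ops *ops;
};

/* The generic layer checks the pointer before calling into the driver. */
static int sketch_dev_configure(struct sketch_dev *dev)
{
	if (dev->ops->dev_configure == NULL)
		return -ENOTSUP;
	return dev->ops->dev_configure(dev);
}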
@@ -573,6 +580,7 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
 	/* Tell the host we've known how to drive the device. */
 	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 	virtio_negotiate_features(hw);
+
 	/* Setting up rx_header size for the device */
 	if(vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -587,6 +595,7 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
 			ETHER_ADDR_LEN);
 		return (-ENOMEM);
 	}
+
 	/* Copy the permanent MAC address to: virtio_hw */
 	virtio_get_hwaddr(hw);
 	ether_addr_copy((struct ether_addr *) hw->mac_addr,
@@ -600,7 +609,7 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
 			eth_dev->data->port_id, pci_dev->id.vendor_id,
 			pci_dev->id.device_id);
-	return (0);
+	return 0;
 }

 static struct eth_driver rte_virtio_pmd = {
@@ -669,7 +678,6 @@ virtio_dev_start(struct rte_eth_dev *dev)

 	/* Check VIRTIO_NET_F_STATUS for link status*/
 	if(vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
-
 		vtpci_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
 				&status, sizeof(status));
@@ -57,21 +57,21 @@

 /* Features desired/implemented by this driver. */
 #define VTNET_FEATURES \
-	(VIRTIO_NET_F_MAC       | \
-	VIRTIO_NET_F_STATUS     | \
-	VIRTIO_NET_F_CTRL_VQ    | \
-	VIRTIO_NET_F_CTRL_RX    | \
-	VIRTIO_NET_F_CTRL_VLAN  | \
-	VIRTIO_NET_F_CSUM       | \
-	VIRTIO_NET_F_HOST_TSO4  | \
-	VIRTIO_NET_F_HOST_TSO6  | \
-	VIRTIO_NET_F_HOST_ECN   | \
-	VIRTIO_NET_F_GUEST_CSUM | \
-	VIRTIO_NET_F_GUEST_TSO4 | \
-	VIRTIO_NET_F_GUEST_TSO6 | \
-	VIRTIO_NET_F_GUEST_ECN  | \
-	VIRTIO_NET_F_MRG_RXBUF  | \
-	VIRTIO_RING_F_INDIRECT_DESC)
+	(VIRTIO_NET_F_MAC        | \
+	 VIRTIO_NET_F_STATUS     | \
+	 VIRTIO_NET_F_CTRL_VQ    | \
+	 VIRTIO_NET_F_CTRL_RX    | \
+	 VIRTIO_NET_F_CTRL_VLAN  | \
+	 VIRTIO_NET_F_CSUM       | \
+	 VIRTIO_NET_F_HOST_TSO4  | \
+	 VIRTIO_NET_F_HOST_TSO6  | \
+	 VIRTIO_NET_F_HOST_ECN   | \
+	 VIRTIO_NET_F_GUEST_CSUM | \
+	 VIRTIO_NET_F_GUEST_TSO4 | \
+	 VIRTIO_NET_F_GUEST_TSO6 | \
+	 VIRTIO_NET_F_GUEST_ECN  | \
+	 VIRTIO_NET_F_MRG_RXBUF  | \
+	 VIRTIO_RING_F_INDIRECT_DESC)

 /*
  * RX/TX function prototypes
@@ -82,14 +82,14 @@ virtio_dev_vring_start(struct rte_eth_dev *dev, struct virtqueue *vq, int queue_type)
 	PMD_INIT_FUNC_TRACE();

 	/*
-	* Reinitialise since virtio port might have been stopped and restarted
-	*/
+	 * Reinitialise since virtio port might have been stopped and restarted
+	 */
 	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
 	vring_init(vr, size, ring_mem, vq->vq_alignment);
 	vq->vq_used_cons_idx = 0;
 	vq->vq_desc_head_idx = 0;
 	vq->vq_avail_idx = 0;
-	vq->vq_desc_tail_idx = vq->vq_nentries - 1;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
 	vq->vq_free_cnt = vq->vq_nentries;
 	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
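vring_init() above rebuilds the standard split-ring layout inside the zeroed memzone: descriptor table first, then the avail ring, then (padded up to the ring alignment) the used ring. A self-contained sketch of the corresponding size computation, following the virtio specification rather than DPDK's own helpers:

#include <stddef.h>
#include <stdint.h>

struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };
struct vring_used_elem { uint32_t id; uint32_t len; };

/* Bytes needed for a legacy split ring of num entries: the descriptor
 * table plus the avail ring (flags, idx, ring[num], used_event), padded
 * to align (a power of two), then the used ring (flags, idx, ring[num],
 * avail_event). */
static size_t sketch_vring_size(unsigned int num, size_t align)
{
	size_t sz = num * sizeof(struct vring_desc)
		+ sizeof(uint16_t) * (3 + num);		/* avail part */

	sz = (sz + align - 1) & ~(align - 1);		/* pad to align */
	return sz + sizeof(uint16_t) * 3
		+ num * sizeof(struct vring_used_elem);	/* used part */
}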
@@ -110,25 +110,31 @@ virtio_dev_vring_start(struct rte_eth_dev *dev, struct virtqueue *vq, int queue_type)
 	/* Only rx virtqueue needs mbufs to be allocated at initialization */
 	if (queue_type == VTNET_RQ) {
 		if (vq->mpool == NULL)
-			rte_exit(EXIT_FAILURE, "Cannot allocate initial mbufs for rx virtqueue\n");
-		/* Allocate blank mbufs for the each rx descriptor */
+			rte_exit(EXIT_FAILURE,
+				"Cannot allocate initial mbufs for rx virtqueue\n");
+
+		/* Allocate blank mbufs for the each rx descriptor */
 		nbufs = 0;
 		error = ENOSPC;
 		while (!virtqueue_full(vq)) {
 			m = rte_rxmbuf_alloc(vq->mpool);
 			if (m == NULL)
 				break;

 			/******************************************
 			*        Enqueue allocated buffers        *
 			*******************************************/
 			error = virtqueue_enqueue_recv_refill(vq, m);

 			if (error) {
 				rte_pktmbuf_free_seg(m);
 				break;
 			}
 			nbufs++;
 		}

 		vq_update_avail_idx(vq);

 		PMD_INIT_LOG(DEBUG, "Allocated %d bufs\n", nbufs);
 		VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL, VTNET_SQ_RQ_QUEUE_IDX);
 		VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
@@ -180,6 +186,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		PMD_INIT_LOG(ERR, "tvq initialization failed\n");
 		return ret;
 	}
+
 	/* Create mempool for rx mbuf allocation */
 	vq->mpool = mp;

@@ -254,29 +261,40 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
 	if (likely(num > DESC_PER_CACHELINE))
 		num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

-	if(num == 0) return 0;
+	if (num == 0)
+		return 0;

 	num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
 	for (i = 0; i < num ; i ++) {
 		rxm = rcv_pkts[i];

 		PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
-		if (unlikely(len[i] < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {
+
+		if (unlikely(len[i]
+			     < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop\n");
 			nb_enqueued++;
 			virtio_discard_rxbuf(rxvq, rxm);
 			hw->eth_stats.ierrors++;
 			continue;
 		}

 		rxm->pkt.in_port = rxvq->port_id;
 		rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 		rxm->pkt.nb_segs = 1;
 		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
-		rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
+		rxm->pkt.pkt_len = (uint32_t)(len[i]
+					      - sizeof(struct virtio_net_hdr));
+		rxm->pkt.data_len = (uint16_t)(len[i]
+					       - sizeof(struct virtio_net_hdr));

 		VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);

 		rx_pkts[nb_rx++] = rxm;
 		hw->eth_stats.ibytes += len[i] - sizeof(struct virtio_net_hdr);
 	}

 	hw->eth_stats.ipackets += nb_rx;

 	/* Allocate new mbuf for the used descriptor */
@@ -294,14 +312,15 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		}
 		nb_enqueued ++;
 	}
-	if(likely(nb_enqueued)) {
-		if(unlikely(virtqueue_kick_prepare(rxvq))) {
+	if (likely(nb_enqueued)) {
+		if (unlikely(virtqueue_kick_prepare(rxvq))) {
 			virtqueue_notify(rxvq);
 			PMD_RX_LOG(DEBUG, "Notified\n");
 		}
 	}

 	vq_update_avail_idx(rxvq);
+
 	return (nb_rx);
 }

@@ -332,6 +351,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		virtqueue_dequeue_pkt_tx(txvq);
 		num--;
 	}
+
 	if(!virtqueue_full(txvq)) {
 		txm = tx_pkts[nb_tx];
 		/* Enqueue Packet buffers */
@@ -360,5 +380,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		virtqueue_notify(txvq);
 		PMD_TX_LOG(DEBUG, "Notified backend after xmit\n");
 	}
+
 	return (nb_tx);
 }
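The rx and tx paths above both end with the standard virtio notification sequence: publish the updated avail index, then write the doorbell only if the device has not suppressed notifications. A minimal sketch of the check behind virtqueue_kick_prepare(), assuming the legacy flag-based scheme (not the event-index one):

#include <stdint.h>

#define VRING_USED_F_NO_NOTIFY 1	/* set by the device while it polls */

struct vring_used { uint16_t flags; uint16_t idx; /* used elems follow */ };

/* Returns nonzero when the driver should still kick the device: after
 * updating the avail ring, the (comparatively expensive) MMIO doorbell
 * write is skipped whenever the device advertises NO_NOTIFY. */
static int sketch_kick_prepare(const volatile struct vring_used *used)
{
	return !(used->flags & VRING_USED_F_NO_NOTIFY);
}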
@@ -242,7 +242,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
 		while (dp->flags & VRING_DESC_F_NEXT) {
-			desc_idx_last  = dp->next;
+			desc_idx_last = dp->next;
 			dp = &vq->vq_ring.desc[dp->next];
 		}
 	}
@@ -259,6 +259,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
 		dp_tail->next = desc_idx;
 	}
+
 	vq->vq_desc_tail_idx = desc_idx_last;
 	dp->next = VQ_RING_DESC_CHAIN_END;
 }
@@ -294,7 +295,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 	idx = start_dp[idx].next;
 	vq->vq_desc_head_idx = idx;
 	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-			vq->vq_desc_tail_idx = idx;
+		vq->vq_desc_tail_idx = idx;
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
 	vq_update_avail_ring(vq, head_idx);

@@ -335,7 +336,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	idx = start_dp[idx].next;
 	txvq->vq_desc_head_idx = idx;
 	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-			txvq->vq_desc_tail_idx = idx;
+		txvq->vq_desc_tail_idx = idx;
 	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
 	vq_update_avail_ring(txvq, head_idx);

@@ -357,11 +358,13 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
 		desc_idx = (uint16_t) uep->id;
 		len[i] = uep->len;
 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
 		if (unlikely(cookie == NULL)) {
 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
 				vq->vq_used_cons_idx);
 			break;
 		}
+
 		rte_prefetch0(cookie);
 		rte_packet_prefetch(cookie->pkt.data);
 		rx_pkts[i] = cookie;
@@ -369,22 +372,23 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
 		vq_ring_free_chain(vq, desc_idx);
 		vq->vq_descx[desc_idx].cookie = NULL;
 	}

 	return (i);
 }

 static inline uint16_t __attribute__((always_inline))
 virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
 {
-   struct vring_used_elem *uep;
-   uint16_t used_idx, desc_idx;
+	struct vring_used_elem *uep;
+	uint16_t used_idx, desc_idx;

-   used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-   uep = &vq->vq_ring.used->ring[used_idx];
-   desc_idx = (uint16_t) uep->id;
-   vq->vq_used_cons_idx++;
-   vq_ring_free_chain(vq, desc_idx);
+	used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+	uep = &vq->vq_ring.used->ring[used_idx];
+	desc_idx = (uint16_t) uep->id;
+	vq->vq_used_cons_idx++;
+	vq_ring_free_chain(vq, desc_idx);

-   return 0;
+	return 0;
 }

 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP