virtio: checkpatch cleanups

This fixes style problems reported by checkpatch, including:
  * extra whitespace
  * spaces before tabs
  * strings broken across lines
  * excessively long lines
  * missing spaces after keywords
  * unnecessary parentheses in return statements
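
A minimal before/after sketch of two of these fixes, the missing
keyword space and the parenthesized return value (hypothetical helper,
not code from this driver):

	/* before: checkpatch warns on both the "if(" and the "return (" */
	static int check_old(int status)
	{
		if(status != 0)
			return (-1);
		return (0);
	}

	/* after: space after the keyword; return is not a function */
	static int check_new(int status)
	{
		if (status != 0)
			return -1;
		return 0;
	}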

Signed-off-by: Stephen Hemminger <shemming@brocade.com>
Acked-by: Changchun Ouyang <changchun.ouyang@intel.com>
commit 14337d0b7a (parent 761e8034ac)
Author: Stephen Hemminger <shemming@brocade.com>
Date: 2014-06-12 18:32:40 -07:00
Committer: Thomas Monjalon
8 changed files with 115 additions and 103 deletions

File: virtio_ethdev.c

@@ -110,8 +110,9 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
ctrl->status = status;
if (!vq->hw->cvq) {
PMD_INIT_LOG(ERR, "%s(): Control queue is "
"not supported by this device.\n", __func__);
PMD_INIT_LOG(ERR,
"%s(): Control queue is not supported.\n",
__func__);
return -1;
}
@@ -257,10 +258,10 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
nb_desc = vq_size;
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "%s: virtqueue does not exist\n", __func__);
return (-EINVAL);
return -EINVAL;
} else if (!rte_is_power_of_2(vq_size)) {
PMD_INIT_LOG(ERR, "%s: virtqueue size is not powerof 2\n", __func__);
return (-EINVAL);
return -EINVAL;
} else if (nb_desc != vq_size) {
PMD_INIT_LOG(ERR, "Warning: nb_desc(%d) is not equal to vq size (%d), fall to vq size\n",
nb_desc, vq_size);
@@ -273,13 +274,13 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
} else if(queue_type == VTNET_TQ) {
} else if (queue_type == VTNET_TQ) {
rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
dev->data->port_id, queue_idx);
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
} else if(queue_type == VTNET_CQ) {
} else if (queue_type == VTNET_CQ) {
rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
dev->data->port_id);
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
@@ -311,7 +312,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
rte_free(vq);
return (-ENOMEM);
return -ENOMEM;
}
/*
@@ -319,10 +320,10 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
* and only accepts 32 bit page frame number.
* Check if the allocated physical memory exceeds 16TB.
*/
if ( (mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32) ) {
if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!\n");
rte_free(vq);
return (-ENOMEM);
return -ENOMEM;
}
memset(mz->addr, 0, sizeof(mz->len));
@@ -429,7 +430,7 @@ static struct eth_dev_ops virtio_eth_dev_ops = {
/* meaningfull only to multiple queue */
.tx_queue_release = virtio_dev_tx_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
};
static inline int
@@ -441,9 +442,9 @@ virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
*(uint64_t *)src) == 0)
return (-1);
return -1;
return (0);
return 0;
}
/**
@@ -467,9 +468,9 @@ virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
*(uint64_t *)src) == 0)
return (-1);
return -1;
return (0);
return 0;
}
static void
@@ -477,7 +478,7 @@ virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct virtio_hw *hw =
VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if(stats)
if (stats)
memcpy(stats, &hw->eth_stats, sizeof(*stats));
}
@@ -516,8 +517,9 @@ static void
virtio_negotiate_features(struct virtio_hw *hw)
{
uint32_t guest_features, mask;
mask = VIRTIO_NET_F_CTRL_RX | VIRTIO_NET_F_CTRL_VLAN;
mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM ;
mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
/* TSO and LRO are only available when their corresponding
* checksum offload feature is also negotiated.
@@ -559,7 +561,8 @@ parse_sysfs_value(const char *filename, unsigned long *val)
char buf[BUFSIZ];
char *end = NULL;
if ((f = fopen(filename, "r")) == NULL) {
f = fopen(filename, "r");
if (f == NULL) {
PMD_INIT_LOG(ERR, "%s(): cannot open sysfs value %s\n",
__func__, filename);
return -1;
@@ -592,14 +595,14 @@ static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen)
/* depending on kernel version, uio can be located in uio/uioX
* or uio:uioX */
rte_snprintf(dirname, sizeof(dirname),
SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
loc->domain, loc->bus, loc->devid, loc->function);
SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
if (dir == NULL) {
/* retry with the parent directory */
rte_snprintf(dirname, sizeof(dirname),
SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
if (dir == NULL) {
@@ -663,23 +666,23 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
struct virtio_hw *hw =
VIRTIO_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr) ) {
if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)) {
PMD_INIT_LOG(ERR,
"MBUF HEADROOM should be enough to hold virtio net hdr\n");
return (-1);
return -1;
}
if (! (rte_eal_get_configuration()->flags & EAL_FLG_HIGH_IOPL)) {
if (!(rte_eal_get_configuration()->flags & EAL_FLG_HIGH_IOPL)) {
PMD_INIT_LOG(ERR,
"IOPL call failed in EAL init - cannot use virtio PMD driver\n");
return (-1);
return -1;
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
if(rte_eal_process_type() == RTE_PROC_SECONDARY)
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
pci_dev = eth_dev->pci_dev;
@@ -690,7 +693,7 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
{
char dirname[PATH_MAX];
char filename[PATH_MAX];
unsigned long start,size;
unsigned long start, size;
if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname)) < 0)
return -1;
@@ -714,8 +717,9 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
}
pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
pci_dev->mem_resource[0].len = (uint64_t)size;
PMD_INIT_LOG(DEBUG, "PCI Port IO found start=0x%lx with "
"size=0x%lx\n", start, size);
PMD_INIT_LOG(DEBUG,
"PCI Port IO found start=0x%lx with size=0x%lx\n",
start, size);
}
#endif
hw->io_base = (uint32_t)(uintptr_t)pci_dev->mem_resource[0].addr;
@@ -731,7 +735,7 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
virtio_negotiate_features(hw);
/* Setting up rx_header size for the device */
if(vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
@@ -742,15 +746,17 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
ETHER_ADDR_LEN);
return (-ENOMEM);
return -ENOMEM;
}
/* Copy the permanent MAC address to: virtio_hw */
virtio_get_hwaddr(hw);
ether_addr_copy((struct ether_addr *) hw->mac_addr,
&eth_dev->data->mac_addrs[0]);
PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", hw->mac_addr[0],
hw->mac_addr[1],hw->mac_addr[2], hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
PMD_INIT_LOG(DEBUG,
"PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
config = &local_config;
@@ -758,16 +764,16 @@ eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
offset_conf += sizeof(config->status);
} else {
PMD_INIT_LOG(DEBUG, "VIRTIO_NET_F_STATUS is "
"not supported\n");
PMD_INIT_LOG(DEBUG,
"VIRTIO_NET_F_STATUS is not supported\n");
config->status = 0;
}
if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
offset_conf += sizeof(config->max_virtqueue_pairs);
} else {
PMD_INIT_LOG(DEBUG, "VIRTIO_NET_F_MQ is "
"not supported\n");
PMD_INIT_LOG(DEBUG,
"VIRTIO_NET_F_MQ is not supported\n");
config->max_virtqueue_pairs = 1;
}
@@ -828,7 +834,7 @@ static int
rte_virtio_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
{
rte_eth_driver_register(&rte_virtio_pmd);
return (0);
return 0;
}
/*
@@ -851,7 +857,7 @@ virtio_dev_tx_queue_release(__rte_unused void *txq)
static int
virtio_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
return (0);
return 0;
}
@@ -877,13 +883,13 @@ virtio_dev_start(struct rte_eth_dev *dev)
virtio_dev_rxtx_start(dev);
/* Check VIRTIO_NET_F_STATUS for link status*/
if(vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
if((status & VIRTIO_NET_S_LINK_UP) == 0) {
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
PMD_INIT_LOG(ERR, "Port: %d Link is DOWN\n", dev->data->port_id);
return (-EIO);
return -EIO;
} else {
PMD_INIT_LOG(DEBUG, "Port: %d Link is UP\n", dev->data->port_id);
}
@@ -920,9 +926,10 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
struct rte_mbuf *buf;
int i, mbuf_num = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
PMD_INIT_LOG(DEBUG, "Before freeing rxq[%d] used and "
"unused buf\n", i);
PMD_INIT_LOG(DEBUG,
"Before freeing rxq[%d] used and unused buf\n", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
@@ -932,14 +939,15 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
}
PMD_INIT_LOG(DEBUG, "free %d mbufs\n", mbuf_num);
PMD_INIT_LOG(DEBUG, "After freeing rxq[%d] used and "
"unused buf\n", i);
PMD_INIT_LOG(DEBUG,
"After freeing rxq[%d] used and unused buf\n", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
PMD_INIT_LOG(DEBUG, "Before freeing txq[%d] used and "
"unused bufs\n", i);
PMD_INIT_LOG(DEBUG,
"Before freeing txq[%d] used and unused bufs\n",
i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
mbuf_num = 0;
@@ -980,34 +988,37 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
memset(&link, 0, sizeof(link));
virtio_dev_atomic_read_link_status(dev, &link);
old = link;
link.link_duplex = FULL_DUPLEX ;
link.link_speed = SPEED_10G ;
if(vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
link.link_duplex = FULL_DUPLEX;
link.link_speed = SPEED_10G;
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw\n");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
if((status & VIRTIO_NET_S_LINK_UP) == 0) {
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
link.link_status = 0;
PMD_INIT_LOG(DEBUG, "Port %d is down\n",dev->data->port_id);
PMD_INIT_LOG(DEBUG, "Port %d is down\n",
dev->data->port_id);
} else {
link.link_status = 1;
PMD_INIT_LOG(DEBUG, "Port %d is up\n",dev->data->port_id);
PMD_INIT_LOG(DEBUG, "Port %d is up\n",
dev->data->port_id);
}
} else {
link.link_status = 1; //Link up
link.link_status = 1; /* Link up */
}
virtio_dev_atomic_write_link_status(dev, &link);
if(old.link_status == link.link_status)
return (-1);
if (old.link_status == link.link_status)
return -1;
/*changed*/
return (0);
return 0;
}
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct virtio_hw *hw = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
dev_info->driver_name = dev->driver->pci_drv.name;
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;

File: virtio_ethdev.h

@@ -123,7 +123,7 @@ struct virtio_adapter {
* via tcp_lro_rx().
*/
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
#endif /* _VIRTIO_ETHDEV_H_ */

File: virtio_pci.c

@@ -92,7 +92,7 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint32_t guest_features)
features = (hw->host_features) & guest_features;
VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_GUEST_FEATURES, features);
return (features);
return features;
}
@@ -116,7 +116,7 @@ vtpci_reinit_complete(struct virtio_hw *hw)
uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
return (VIRTIO_READ_REG_1(hw, VIRTIO_PCI_STATUS));
return VIRTIO_READ_REG_1(hw, VIRTIO_PCI_STATUS);
}
void

File: virtio_pci.h

@@ -65,12 +65,12 @@ struct virtqueue;
#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
* also clears the register (8, RO) */
#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
* also clears the register (8, RO) */
/* Only if MSIX is enabled: */
#define VIRTIO_MSI_CONFIG_VECTOR 20 /* configuration change vector (16, RW) */
#define VIRTIO_MSI_QUEUE_VECTOR 22 /* vector for selected VQ notifications
(16, RW) */
#define VIRTIO_MSI_QUEUE_VECTOR 22 /* vector for selected VQ notifications
(16, RW) */
/* The bit of the ISR which indicates a device has an interrupt. */
#define VIRTIO_PCI_ISR_INTR 0x1
@@ -255,7 +255,7 @@ outl_p(unsigned int data, unsigned int port)
static inline int
vtpci_with_feature(struct virtio_hw *hw, uint32_t feature)
{
return ((hw->guest_features & feature) != 0);
return (hw->guest_features & feature) != 0;
}
/*

File: virtio_ring.h

@@ -133,7 +133,7 @@ vring_size(unsigned int num, unsigned long align)
size = RTE_ALIGN_CEIL(size, align);
size += sizeof(struct vring_used) +
(num * sizeof(struct vring_used_elem));
return (size);
return size;
}
static inline void
@@ -145,7 +145,7 @@ vring_init(struct vring *vr, unsigned int num, uint8_t *p,
vr->avail = (struct vring_avail *) (p +
num * sizeof(struct vring_desc));
vr->used = (void *)
RTE_ALIGN_CEIL( (uintptr_t)(&vr->avail->ring[num]), align);
RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
}
/*

File: virtio_rxtx.c

@@ -54,7 +54,7 @@
#include "virtio_ethdev.h"
#include "virtqueue.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
@@ -68,7 +68,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
return (m);
return m;
}
static void
@@ -175,6 +175,7 @@ virtio_dev_rxtx_start(struct rte_eth_dev *dev)
*
*/
int i;
PMD_INIT_FUNC_TRACE();
/* Start rx vring. */
@@ -214,7 +215,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
vq->mpool = mp;
dev->data->rx_queues[queue_idx] = vq;
return (0);
return 0;
}
/*
@@ -244,7 +245,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
dev->data->tx_queues[queue_idx] = vq;
return (0);
return 0;
}
static void
@@ -285,17 +286,18 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (likely(num > DESC_PER_CACHELINE))
num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
if(num == 0) return 0;
if (num == 0)
return 0;
num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
for (i = 0; i < num ; i ++) {
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
if (unlikely(len[i]
< (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {
< (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop\n");
nb_enqueued++;
virtio_discard_rxbuf(rxvq, rxm);
@@ -308,9 +310,9 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->pkt.nb_segs = 1;
rxm->pkt.next = NULL;
rxm->pkt.pkt_len = (uint32_t)(len[i]
- sizeof(struct virtio_net_hdr));
- sizeof(struct virtio_net_hdr));
rxm->pkt.data_len = (uint16_t)(len[i]
- sizeof(struct virtio_net_hdr));
- sizeof(struct virtio_net_hdr));
VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
@@ -336,7 +338,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_pktmbuf_free_seg(new_mbuf);
break;
}
nb_enqueued ++;
nb_enqueued++;
}
if (likely(nb_enqueued)) {
if (unlikely(virtqueue_kick_prepare(rxvq))) {
@@ -347,7 +349,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
vq_update_avail_idx(rxvq);
return (nb_rx);
return nb_rx;
}
uint16_t
@@ -362,7 +364,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nb_tx = 0;
if (unlikely(nb_pkts < 1))
return (nb_pkts);
return nb_pkts;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
@@ -378,7 +380,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
num--;
}
if(!virtqueue_full(txvq)) {
if (!virtqueue_full(txvq)) {
txm = tx_pkts[nb_tx];
/* Enqueue Packet buffers */
error = virtqueue_enqueue_xmit(txvq, txm);
@@ -405,10 +407,10 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
hw->eth_stats.opackets += nb_tx;
hw->eth_stats.q_opackets[txvq->queue_id] += nb_tx;
if(unlikely(virtqueue_kick_prepare(txvq))) {
if (unlikely(virtqueue_kick_prepare(txvq))) {
virtqueue_notify(txvq);
PMD_TX_LOG(DEBUG, "Notified backend after xmit\n");
}
return (nb_tx);
return nb_tx;
}

File: virtqueue.c

@@ -60,11 +60,11 @@ virtqueue_detatch_unused(struct virtqueue *vq)
struct rte_mbuf *cookie;
int idx;
for(idx = 0; idx < vq->vq_nentries; idx++) {
for (idx = 0; idx < vq->vq_nentries; idx++) {
if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
vq->vq_descx[idx].cookie = NULL;
return cookie;
}
}
return (NULL);
return NULL;
}

File: virtqueue.h

@@ -209,12 +209,12 @@ void virtqueue_dump(struct virtqueue *vq);
/**
* Get all mbufs to be freed.
*/
struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
static inline int
virtqueue_full(const struct virtqueue *vq)
{
return (vq->vq_free_cnt == 0);
return vq->vq_free_cnt == 0;
}
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
@@ -243,7 +243,7 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
}
static inline int __attribute__((always_inline))
virtqueue_kick_prepare(struct virtqueue * vq)
virtqueue_kick_prepare(struct virtqueue *vq)
{
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
@@ -298,18 +298,17 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
{
struct vq_desc_extra *dxp;
struct vring_desc *start_dp;
uint16_t needed;
uint16_t needed = 1;
uint16_t head_idx, idx;
needed = 1;
if (unlikely(vq->vq_free_cnt == 0))
return (-ENOSPC);
return -ENOSPC;
if (unlikely(vq->vq_free_cnt < needed))
return (-EMSGSIZE);
return -EMSGSIZE;
head_idx = vq->vq_desc_head_idx;
if (unlikely(head_idx >= vq->vq_nentries))
return (-EFAULT);
return -EFAULT;
idx = head_idx;
dxp = &vq->vq_descx[idx];
@@ -328,7 +327,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
vq_update_avail_ring(vq, head_idx);
return (0);
return 0;
}
static inline int __attribute__((always_inline))
@@ -336,16 +335,16 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
struct vq_desc_extra *dxp;
struct vring_desc *start_dp;
uint16_t needed;
uint16_t needed = 2;
uint16_t head_idx, idx;
needed = 2;
if (unlikely(txvq->vq_free_cnt == 0))
return (-ENOSPC);
return -ENOSPC;
if (unlikely(txvq->vq_free_cnt < needed))
return (-EMSGSIZE);
return -EMSGSIZE;
head_idx = txvq->vq_desc_head_idx;
if (unlikely(head_idx >= txvq->vq_nentries))
return (-EFAULT);
return -EFAULT;
idx = head_idx;
dxp = &txvq->vq_descx[idx];
@@ -369,7 +368,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
vq_update_avail_ring(txvq, head_idx);
return (0);
return 0;
}
static inline uint16_t __attribute__((always_inline))
@@ -381,7 +380,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint
uint16_t i;
/* Caller does the check */
for (i = 0; i < num ; i ++) {
for (i = 0; i < num; i++) {
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
uep = &vq->vq_ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
@@ -402,7 +401,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint
vq->vq_descx[desc_idx].cookie = NULL;
}
return (i);
return i;
}
static inline uint16_t __attribute__((always_inline))
@@ -420,7 +419,7 @@ virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
return 0;
}
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
used_idx = (vq)->vq_ring.used->idx; \