vhost: coding style fixes

Fix serious coding style issues reported by checkpatch.
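
Most of the changes are mechanical: a space after keywords such as switch and while, function pointers declared as (*func)(args) instead of (* func) (args), pointer casts written as (type *), long lines and log messages wrapped, and do/while(0) wrapper macros losing the stray line continuation after the closing brace. An illustrative before/after fragment (the macro name here is made up, not taken from the tree):

    /* before: checkpatch warns about the missing space after "while"
     * and the dangling line continuation on the closing line */
    #define CLAMP_TO_BURST(n) do { \
            if ((n) > MAX_PKT_BURST) \
                    (n) = MAX_PKT_BURST; \
    } while(0) \

    /* after */
    #define CLAMP_TO_BURST(n) do { \
            if ((n) > MAX_PKT_BURST) \
                    (n) = MAX_PKT_BURST; \
    } while (0)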

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Changchun Ouyang <changchun.ouyang@intel.com>
Huawei Xie 2014-10-09 02:54:57 +08:00 committed by Thomas Monjalon
parent 38726fb1b6
commit 60ddca7654
5 changed files with 294 additions and 268 deletions


@@ -55,78 +55,75 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
 #define BUF_VECTOR_MAX 256
-/*
+/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
 struct buf_vector {
 uint64_t buf_addr;
 uint32_t buf_len;
 uint32_t desc_idx;
 };
-/*
-* Structure contains variables relevant to TX/RX virtqueues.
+/**
+* Structure contains variables relevant to RX/TX virtqueues.
 */
-struct vhost_virtqueue
-{
-struct vring_desc *desc; /* Virtqueue descriptor ring. */
-struct vring_avail *avail; /* Virtqueue available ring. */
-struct vring_used *used; /* Virtqueue used ring. */
-uint32_t size; /* Size of descriptor ring. */
-uint32_t backend; /* Backend value to determine if device should started/stopped. */
-uint16_t vhost_hlen; /* Vhost header length (varies depending on RX merge buffers. */
-volatile uint16_t last_used_idx; /* Last index used on the available ring */
-volatile uint16_t last_used_idx_res; /* Used for multiple devices reserving buffers. */
-eventfd_t callfd; /* Currently unused as polling mode is enabled. */
-eventfd_t kickfd; /* Used to notify the guest (trigger interrupt). */
-/* Used for scatter RX. */
-struct buf_vector buf_vec[BUF_VECTOR_MAX];
+struct vhost_virtqueue {
+struct vring_desc *desc; /**< Virtqueue descriptor ring. */
+struct vring_avail *avail; /**< Virtqueue available ring. */
+struct vring_used *used; /**< Virtqueue used ring. */
+uint32_t size; /**< Size of descriptor ring. */
+uint32_t backend; /**< Backend value to determine if device should started/stopped. */
+uint16_t vhost_hlen; /**< Vhost header length (varies depending on RX merge buffers. */
+volatile uint16_t last_used_idx; /**< Last index used on the available ring */
+volatile uint16_t last_used_idx_res; /**< Used for multiple devices reserving buffers. */
+eventfd_t callfd; /**< Currently unused as polling mode is enabled. */
+eventfd_t kickfd; /**< Used to notify the guest (trigger interrupt). */
+struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
 } __rte_cache_aligned;
-/*
+/**
 * Device structure contains all configuration information relating to the device.
 */
 struct virtio_net
 {
-struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /* Contains all virtqueue information. */
-struct virtio_memory *mem; /* QEMU memory and memory region information. */
-uint64_t features; /* Negotiated feature set. */
-uint64_t device_fh; /* device identifier. */
-uint32_t flags; /* Device flags. Only used to check if device is running on data core. */
+struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */
+struct virtio_memory *mem; /**< QEMU memory and memory region information. */
+uint64_t features; /**< Negotiated feature set. */
+uint64_t device_fh; /**< device identifier. */
+uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
 void *priv; /**< private context */
 } __rte_cache_aligned;
-/*
+/**
 * Information relating to memory regions including offsets to addresses in QEMUs memory file.
 */
 struct virtio_memory_regions {
-uint64_t guest_phys_address; /* Base guest physical address of region. */
-uint64_t guest_phys_address_end; /* End guest physical address of region. */
-uint64_t memory_size; /* Size of region. */
-uint64_t userspace_address; /* Base userspace address of region. */
-uint64_t address_offset; /* Offset of region for address translation. */
+uint64_t guest_phys_address; /**< Base guest physical address of region. */
+uint64_t guest_phys_address_end; /**< End guest physical address of region. */
+uint64_t memory_size; /**< Size of region. */
+uint64_t userspace_address; /**< Base userspace address of region. */
+uint64_t address_offset; /**< Offset of region for address translation. */
 };
-/*
+/**
 * Memory structure includes region and mapping information.
 */
 struct virtio_memory {
-uint64_t base_address; /* Base QEMU userspace address of the memory file. */
-uint64_t mapped_address; /* Mapped address of memory file base in our applications memory space. */
-uint64_t mapped_size; /* Total size of memory file. */
-uint32_t nregions; /* Number of memory regions. */
-/* Memory region information. */
-struct virtio_memory_regions regions[0];
+uint64_t base_address; /**< Base QEMU userspace address of the memory file. */
+uint64_t mapped_address; /**< Mapped address of memory file base in our applications memory space. */
+uint64_t mapped_size; /**< Total size of memory file. */
+uint32_t nregions; /**< Number of memory regions. */
+struct virtio_memory_regions regions[0]; /**< Memory region information. */
 };
-/*
+/**
 * Device operations to add/remove device.
 */
 struct virtio_net_device_ops {
-int (* new_device) (struct virtio_net *); /* Add device. */
-void (* destroy_device) (volatile struct virtio_net *); /* Remove device. */
+int (*new_device)(struct virtio_net *); /**< Add device. */
+void (*destroy_device)(volatile struct virtio_net *); /**< Remove device. */
 };
 static inline uint16_t __attribute__((always_inline))
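
The header comments above also switch from plain C comments to Doxygen markup: /** opens a block comment that documents the declaration following it, and /**< attaches a comment to the member on its left. A minimal illustration with a made-up structure (not part of the library):

    /**
     * Block comment: documents the structure declared below it.
     */
    struct example_stats {
            uint64_t rx_packets; /**< Member comment: documents the field to its left. */
            uint64_t tx_packets; /**< The "<" ties the text to the preceding member. */
    };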


@@ -116,7 +116,7 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
 #define VHOST_IOCTL(func) do { \
 result = (func)(ctx); \
 fuse_reply_ioctl(req, result, NULL, 0); \
-} while(0) \
+} while (0)
 /*
 * Boilerplate IOCTL RETRY
@@ -126,7 +126,7 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
 struct iovec iov_r = { arg, (size_r) }; \
 struct iovec iov_w = { arg, (size_w) }; \
 fuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0); \
-} while(0) \
+} while (0)
 /*
 * Boilerplate code for CUSE Read IOCTL
@@ -136,11 +136,11 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
 if (!in_bufsz) { \
 VHOST_IOCTL_RETRY(sizeof(type), 0); \
 } else { \
-(var) = *(const type * ) in_buf; \
+(var) = *(const type*) in_buf; \
 result = func(ctx, &(var)); \
 fuse_reply_ioctl(req, result, NULL, 0); \
 } \
-} while(0) \
+} while (0)
 /*
 * Boilerplate code for CUSE Write IOCTL
@@ -153,7 +153,7 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
 result = (func)(ctx, &(var)); \
 fuse_reply_ioctl(req, result, &(var), sizeof(type)); \
 } \
-} while(0) \
+} while (0)
 /*
 * Boilerplate code for CUSE Read/Write IOCTL
@@ -163,11 +163,11 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
 if (!in_bufsz) { \
 VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2)); \
 } else { \
-(var1) = *(const type1* ) (in_buf); \
+(var1) = *(const type1*) (in_buf); \
 result = (func)(ctx, (var1), &(var2)); \
 fuse_reply_ioctl(req, result, &(var2), sizeof(type2)); \
 } \
-} while(0) \
+} while (0)
 /*
 * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on
@@ -187,8 +187,7 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 uint32_t index;
 int result = 0;
-switch(cmd)
-{
+switch (cmd) {
 case VHOST_NET_SET_BACKEND:
 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
 VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
@@ -218,7 +217,7 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
 static struct vhost_memory mem_temp;
-switch(in_bufsz){
+switch (in_bufsz) {
 case 0:
 VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
 break;
@@ -281,11 +280,10 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 fuse_reply_ioctl(req, result, NULL, 0);
 }
-if (result < 0) {
+if (result < 0)
 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
-} else {
+else
 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
-}
 }
 /*
@@ -349,7 +347,7 @@ rte_vhost_driver_register(const char *dev_name)
 return 0;
 }
-/*
+/**
 * The CUSE session is launched allowing the application to receive open, release and ioctl calls.
 */
 int
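
The VHOST_IOCTL* wrappers above keep the do { ... } while (0) form so that a multi-statement macro behaves as a single statement and still requires a semicolon at the call site. A small, compilable sketch of why the wrapper matters (names are illustrative):

    #include <stdio.h>

    /* The do/while(0) wrapper turns both statements into one, so the macro
     * expands safely inside an unbraced if/else. */
    #define LOG_AND_COUNT(msg, counter) do { \
            printf("%s\n", (msg)); \
            (counter)++; \
    } while (0)

    int main(void)
    {
            int failures = 0;

            if (failures == 0)
                    LOG_AND_COUNT("nothing failed yet", failures);
            else
                    LOG_AND_COUNT("already failed", failures);

            return failures;
    }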


@@ -76,8 +76,7 @@
 /*
 * Structure used to identify device context.
 */
-struct vhost_device_ctx
-{
+struct vhost_device_ctx {
 pid_t pid; /* PID of process calling the IOCTL. */
 uint64_t fh; /* Populated with fi->fh to track the device index. */
 };
@@ -87,26 +86,26 @@ struct vhost_device_ctx
 * functions are called in CUSE context and are used to configure devices.
 */
 struct vhost_net_device_ops {
-int (* new_device) (struct vhost_device_ctx);
-void (* destroy_device) (struct vhost_device_ctx);
-int (* get_features) (struct vhost_device_ctx, uint64_t *);
-int (* set_features) (struct vhost_device_ctx, uint64_t *);
-int (* set_mem_table) (struct vhost_device_ctx, const void *, uint32_t);
-int (* set_vring_num) (struct vhost_device_ctx, struct vhost_vring_state *);
-int (* set_vring_addr) (struct vhost_device_ctx, struct vhost_vring_addr *);
-int (* set_vring_base) (struct vhost_device_ctx, struct vhost_vring_state *);
-int (* get_vring_base) (struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
-int (* set_vring_kick) (struct vhost_device_ctx, struct vhost_vring_file *);
-int (* set_vring_call) (struct vhost_device_ctx, struct vhost_vring_file *);
-int (* set_backend) (struct vhost_device_ctx, struct vhost_vring_file *);
-int (* set_owner) (struct vhost_device_ctx);
-int (* reset_owner) (struct vhost_device_ctx);
+int (*new_device)(struct vhost_device_ctx);
+void (*destroy_device)(struct vhost_device_ctx);
+int (*get_features)(struct vhost_device_ctx, uint64_t *);
+int (*set_features)(struct vhost_device_ctx, uint64_t *);
+int (*set_mem_table)(struct vhost_device_ctx, const void *, uint32_t);
+int (*set_vring_num)(struct vhost_device_ctx, struct vhost_vring_state *);
+int (*set_vring_addr)(struct vhost_device_ctx, struct vhost_vring_addr *);
+int (*set_vring_base)(struct vhost_device_ctx, struct vhost_vring_state *);
+int (*get_vring_base)(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
+int (*set_vring_kick)(struct vhost_device_ctx, struct vhost_vring_file *);
+int (*set_vring_call)(struct vhost_device_ctx, struct vhost_vring_file *);
+int (*set_backend)(struct vhost_device_ctx, struct vhost_vring_file *);
+int (*set_owner)(struct vhost_device_ctx);
+int (*reset_owner)(struct vhost_device_ctx);
 };


@@ -40,22 +40,23 @@
 #include "vhost-net-cdev.h"
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
+#define MAX_PKT_BURST 32
-/*
+/**
 * This function adds buffers to the virtio devices RX virtqueue. Buffers can
 * be received from the physical port or from another virtio device. A packet
 * count is returned to indicate the number of packets that were succesfully
 * added to the RX queue. This function works when mergeable is disabled.
 */
 static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+struct rte_mbuf **pkts, uint32_t count)
 {
 struct vhost_virtqueue *vq;
 struct vring_desc *desc;
 struct rte_mbuf *buff;
 /* The virtio_hdr is initialised to 0. */
-struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
+struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 uint64_t buff_addr = 0;
 uint64_t buff_hdr_addr = 0;
 uint32_t head[MAX_PKT_BURST], packet_len = 0;
@@ -74,7 +75,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
 vq = dev->virtqueue[VIRTIO_RXQ];
 count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
-/* As many data cores may want access to available buffers, they need to be reserved. */
+/*
+* As many data cores may want access to available buffers,
+* they need to be reserved.
+*/
 do {
 res_base_idx = vq->last_used_idx_res;
 avail_idx = *((volatile uint16_t *)&vq->avail->idx);
@@ -89,18 +93,20 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
 res_end_idx = res_base_idx + count;
 /* vq->last_used_idx_res is atomically updated. */
-success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
-res_end_idx);
+success = rte_atomic16_cmpset(&vq->last_used_idx_res,
+res_base_idx, res_end_idx);
 } while (unlikely(success == 0));
 res_cur_idx = res_base_idx;
-LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+dev->device_fh, res_cur_idx, res_end_idx);
 /* Prefetch available ring to retrieve indexes. */
 rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
 /* Retrieve all of the head indexes first to avoid caching issues. */
 for (head_idx = 0; head_idx < count; head_idx++)
-head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
+head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
+(vq->size - 1)];
 /*Prefetch descriptor index. */
 rte_prefetch0(&vq->desc[head[packet_success]]);
@@ -114,7 +120,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
 /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
 buff_addr = gpa_to_vva(dev, desc->addr);
 /* Prefetch buffer address. */
-rte_prefetch0((void*)(uintptr_t)buff_addr);
+rte_prefetch0((void *)(uintptr_t)buff_addr);
 /* Copy virtio_hdr to packet and increment buffer address */
 buff_hdr_addr = buff_addr;
@@ -176,9 +182,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
 }
 static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev,
-uint16_t res_base_idx, uint16_t res_end_idx,
-struct rte_mbuf *pkt)
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
+uint16_t res_end_idx, struct rte_mbuf *pkt)
 {
 uint32_t vec_idx = 0;
 uint32_t entry_success = 0;
@@ -388,8 +393,8 @@ copy_from_mbuf_to_vring(struct virtio_net *dev,
 * added to the RX queue. This function works for mergeable RX.
 */
 static inline uint32_t __attribute__((always_inline))
-virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
-uint32_t count)
+virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
+struct rte_mbuf **pkts, uint32_t count)
 {
 struct vhost_virtqueue *vq;
 uint32_t pkt_idx = 0, entry_success = 0;
@@ -509,7 +514,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf *
 }
 uint16_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint16_t count)
+rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+struct rte_mbuf **pkts, uint16_t count)
 {
 if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
 return virtio_dev_merge_rx(dev, queue_id, pkts, count);
@@ -518,7 +524,8 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mb
 }
 uint16_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
 {
 struct rte_mbuf *m, *prev;
 struct vhost_virtqueue *vq;
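
For context on the hunks above: virtio_dev_rx() first reserves a slice of the available ring with rte_atomic16_cmpset() so that several cores can enqueue to the same virtqueue without overlapping. A standalone sketch of that reservation loop, using C11 atomics instead of the DPDK primitive purely to stay self-contained (the function name is made up):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Claim up to "count" ring entries starting at *res_base by advancing a
     * shared reservation index with compare-and-swap, retrying on contention.
     * Returns the number of entries actually reserved (0 if none are free). */
    static uint16_t
    reserve_ring_slots(_Atomic uint16_t *last_used_idx_res, uint16_t avail_idx,
                    uint16_t count, uint16_t *res_base)
    {
            uint16_t base, free_entries;

            do {
                    base = atomic_load(last_used_idx_res);
                    free_entries = (uint16_t)(avail_idx - base);
                    if (count > free_entries)
                            count = free_entries;
                    if (count == 0)
                            return 0;
            } while (!atomic_compare_exchange_weak(last_used_idx_res, &base,
                            (uint16_t)(base + count)));

            *res_base = base;
            return count;
    }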


@@ -56,16 +56,16 @@
 * Device linked list structure for configuration.
 */
 struct virtio_net_config_ll {
-struct virtio_net dev; /* Virtio device. */
-struct virtio_net_config_ll *next; /* Next entry on linked list. */
+struct virtio_net dev; /* Virtio device.*/
+struct virtio_net_config_ll *next; /* Next entry on linked list.*/
 };
 const char eventfd_cdev[] = "/dev/eventfd-link";
 /* device ops to add/remove device to data core. */
-static struct virtio_net_device_ops const * notify_ops;
+static struct virtio_net_device_ops const *notify_ops;
 /* Root address of the linked list in the configuration core. */
-static struct virtio_net_config_ll *ll_root = NULL;
+static struct virtio_net_config_ll *ll_root;
 /* Features supported by this application. RX merge buffers are enabled by default. */
 #define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
@@ -81,8 +81,7 @@ const uint32_t BUFSIZE = PATH_MAX;
 #define PROCMAP_SZ 8
 /* Structure containing information gathered from maps file. */
-struct procmap
-{
+struct procmap {
 uint64_t va_start; /* Start virtual address in file. */
 uint64_t len; /* Size of file. */
 uint64_t pgoff; /* Not used. */
@@ -90,7 +89,7 @@ struct procmap
 uint32_t min; /* Not used. */
 uint32_t ino; /* Not used. */
 char prot[PROT_SZ]; /* Not used. */
-char fname[PATH_MAX]; /* File name. */
+char fname[PATH_MAX];/* File name. */
 };
 /*
@@ -110,7 +109,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
 if ((qemu_va >= region->userspace_address) &&
 (qemu_va <= region->userspace_address +
 region->memory_size)) {
-vhost_va = dev->mem->mapped_address + qemu_va - dev->mem->base_address;
+vhost_va = dev->mem->mapped_address + qemu_va -
+dev->mem->base_address;
 break;
 }
 }
@@ -121,7 +121,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
 * Locate the file containing QEMU's memory space and map it to our address space.
 */
 static int
-host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)
+host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
+pid_t pid, uint64_t addr)
 {
 struct dirent *dptr = NULL;
 struct procmap procmap;
@@ -141,13 +142,15 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 char *end = NULL;
 /* Path where mem files are located. */
-snprintf (procdir, PATH_MAX, "/proc/%u/fd/", pid);
+snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
 /* Maps file used to locate mem file. */
-snprintf (mapfile, PATH_MAX, "/proc/%u/maps", pid);
+snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);
 fmap = fopen(mapfile, "r");
 if (fmap == NULL) {
-RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open maps file for pid %d\n", dev->device_fh, pid);
+RTE_LOG(ERR, VHOST_CONFIG,
+"(%"PRIu64") Failed to open maps file for pid %d\n",
+dev->device_fh, pid);
 return -1;
 }
@@ -157,7 +160,8 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 errno = 0;
 /* Split line in to fields. */
 for (i = 0; i < PROCMAP_SZ; i++) {
-if (((in[i] = strtok_r(str, &dlm[i], &sp)) == NULL) || (errno != 0)) {
+in[i] = strtok_r(str, &dlm[i], &sp);
+if ((in[i] == NULL) || (errno != 0)) {
 fclose(fmap);
 return -1;
 }
@@ -220,7 +224,7 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 /* Find the guest memory file among the process fds. */
 dp = opendir(procdir);
 if (dp == NULL) {
-RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory \n", dev->device_fh, pid);
+RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
 return -1;
 }
@@ -229,7 +233,8 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 /* Read the fd directory contents. */
 while (NULL != (dptr = readdir(dp))) {
-snprintf (memfile, PATH_MAX, "/proc/%u/fd/%s", pid, dptr->d_name);
+snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
+pid, dptr->d_name);
 realpath(memfile, resolved_path);
 if (resolved_path == NULL) {
 RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
@@ -257,8 +262,9 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
 return -1;
 }
-map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE , MAP_POPULATE|MAP_SHARED, fd, 0);
-close (fd);
+map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE ,
+MAP_POPULATE|MAP_SHARED, fd, 0);
+close(fd);
 if (map == MAP_FAILED) {
 RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n", dev->device_fh, memfile, pid);
@@ -304,9 +310,8 @@ get_device(struct vhost_device_ctx ctx)
 ll_dev = get_config_ll_entry(ctx);
 /* If a matching entry is found in the linked list, return the device in that entry. */
-if (ll_dev) {
+if (ll_dev)
 return &ll_dev->dev;
-}
 RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
 return NULL;
@@ -351,7 +356,8 @@ cleanup_device(struct virtio_net *dev)
 {
 /* Unmap QEMU memory file if mapped. */
 if (dev->mem) {
-munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+munmap((void *)(uintptr_t)dev->mem->mapped_address,
+(size_t)dev->mem->mapped_size);
 free(dev->mem);
 }
@@ -381,7 +387,8 @@ free_device(struct virtio_net_config_ll *ll_dev)
 * Remove an entry from the device configuration linked list.
 */
 static struct virtio_net_config_ll *
-rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)
+rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
+struct virtio_net_config_ll *ll_dev_last)
 {
 /* First remove the device and then clean it up. */
 if (ll_dev == ll_root) {
@@ -398,7 +405,8 @@ rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config
 } else {
 cleanup_device(&ll_dev->dev);
 free_device(ll_dev);
-RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from config_ll failed\n");
+RTE_LOG(ERR, VHOST_CONFIG,
+"Remove entry from config_ll failed\n");
 return NULL;
 }
 }
@@ -416,7 +424,7 @@ init_device(struct virtio_net *dev)
 vq_offset = offsetof(struct virtio_net, mem);
 /* Set everything to 0. */
-memset((void*)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
+memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
 (sizeof(struct virtio_net) - (size_t)vq_offset));
 memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
 memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
@@ -440,14 +448,18 @@ new_device(struct vhost_device_ctx ctx)
 /* Setup device and virtqueues. */
 new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
 if (new_ll_dev == NULL) {
-RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev.\n", ctx.fh);
+RTE_LOG(ERR, VHOST_CONFIG,
+"(%"PRIu64") Failed to allocate memory for dev.\n",
+ctx.fh);
 return -1;
 }
 virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
 if (virtqueue_rx == NULL) {
 free(new_ll_dev);
-RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_rx.\n", ctx.fh);
+RTE_LOG(ERR, VHOST_CONFIG,
+"(%"PRIu64") Failed to allocate memory for rxq.\n",
+ctx.fh);
 return -1;
 }
@@ -455,7 +467,9 @@ new_device(struct vhost_device_ctx ctx)
 if (virtqueue_tx == NULL) {
 free(virtqueue_rx);
 free(new_ll_dev);
-RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_tx.\n", ctx.fh);
+RTE_LOG(ERR, VHOST_CONFIG,
+"(%"PRIu64") Failed to allocate memory for txq.\n",
+ctx.fh);
 return -1;
 }
@@ -573,12 +587,16 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
 /* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
 if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
-dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+sizeof(struct virtio_net_hdr_mrg_rxbuf);
+dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+sizeof(struct virtio_net_hdr_mrg_rxbuf);
 } else {
 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
-dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
-dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
+dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+sizeof(struct virtio_net_hdr);
+dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+sizeof(struct virtio_net_hdr);
 }
 return 0;
 }
@@ -590,7 +608,8 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
 * storing offsets used to translate buffer addresses.
 */
 static int
-set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_t nregions)
+set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
+uint32_t nregions)
 {
 struct virtio_net *dev;
 struct vhost_memory_region *mem_regions;
@@ -603,12 +622,14 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 return -1;
 if (dev->mem) {
-munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+munmap((void *)(uintptr_t)dev->mem->mapped_address,
+(size_t)dev->mem->mapped_size);
 free(dev->mem);
 }
 /* Malloc the memory structure depending on the number of regions. */
-mem = calloc(1, sizeof(struct virtio_memory) + (sizeof(struct virtio_memory_regions) * nregions));
+mem = calloc(1, sizeof(struct virtio_memory) +
+(sizeof(struct virtio_memory_regions) * nregions));
 if (mem == NULL) {
 RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
 return -1;
@@ -616,19 +637,24 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 mem->nregions = nregions;
-mem_regions = (void*)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);
+mem_regions = (void *)(uintptr_t)
+((uint64_t)(uintptr_t)mem_regions_addr + size);
 for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
 /* Populate the region structure for each region. */
-mem->regions[regionidx].guest_phys_address = mem_regions[regionidx].guest_phys_addr;
-mem->regions[regionidx].guest_phys_address_end = mem->regions[regionidx].guest_phys_address +
+mem->regions[regionidx].guest_phys_address =
+mem_regions[regionidx].guest_phys_addr;
+mem->regions[regionidx].guest_phys_address_end =
+mem->regions[regionidx].guest_phys_address +
 mem_regions[regionidx].memory_size;
-mem->regions[regionidx].memory_size = mem_regions[regionidx].memory_size;
-mem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;
+mem->regions[regionidx].memory_size =
+mem_regions[regionidx].memory_size;
+mem->regions[regionidx].userspace_address =
+mem_regions[regionidx].userspace_addr;
 LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
-regionidx, (void*)(uintptr_t)mem->regions[regionidx].guest_phys_address,
-(void*)(uintptr_t)mem->regions[regionidx].userspace_address,
+regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
+(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
 mem->regions[regionidx].memory_size);
 /*set the base address mapping*/
@@ -728,19 +754,19 @@ set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
 vq = dev->virtqueue[addr->index];
 /* The addresses are converted from QEMU virtual to Vhost virtual. */
-vq->desc = (struct vring_desc*)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
+vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
 if (vq->desc == 0) {
 RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
 return -1;
 }
-vq->avail = (struct vring_avail*)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
+vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
 if (vq->avail == 0) {
 RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
 return -1;
 }
-vq->used = (struct vring_used*)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
+vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
 if (vq->used == 0) {
 RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
 return -1;
@@ -778,7 +804,8 @@ set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
 * We send the virtio device our available ring last used index.
 */
 static int
-get_vring_base(struct vhost_device_ctx ctx, uint32_t index, struct vhost_vring_state *state)
+get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
+struct vhost_vring_state *state)
 {
 struct virtio_net *dev;
@@ -903,9 +930,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 struct virtio_net *dev;
 dev = get_device(ctx);
-if (dev == NULL) {
+if (dev == NULL)
 return -1;
-}
 /* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
 dev->virtqueue[file->index]->backend = file->fd;
@@ -917,9 +943,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 return notify_ops->new_device(dev);
 /* Otherwise we remove it. */
 } else
-if (file->fd == VIRTIO_DEV_STOPPED) {
+if (file->fd == VIRTIO_DEV_STOPPED)
 notify_ops->destroy_device(dev);
-}
 return 0;
 }
@@ -927,8 +952,7 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 * Function pointers are set for the device operations to allow CUSE to call functions
 * when an IOCTL, device_add or device_release is received.
 */
-static const struct vhost_net_device_ops vhost_device_ops =
-{
+static const struct vhost_net_device_ops vhost_device_ops = {
 .new_device = new_device,
 .destroy_device = destroy_device,
@@ -960,7 +984,8 @@ get_virtio_net_callbacks(void)
 return &vhost_device_ops;
 }
-int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable)
+int rte_vhost_enable_guest_notification(struct virtio_net *dev,
+uint16_t queue_id, int enable)
 {
 if (enable) {
 RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n");