rte_virtio: hide some internal functions

Moved some vq-related functions to virtio.c, which is the only
place they're used.

This patch also removes the virtqueue_full() function, which is
not used anywhere.

Note: all function bodies have been moved 1:1.

Change-Id: Ib854fc0836378e6955c0a7358ecabcf2c3107d06
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/385628
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
4 changed files with 139 additions and 147 deletions


@@ -54,6 +54,18 @@
#include <rte_prefetch.h>
#include "spdk/env.h"
#include "spdk/barrier.h"

/*
 * Per virtio_config.h in Linux.
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so smp_mb() et al are
 * sufficient.
 */
#define virtio_mb() rte_smp_mb()
#define virtio_rmb() rte_smp_rmb()
#define virtio_wmb() rte_smp_wmb()
#include "virtio.h"
@@ -63,6 +75,58 @@ struct virtio_driver g_virtio_driver = {
        .ctrlr_counter = 0,
};

/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init(struct vring_desc *dp, uint16_t n)
{
        uint16_t i;

        for (i = 0; i < n - 1; i++)
                dp[i].next = (uint16_t)(i + 1);
        dp[i].next = VQ_RING_DESC_CHAIN_END;
}

/**
 * Tell the backend not to interrupt us.
 */
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
        vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))

static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
        virtio_wmb();
        vq->vq_ring.avail->idx = vq->vq_avail_idx;
}

static inline void
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
        if (spdk_unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
                vq->vq_ring.avail->ring[avail_idx] = desc_idx;
        vq->vq_avail_idx++;
}

static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
        return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}

static void
virtio_init_vring(struct virtqueue *vq)
{

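A note on the VIRTQUEUE_NUSED macro moved above: used->idx and
vq_used_cons_idx are free-running uint16_t counters, so the
cast-and-subtract stays correct even after the device's index wraps past
UINT16_MAX. A minimal standalone sketch of the arithmetic (illustration
only, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* The device has produced 6 entries more than the driver has
         * consumed, and its free-running index has already wrapped. */
        uint16_t used_idx = 3;
        uint16_t used_cons_idx = 65533;

        /* Same cast-and-subtract as VIRTQUEUE_NUSED: (3 - 65533) wraps to 6. */
        assert((uint16_t)(used_idx - used_cons_idx) == 6);
        return 0;
}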

@@ -41,7 +41,6 @@
#include <linux/virtio_config.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include "spdk_internal/log.h"
@@ -50,17 +49,6 @@
#include "spdk/json.h"
#include "spdk/io_channel.h"

/*
 * Per virtio_config.h in Linux.
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so smp_mb() et al are
 * sufficient.
 */
#define virtio_mb() rte_smp_mb()
#define virtio_rmb() rte_smp_rmb()
#define virtio_wmb() rte_smp_wmb()
#define VIRTQUEUE_MAX_NAME_SZ 32
/**
@@ -249,64 +237,6 @@ int virtio_dev_init(struct virtio_dev *hw, uint64_t req_features);
void virtio_dev_free(struct virtio_dev *dev);
int virtio_dev_start(struct virtio_dev *hw);
/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init(struct vring_desc *dp, uint16_t n)
{
        uint16_t i;

        for (i = 0; i < n - 1; i++)
                dp[i].next = (uint16_t)(i + 1);
        dp[i].next = VQ_RING_DESC_CHAIN_END;
}

/**
 * Tell the backend not to interrupt us.
 */
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
        vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

static inline int
virtqueue_full(const struct virtqueue *vq)
{
        return vq->vq_free_cnt == 0;
}

#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))

static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
        virtio_wmb();
        vq->vq_ring.avail->idx = vq->vq_avail_idx;
}

static inline void
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
        if (spdk_unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
                vq->vq_ring.avail->ring[avail_idx] = desc_idx;
        vq->vq_avail_idx++;
}

static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
        return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}

/**
 * Bind a virtqueue with given index to the current thread;
 *
@@ -463,6 +393,4 @@ struct virtio_dev *virtio_user_dev_init(const char *name, const char *path,
                                        uint16_t requested_queues,
                                        uint32_t queue_size, uint16_t fixed_queue_num);

extern const struct virtio_dev_ops virtio_user_ops;

#endif /* SPDK_VIRTIO_H */


@@ -293,7 +293,7 @@ modern_notify_queue(struct virtio_dev *dev, struct virtqueue *vq)
        spdk_mmio_write_2(vq->notify_addr, vq->vq_queue_index);
}

-const struct virtio_dev_ops modern_ops = {
+static const struct virtio_dev_ops modern_ops = {
        .read_dev_cfg = modern_read_dev_config,
        .write_dev_cfg = modern_write_dev_config,
        .get_status = modern_get_status,

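The helpers hidden in virtio.c and the notify_queue op above compose into
the usual virtqueue submit sequence, which after this patch only virtio.c
can implement. A hedged sketch of that sequence (the function layout and
variable names are illustrative; only the helper and op names come from
this diff):

/* Hypothetical submit path inside virtio.c, assuming `ops` points at the
 * device's virtio_dev_ops table (e.g. modern_ops). */
static void
submit_sketch(const struct virtio_dev_ops *ops, struct virtio_dev *vdev,
              struct virtqueue *vq, uint16_t desc_head_idx)
{
        vq_update_avail_ring(vq, desc_head_idx); /* expose the chain head */
        vq_update_avail_idx(vq);                 /* publish idx after virtio_wmb() */
        if (virtqueue_kick_prepare(vq))          /* host did not suppress kicks */
                ops->notify_queue(vdev, vq);     /* e.g. the MMIO write above */
}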

@@ -178,79 +178,6 @@ virtio_user_dev_setup(struct virtio_dev *vdev)
        return 0;
}

struct virtio_dev *
virtio_user_dev_init(const char *name, const char *path, uint16_t requested_queues,
                     uint32_t queue_size, uint16_t fixed_queue_num)
{
        struct virtio_dev *vdev;
        struct virtio_user_dev *dev;
        uint64_t max_queues;
        char err_str[64];

        if (name == NULL) {
                SPDK_ERRLOG("No name given for controller: %s\n", path);
                return NULL;
        } else if (requested_queues == 0) {
                SPDK_ERRLOG("Can't create controller with no queues: %s\n", path);
                return NULL;
        }

        dev = calloc(1, sizeof(*dev));
        if (dev == NULL) {
                return NULL;
        }

        vdev = virtio_dev_construct(&virtio_user_ops, dev);
        if (vdev == NULL) {
                SPDK_ERRLOG("Failed to init device: %s\n", path);
                free(dev);
                return NULL;
        }

        vdev->is_hw = 0;
        vdev->name = strdup(name);
        if (!vdev->name) {
                SPDK_ERRLOG("Failed to reserve memory for controller name: %s\n", path);
                goto err;
        }

        snprintf(dev->path, PATH_MAX, "%s", path);
        dev->queue_size = queue_size;

        if (virtio_user_dev_setup(vdev) < 0) {
                SPDK_ERRLOG("backend set up fails\n");
                goto err;
        }

        if (dev->ops->send_request(dev, VHOST_USER_GET_QUEUE_NUM, &max_queues) < 0) {
                spdk_strerror_r(errno, err_str, sizeof(err_str));
                SPDK_ERRLOG("get_queue_num fails: %s\n", err_str);
                goto err;
        }

        if (requested_queues > max_queues) {
                SPDK_ERRLOG("requested %"PRIu16" request queues but only %"PRIu64" available\n",
                            requested_queues, max_queues);
                goto err;
        }

        vdev->max_queues = fixed_queue_num + requested_queues;

        if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
                spdk_strerror_r(errno, err_str, sizeof(err_str));
                SPDK_ERRLOG("set_owner fails: %s\n", err_str);
                goto err;
        }

        TAILQ_INSERT_TAIL(&g_virtio_driver.init_ctrlrs, vdev, tailq);
        return vdev;

err:
        virtio_dev_free(vdev);
        return NULL;
}

static void
virtio_user_read_dev_config(struct virtio_dev *vdev, size_t offset,
                            void *dst, int length)
@@ -439,7 +366,7 @@ virtio_user_dump_json_config(struct virtio_dev *vdev, struct spdk_json_write_ctx
        spdk_json_write_string(w, dev->path);
}

-const struct virtio_dev_ops virtio_user_ops = {
+static const struct virtio_dev_ops virtio_user_ops = {
        .read_dev_cfg = virtio_user_read_dev_config,
        .write_dev_cfg = virtio_user_write_dev_config,
        .get_status = virtio_user_get_status,
@@ -453,3 +380,76 @@ const struct virtio_dev_ops virtio_user_ops = {
        .notify_queue = virtio_user_notify_queue,
        .dump_json_config = virtio_user_dump_json_config,
};

struct virtio_dev *
virtio_user_dev_init(const char *name, const char *path, uint16_t requested_queues,
                     uint32_t queue_size, uint16_t fixed_queue_num)
{
        struct virtio_dev *vdev;
        struct virtio_user_dev *dev;
        uint64_t max_queues;
        char err_str[64];

        if (name == NULL) {
                SPDK_ERRLOG("No name given for controller: %s\n", path);
                return NULL;
        } else if (requested_queues == 0) {
                SPDK_ERRLOG("Can't create controller with no queues: %s\n", path);
                return NULL;
        }

        dev = calloc(1, sizeof(*dev));
        if (dev == NULL) {
                return NULL;
        }

        vdev = virtio_dev_construct(&virtio_user_ops, dev);
        if (vdev == NULL) {
                SPDK_ERRLOG("Failed to init device: %s\n", path);
                free(dev);
                return NULL;
        }

        vdev->is_hw = 0;
        vdev->name = strdup(name);
        if (!vdev->name) {
                SPDK_ERRLOG("Failed to reserve memory for controller name: %s\n", path);
                goto err;
        }

        snprintf(dev->path, PATH_MAX, "%s", path);
        dev->queue_size = queue_size;

        if (virtio_user_dev_setup(vdev) < 0) {
                SPDK_ERRLOG("backend set up fails\n");
                goto err;
        }

        if (dev->ops->send_request(dev, VHOST_USER_GET_QUEUE_NUM, &max_queues) < 0) {
                spdk_strerror_r(errno, err_str, sizeof(err_str));
                SPDK_ERRLOG("get_queue_num fails: %s\n", err_str);
                goto err;
        }

        if (requested_queues > max_queues) {
                SPDK_ERRLOG("requested %"PRIu16" request queues but only %"PRIu64" available\n",
                            requested_queues, max_queues);
                goto err;
        }

        vdev->max_queues = fixed_queue_num + requested_queues;

        if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
                spdk_strerror_r(errno, err_str, sizeof(err_str));
                SPDK_ERRLOG("set_owner fails: %s\n", err_str);
                goto err;
        }

        TAILQ_INSERT_TAIL(&g_virtio_driver.init_ctrlrs, vdev, tailq);
        return vdev;

err:
        virtio_dev_free(vdev);
        return NULL;
}
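
For orientation, a hedged sketch of how a caller might drive
virtio_user_dev_init() above; the controller name, socket path and queue
counts are made up for illustration:

/* Hypothetical caller: one fixed event queue plus four request queues,
 * each 512 entries deep, backed by a vhost-user socket. */
struct virtio_dev *vdev;

vdev = virtio_user_dev_init("VirtioScsi0", "/tmp/vhost.0", 4, 512, 1);
if (vdev == NULL) {
        /* name/queue validation, backend setup, or the vhost-user
         * handshake (GET_QUEUE_NUM / SET_OWNER) failed */
        return -1;
}
/* On success the device is queued on g_virtio_driver.init_ctrlrs,
 * awaiting virtio_dev_init() and virtio_dev_start(). */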