/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>
#include <sys/queue.h>

#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_vdpa.h"
#include "rte_vdpa_dev.h"
#include "vhost.h"

/** Double linked list of vDPA devices. */
TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);

static struct vdpa_device_list vdpa_device_list =
	TAILQ_HEAD_INITIALIZER(vdpa_device_list);
static rte_spinlock_t vdpa_device_list_lock = RTE_SPINLOCK_INITIALIZER;

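/*
 * Every registered vDPA device is linked into vdpa_device_list; the lookup,
 * registration, unregistration and iteration helpers below all take
 * vdpa_device_list_lock before touching the list.
 */
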
/* Unsafe, needs to be called with vdpa_device_list_lock held */
static struct rte_vdpa_device *
__vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev, *ret = NULL;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(dev, &vdpa_device_list, next) {
		if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) {
			ret = dev;
			break;
		}
	}

	return ret;
}

struct rte_vdpa_device *
rte_vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	dev = __vdpa_find_device_by_name(name);
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

struct rte_device *
rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
{
	if (vdpa_dev == NULL)
		return NULL;

	return vdpa_dev->device;
}

struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;

	if (ops == NULL)
		return NULL;

	/* Check mandatory ops are implemented */
	if (!ops->get_queue_num || !ops->get_features ||
			!ops->get_protocol_features || !ops->dev_conf ||
			!ops->dev_close || !ops->set_vring_state ||
			!ops->set_features) {
		VHOST_LOG_CONFIG(ERR,
				"Some mandatory vDPA ops aren't implemented\n");
		return NULL;
	}

	rte_spinlock_lock(&vdpa_device_list_lock);
	/* Check the device hasn't been registered already */
	dev = __vdpa_find_device_by_name(rte_dev->name);
	if (dev) {
		dev = NULL;
		goto out_unlock;
	}

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev)
		goto out_unlock;

	dev->device = rte_dev;
	dev->ops = ops;
	TAILQ_INSERT_TAIL(&vdpa_device_list, dev, next);
out_unlock:
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

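/*
 * Illustrative sketch only (not part of this file): a vDPA driver would
 * typically register itself from its probe callback with an ops structure
 * providing at least the mandatory callbacks checked above, and unregister
 * from its remove callback. All "my_*" names are hypothetical placeholders.
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops = {
 *		.get_queue_num = my_get_queue_num,
 *		.get_features = my_get_features,
 *		.get_protocol_features = my_get_protocol_features,
 *		.dev_conf = my_dev_conf,
 *		.dev_close = my_dev_close,
 *		.set_vring_state = my_set_vring_state,
 *		.set_features = my_set_features,
 *	};
 *
 *	vdev = rte_vdpa_register_device(rte_dev, &my_vdpa_ops);
 *	if (vdev == NULL)
 *		return -1;
 *
 *	...
 *
 *	rte_vdpa_unregister_device(vdev);
 */
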
int
rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *cur_dev, *tmp_dev;
	int ret = -1;

	rte_spinlock_lock(&vdpa_device_list_lock);
	/* RTE_TAILQ_FOREACH_SAFE is used because TAILQ_FOREACH_SAFE is not
	 * provided by every <sys/queue.h> implementation.
	 */
	RTE_TAILQ_FOREACH_SAFE(cur_dev, &vdpa_device_list, next, tmp_dev) {
		if (dev != cur_dev)
			continue;

		TAILQ_REMOVE(&vdpa_device_list, dev, next);
		rte_free(dev);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return ret;
}

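/*
 * Relay the used ring of the mediated vring (vring_m) into the guest-visible
 * virtqueue and log the guest pages written by the device, so that dirty
 * pages can be tracked (typically for software-assisted live migration).
 * Only split rings are handled; returns the number of relayed used entries,
 * or -1 on error.
 */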
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* used idx is the synchronization point for the split vring */
	__atomic_store_n(&vq->used->idx, idx_m, __ATOMIC_RELEASE);

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}

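/*
 * Illustrative sketch only (not part of this file), assuming "vdev" was
 * obtained from rte_vdpa_find_device_by_name() or from device iteration:
 *
 *	uint32_t queue_num;
 *	uint64_t features;
 *
 *	if (rte_vdpa_get_queue_num(vdev, &queue_num) != 0 ||
 *			rte_vdpa_get_features(vdev, &features) != 0)
 *		return -1;
 */
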
int
rte_vdpa_get_queue_num(struct rte_vdpa_device *dev, uint32_t *queue_num)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_queue_num == NULL)
		return -1;

	return dev->ops->get_queue_num(dev, queue_num);
}

int
rte_vdpa_get_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_features == NULL)
		return -1;

	return dev->ops->get_features(dev, features);
}

int
rte_vdpa_get_protocol_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL ||
			dev->ops->get_protocol_features == NULL)
		return -1;

	return dev->ops->get_protocol_features(dev, features);
}

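/*
 * The statistics callbacks are optional driver ops: registration does not
 * require them, and the wrappers below return -ENOTSUP when a driver does
 * not implement them.
 */
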
int
rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);

	return dev->ops->get_stats_names(dev, stats_names, size);
}

int
rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	if (!dev || !stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);

	return dev->ops->get_stats(dev, qid, stats, n);
}

int
rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);

	return dev->ops->reset_stats(dev, qid);
}

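/*
 * The "vdpa" device class below lets the generic EAL device iteration walk
 * the registered vDPA devices: vdpa_dev_iterate() matches each vDPA device
 * against the rte_device carried by the iterator.
 */
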
static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);

static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	if (start == NULL)
		dev = TAILQ_FIRST(&vdpa_device_list);
	else
		dev = TAILQ_NEXT(start, next);

	while (dev != NULL) {
		if (cmp(dev, rte_dev) == 0)
			break;

		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);