d7280c9fff
This patch set introduces support for selective datapath in the DPDK vhost-user lib. vDPA stands for vhost Data Path Acceleration. The idea is to support virtio-ring-compatible devices that can serve the virtio driver directly, enabling datapath acceleration.

A set of device ops is defined for device-specific operations:

a. get_queue_num: Called to get the number of queues the device supports.

b. get_features: Called to get the features the device supports.

c. get_protocol_features: Called to get the protocol features the device supports.

d. dev_conf: Called to configure the actual device when the virtio device becomes ready.

e. dev_close: Called to close the actual device when the virtio device is stopped.

f. set_vring_state: Called to change the state of the vring in the actual device when the vring state changes.

g. set_features: Called to set the negotiated features on the device.

h. migration_done: Called to allow the device to respond to RARP sending.

i. get_vfio_group_fd: Called to get the VFIO group fd of the device.

j. get_vfio_device_fd: Called to get the VFIO device fd of the device.

k. get_notify_area: Called to get the notify area info of the queue.

Signed-off-by: Zhihong Wang <zhihong.wang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
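To make the flow concrete, here is a minimal sketch of how a hardware driver might plug into this interface. It is not part of the patch: the probe function, the PCI address values, and the empty my_pci_vdpa_ops table are hypothetical, and the callbacks listed above would be filled in with driver-specific implementations whose prototypes come from rte_vdpa.h.

#include <rte_vdpa.h>

/* Hypothetical ops table: a real driver fills in the callbacks listed
 * above (get_queue_num, get_features, dev_conf, dev_close, ...) with
 * the prototypes declared in rte_vdpa.h.
 */
static struct rte_vdpa_dev_ops my_pci_vdpa_ops;

static int my_did = -1;

/* Register one vDPA-capable PCI device with the vhost lib. */
static int
my_vdpa_probe(void)
{
	struct rte_vdpa_dev_addr addr = {
		.type = PCI_ADDR,
		.pci_addr = {
			.domain = 0,		/* example values only */
			.bus = 0x05,
			.devid = 0x00,
			.function = 0x1,
		},
	};

	/* On success the returned device id ("did") identifies this
	 * device in later calls into the vhost lib.
	 */
	my_did = rte_vdpa_register_device(&addr, &my_pci_vdpa_ops);

	return my_did < 0 ? -1 : 0;
}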
116 lines
2.1 KiB
C
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_malloc.h>
#include "rte_vdpa.h"
#include "vhost.h"

/* Registry of vDPA devices, indexed by device id (did). */
static struct rte_vdpa_device *vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;

/* Check whether two vDPA device addresses refer to the same device. */
static bool
is_same_vdpa_device(struct rte_vdpa_dev_addr *a,
		struct rte_vdpa_dev_addr *b)
{
	bool ret = true;

	if (a->type != b->type)
		return false;

	switch (a->type) {
	case PCI_ADDR:
		if (a->pci_addr.domain != b->pci_addr.domain ||
				a->pci_addr.bus != b->pci_addr.bus ||
				a->pci_addr.devid != b->pci_addr.devid ||
				a->pci_addr.function != b->pci_addr.function)
			ret = false;
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Register a vDPA device: allocate a descriptor, store the address and
 * the driver ops, and return the assigned device id (did).
 */
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	char device_name[MAX_VDPA_NAME_LEN];
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE)
		return -1;

	/* Refuse to register the same device address twice. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return -1;
	}

	/* Find a free slot; one exists thanks to the count check above. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i] == NULL)
			break;
	}

	sprintf(device_name, "vdpa-dev-%d", i);
	dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
			RTE_CACHE_LINE_SIZE);
	if (!dev)
		return -1;

	memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
	dev->ops = ops;
	vdpa_devices[i] = dev;
	vdpa_device_num++;

	return i;
}

/* Unregister a vDPA device and free its slot; returns the released did. */
int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did] == NULL)
		return -1;

	rte_free(vdpa_devices[did]);
	vdpa_devices[did] = NULL;
	vdpa_device_num--;

	return did;
}

int
rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
{
	struct rte_vdpa_device *dev;
	int i;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return i;
	}

	return -1;
}

struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return vdpa_devices[did];
}
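For completeness, a sketch of the lookup and teardown side of this registry, using only the functions defined above. The my_vdpa_remove wrapper is illustrative, not part of the patch.

/* Illustrative helper (not part of the patch): find a previously
 * registered device by address, fetch its descriptor, and drop it
 * from the registry again.
 */
static int
my_vdpa_remove(struct rte_vdpa_dev_addr *addr)
{
	struct rte_vdpa_device *dev;
	int did;

	did = rte_vdpa_find_device_id(addr);
	if (did < 0)
		return -1;	/* nothing registered at this address */

	dev = rte_vdpa_get_device(did);
	if (dev == NULL)
		return -1;

	/* The vhost lib invokes dev->ops callbacks itself; here we only
	 * release the registry slot.
	 */
	return rte_vdpa_unregister_device(did);
}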