net/virtio: add virtio-user features ops

This patch introduces new callbacks for getting and setting Virtio features, and implements them for the different backend types.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
commit cc0151b34d
parent 06856cabb8
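For context, a rough C sketch of how the generic virtio-user layer calls the new ops once this patch is applied (condensed from the virtio_user_dev.c and virtio_user_ethdev.c hunks below; surrounding code and error handling are trimmed, so this is illustrative rather than a verbatim excerpt):

	/* Device init / server reconnect: query the backend's supported features. */
	if (dev->ops->get_features(dev, &dev->device_features) < 0)
		return -1;

	/* Feature negotiation: push the negotiated set back to the backend,
	 * instead of going through send_request(VHOST_USER_SET_FEATURES). */
	ret = dev->ops->set_features(dev, features);
	if (ret < 0)
		goto error;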
@@ -110,6 +110,8 @@ struct virtio_user_dev;
 struct virtio_user_backend_ops {
 	int (*setup)(struct virtio_user_dev *dev);
 	int (*set_owner)(struct virtio_user_dev *dev);
+	int (*get_features)(struct virtio_user_dev *dev, uint64_t *features);
+	int (*set_features)(struct virtio_user_dev *dev, uint64_t features);
 	int (*send_request)(struct virtio_user_dev *dev,
 			    enum vhost_user_request req,
 			    void *arg);
@@ -38,6 +38,28 @@ struct vhost_memory_kernel {
 #define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
 #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
 
+/* with below features, vhost kernel does not need to do the checksum and TSO,
+ * these info will be passed to virtio_user through virtio net header.
+ */
+#define VHOST_KERNEL_GUEST_OFFLOADS_MASK	\
+	((1ULL << VIRTIO_NET_F_GUEST_CSUM) |	\
+	 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |	\
+	 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |	\
+	 (1ULL << VIRTIO_NET_F_GUEST_ECN)  |	\
+	 (1ULL << VIRTIO_NET_F_GUEST_UFO))
+
+/* with below features, when flows from virtio_user to vhost kernel
+ * (1) if flows goes up through the kernel networking stack, it does not need
+ * to verify checksum, which can save CPU cycles;
+ * (2) if flows goes through a Linux bridge and outside from an interface
+ * (kernel driver), checksum and TSO will be done by GSO in kernel or even
+ * offloaded into real physical device.
+ */
+#define VHOST_KERNEL_HOST_OFFLOADS_MASK		\
+	((1ULL << VIRTIO_NET_F_HOST_TSO4) |	\
+	 (1ULL << VIRTIO_NET_F_HOST_TSO6) |	\
+	 (1ULL << VIRTIO_NET_F_CSUM))
+
 static uint64_t max_regions = 64;
 
 static void
@@ -77,10 +99,57 @@ vhost_kernel_set_owner(struct virtio_user_dev *dev)
 	return vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_OWNER, NULL);
 }
 
+static int
+vhost_kernel_get_features(struct virtio_user_dev *dev, uint64_t *features)
+{
+	int ret;
+	unsigned int tap_features;
+
+	ret = vhost_kernel_ioctl(dev->vhostfds[0], VHOST_GET_FEATURES, features);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to get features");
+		return -1;
+	}
+
+	ret = tap_support_features(&tap_features);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to get TAP features");
+		return -1;
+	}
+
+	/* with tap as the backend, all these features are supported
+	 * but not claimed by vhost-net, so we add them back when
+	 * reporting to upper layer.
+	 */
+	if (tap_features & IFF_VNET_HDR) {
+		*features |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+		*features |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+	}
+
+	/* vhost_kernel will not declare this feature, but it does
+	 * support multi-queue.
+	 */
+	if (tap_features & IFF_MULTI_QUEUE)
+		*features |= (1ull << VIRTIO_NET_F_MQ);
+
+	return 0;
+}
+
+static int
+vhost_kernel_set_features(struct virtio_user_dev *dev, uint64_t features)
+{
+	/* We don't need memory protection here */
+	features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
+	/* VHOST kernel does not know about below flags */
+	features &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+	features &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;
+	features &= ~(1ULL << VIRTIO_NET_F_MQ);
+
+	return vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_FEATURES, &features);
+}
+
 static uint64_t vhost_req_user_to_kernel[] = {
 	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
-	[VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES,
-	[VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES,
 	[VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
 	[VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
 	[VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
@@ -150,51 +219,6 @@ prepare_vhost_memory_kernel(void)
 	return vm;
 }
 
-/* with below features, vhost kernel does not need to do the checksum and TSO,
- * these info will be passed to virtio_user through virtio net header.
- */
-#define VHOST_KERNEL_GUEST_OFFLOADS_MASK	\
-	((1ULL << VIRTIO_NET_F_GUEST_CSUM) |	\
-	 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |	\
-	 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |	\
-	 (1ULL << VIRTIO_NET_F_GUEST_ECN)  |	\
-	 (1ULL << VIRTIO_NET_F_GUEST_UFO))
-
-/* with below features, when flows from virtio_user to vhost kernel
- * (1) if flows goes up through the kernel networking stack, it does not need
- * to verify checksum, which can save CPU cycles;
- * (2) if flows goes through a Linux bridge and outside from an interface
- * (kernel driver), checksum and TSO will be done by GSO in kernel or even
- * offloaded into real physical device.
- */
-#define VHOST_KERNEL_HOST_OFFLOADS_MASK		\
-	((1ULL << VIRTIO_NET_F_HOST_TSO4) |	\
-	 (1ULL << VIRTIO_NET_F_HOST_TSO6) |	\
-	 (1ULL << VIRTIO_NET_F_CSUM))
-
-static unsigned int
-tap_support_features(void)
-{
-	int tapfd;
-	unsigned int tap_features;
-
-	tapfd = open(PATH_NET_TUN, O_RDWR);
-	if (tapfd < 0) {
-		PMD_DRV_LOG(ERR, "fail to open %s: %s",
-			    PATH_NET_TUN, strerror(errno));
-		return -1;
-	}
-
-	if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
-		PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
-		close(tapfd);
-		return -1;
-	}
-
-	close(tapfd);
-	return tap_features;
-}
-
 static int
 vhost_kernel_send_request(struct virtio_user_dev *dev,
 			  enum vhost_user_request req,
@@ -206,7 +230,6 @@ vhost_kernel_send_request(struct virtio_user_dev *dev,
 	struct vhost_memory_kernel *vm = NULL;
 	int vhostfd;
 	unsigned int queue_sel;
-	unsigned int features;
 
 	PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
 
@@ -219,17 +242,6 @@ vhost_kernel_send_request(struct virtio_user_dev *dev,
 		arg = (void *)vm;
 	}
 
-	if (req_kernel == VHOST_SET_FEATURES) {
-		/* We don't need memory protection here */
-		*(uint64_t *)arg &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
-
-		/* VHOST kernel does not know about below flags */
-		*(uint64_t *)arg &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
-		*(uint64_t *)arg &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;
-
-		*(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
-	}
-
 	switch (req_kernel) {
 	case VHOST_SET_VRING_NUM:
 	case VHOST_SET_VRING_ADDR:
@@ -259,24 +271,6 @@ vhost_kernel_send_request(struct virtio_user_dev *dev,
 		ret = ioctl(vhostfd, req_kernel, arg);
 	}
 
-	if (!ret && req_kernel == VHOST_GET_FEATURES) {
-		features = tap_support_features();
-		/* with tap as the backend, all these features are supported
-		 * but not claimed by vhost-net, so we add them back when
-		 * reporting to upper layer.
-		 */
-		if (features & IFF_VNET_HDR) {
-			*((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
-			*((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
-		}
-
-		/* vhost_kernel will not declare this feature, but it does
-		 * support multi-queue.
-		 */
-		if (features & IFF_MULTI_QUEUE)
-			*(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
-	}
-
 	if (vm)
 		free(vm);
 
@@ -407,6 +401,8 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
 struct virtio_user_backend_ops virtio_ops_kernel = {
 	.setup = vhost_kernel_setup,
 	.set_owner = vhost_kernel_set_owner,
+	.get_features = vhost_kernel_get_features,
+	.set_features = vhost_kernel_set_features,
 	.send_request = vhost_kernel_send_request,
 	.enable_qp = vhost_kernel_enable_queue_pair
 };
@@ -18,6 +18,29 @@
 #include "../virtio_logs.h"
 #include "../virtio.h"
 
+
+int
+tap_support_features(unsigned int *tap_features)
+{
+	int tapfd;
+
+	tapfd = open(PATH_NET_TUN, O_RDWR);
+	if (tapfd < 0) {
+		PMD_DRV_LOG(ERR, "fail to open %s: %s",
+			    PATH_NET_TUN, strerror(errno));
+		return -1;
+	}
+
+	if (ioctl(tapfd, TUNGETFEATURES, tap_features) == -1) {
+		PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+		close(tapfd);
+		return -1;
+	}
+
+	close(tapfd);
+	return 0;
+}
+
 int
 vhost_kernel_tap_set_offload(int fd, uint64_t features)
 {
@@ -43,5 +43,6 @@ int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
 			  const char *mac, uint64_t features);
 int vhost_kernel_tap_set_offload(int fd, uint64_t features);
 int vhost_kernel_tap_set_queue(int fd, bool attach);
+int tap_support_features(unsigned int *tap_features);
 
 #endif
@@ -143,6 +143,64 @@ vhost_user_set_owner(struct virtio_user_dev *dev)
 	return 0;
 }
 
+static int
+vhost_user_get_features(struct virtio_user_dev *dev, uint64_t *features)
+{
+	int ret;
+	struct vhost_user_msg msg = {
+		.request = VHOST_USER_GET_FEATURES,
+		.flags = VHOST_USER_VERSION,
+	};
+
+	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
+	if (ret < 0)
+		goto err;
+
+	ret = vhost_user_read(dev->vhostfd, &msg);
+	if (ret < 0)
+		goto err;
+
+	if (msg.request != VHOST_USER_GET_FEATURES) {
+		PMD_DRV_LOG(ERR, "Unexpected request type (%d)", msg.request);
+		goto err;
+	}
+
+	if (msg.size != sizeof(*features)) {
+		PMD_DRV_LOG(ERR, "Unexpected payload size (%u)", msg.size);
+		goto err;
+	}
+
+	*features = msg.payload.u64;
+
+	return 0;
+err:
+	PMD_DRV_LOG(ERR, "Failed to get backend features");
+
+	return -1;
+}
+
+static int
+vhost_user_set_features(struct virtio_user_dev *dev, uint64_t features)
+{
+	int ret;
+	struct vhost_user_msg msg = {
+		.request = VHOST_USER_SET_FEATURES,
+		.flags = VHOST_USER_VERSION,
+		.size = sizeof(features),
+		.payload.u64 = features,
+	};
+
+	msg.payload.u64 |= dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+
+	ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to set features");
+		return -1;
+	}
+
+	return 0;
+}
+
 struct walk_arg {
 	struct vhost_memory *vm;
 	int *fds;
@@ -249,8 +307,6 @@ static struct vhost_user_msg m;
 
 const char * const vhost_msg_strings[] = {
 	[VHOST_USER_RESET_OWNER] = "VHOST_RESET_OWNER",
-	[VHOST_USER_SET_FEATURES] = "VHOST_SET_FEATURES",
-	[VHOST_USER_GET_FEATURES] = "VHOST_GET_FEATURES",
 	[VHOST_USER_SET_VRING_CALL] = "VHOST_SET_VRING_CALL",
 	[VHOST_USER_SET_VRING_NUM] = "VHOST_SET_VRING_NUM",
 	[VHOST_USER_SET_VRING_BASE] = "VHOST_SET_VRING_BASE",
@@ -299,7 +355,6 @@ vhost_user_sock(struct virtio_user_dev *dev,
 				(1ULL << VHOST_USER_PROTOCOL_F_STATUS))))
 			return -ENOTSUP;
 		/* Fallthrough */
-	case VHOST_USER_GET_FEATURES:
 	case VHOST_USER_GET_PROTOCOL_FEATURES:
 		need_reply = 1;
 		break;
@@ -398,7 +453,6 @@ vhost_user_sock(struct virtio_user_dev *dev,
 	}
 
 	switch (req) {
-	case VHOST_USER_GET_FEATURES:
 	case VHOST_USER_GET_STATUS:
 	case VHOST_USER_GET_PROTOCOL_FEATURES:
 		if (msg.size != sizeof(m.payload.u64)) {
@@ -536,6 +590,8 @@ vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
 struct virtio_user_backend_ops virtio_ops_user = {
 	.setup = vhost_user_setup,
 	.set_owner = vhost_user_set_owner,
+	.get_features = vhost_user_get_features,
+	.set_features = vhost_user_set_features,
 	.send_request = vhost_user_sock,
 	.enable_qp = vhost_user_enable_queue_pair
 };
@@ -40,8 +40,6 @@
 
 static uint64_t vhost_req_user_to_vdpa[] = {
 	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
-	[VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES,
-	[VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES,
 	[VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
 	[VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
 	[VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
@@ -107,6 +105,32 @@ vhost_vdpa_set_owner(struct virtio_user_dev *dev)
 	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_OWNER, NULL);
 }
 
+static int
+vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
+{
+	int ret;
+
+	ret = vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_FEATURES, features);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get features");
+		return -1;
+	}
+
+	/* Multiqueue not supported for now */
+	*features &= ~(1ULL << VIRTIO_NET_F_MQ);
+
+	return 0;
+}
+
+static int
+vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
+{
+	/* WORKAROUND */
+	features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;
+
+	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_FEATURES, &features);
+}
+
 static int
 vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
 {
@@ -343,14 +367,6 @@ vhost_vdpa_send_request(struct virtio_user_dev *dev,
 	if (req_vdpa == VHOST_SET_MEM_TABLE)
 		return vhost_vdpa_dma_map_all(dev);
 
-	if (req_vdpa == VHOST_SET_FEATURES) {
-		/* WORKAROUND */
-		*(uint64_t *)arg |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;
-
-		/* Multiqueue not supported for now */
-		*(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
-	}
-
 	switch (req_vdpa) {
 	case VHOST_SET_VRING_NUM:
 	case VHOST_SET_VRING_ADDR:
@@ -429,6 +445,8 @@ vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
 struct virtio_user_backend_ops virtio_ops_vdpa = {
 	.setup = vhost_vdpa_setup,
 	.set_owner = vhost_vdpa_set_owner,
+	.get_features = vhost_vdpa_get_features,
+	.set_features = vhost_vdpa_set_features,
 	.send_request = vhost_vdpa_send_request,
 	.enable_qp = vhost_vdpa_enable_queue_pair,
 	.dma_map = vhost_vdpa_dma_map_batch,
@@ -141,7 +141,7 @@ virtio_user_dev_set_features(struct virtio_user_dev *dev)
 	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
 	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
 	features &= ~(1ull << VIRTIO_NET_F_STATUS);
-	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
+	ret = dev->ops->set_features(dev, features);
 	if (ret < 0)
 		goto error;
 	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
@@ -496,8 +496,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 		return -1;
 	}
 
-	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
-				   &dev->device_features) < 0) {
+	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
 		PMD_INIT_LOG(ERR, "get_features failed: %s",
 			     strerror(errno));
 		return -1;
@@ -85,8 +85,7 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
 	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 
-	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
-				   &dev->device_features) < 0) {
+	if (dev->ops->get_features(dev, &dev->device_features) < 0) {
 		PMD_INIT_LOG(ERR, "get_features failed: %s",
 			     strerror(errno));
 		return -1;