/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/queue.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>

#include <rte_log.h>

#include "fd_man.h"
#include "vhost.h"
#include "vhost_user.h"

/*
 * Every time rte_vhost_driver_register() is invoked, an associated
 * vhost_user_socket struct will be created.
 */
struct vhost_user_socket {
	char *path;
	int listenfd;
	int connfd;
	bool is_server;
	bool reconnect;
	bool dequeue_zero_copy;
};

struct vhost_user_connection {
	struct vhost_user_socket *vsocket;
	int vid;
};

#define MAX_VHOST_SOCKET 1024
struct vhost_user {
	struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
	struct fdset fdset;
	int vsocket_cnt;
	pthread_mutex_t mutex;
};

#define MAX_VIRTIO_BACKLOG 128

static void vhost_user_server_new_connection(int fd, void *data, int *remove);
static void vhost_user_read_cb(int fd, void *dat, int *remove);
static int vhost_user_create_client(struct vhost_user_socket *vsocket);

static struct vhost_user vhost_user = {
	.fdset = {
		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
		.num = 0
	},
	.vsocket_cnt = 0,
	.mutex = PTHREAD_MUTEX_INITIALIZER,
};
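
/*
 * All registered fds (listen fds and connection fds) live in the fdset
 * above; fd_man.c watches them from rte_vhost_driver_session_start() and
 * invokes the callback registered via fdset_add() when an fd becomes
 * ready.
 */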

/* return bytes# of read on success or negative val on failure. */
int
read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
{
	struct iovec iov;
	struct msghdr msgh;
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	int ret;

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = buf;
	iov.iov_len = buflen;

	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	ret = recvmsg(sockfd, &msgh, 0);
	if (ret <= 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "recvmsg failed\n");
		return ret;
	}

	if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
		RTE_LOG(ERR, VHOST_CONFIG, "truncated msg\n");
		return -1;
	}

	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if ((cmsg->cmsg_level == SOL_SOCKET) &&
			(cmsg->cmsg_type == SCM_RIGHTS)) {
			memcpy(fds, CMSG_DATA(cmsg), fdsize);
			break;
		}
	}

	return ret;
}
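
/*
 * Counterpart of read_fd_message(): send a vhost-user message and,
 * optionally, an array of file descriptors as SCM_RIGHTS ancillary data.
 * In the vhost-user protocol this is how descriptors such as guest memory
 * region fds and vring kick/call eventfds cross the Unix socket.
 */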

int
send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
{
	struct iovec iov;
	struct msghdr msgh;
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	int ret;

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = buf;
	iov.iov_len = buflen;

	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;

	if (fds && fd_num > 0) {
		msgh.msg_control = control;
		msgh.msg_controllen = sizeof(control);
		cmsg = CMSG_FIRSTHDR(&msgh);
		cmsg->cmsg_len = CMSG_LEN(fdsize);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		memcpy(CMSG_DATA(cmsg), fds, fdsize);
	} else {
		msgh.msg_control = NULL;
		msgh.msg_controllen = 0;
	}

	do {
		ret = sendmsg(sockfd, &msgh, 0);
	} while (ret < 0 && errno == EINTR);

	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "sendmsg error\n");
		return ret;
	}

	return ret;
}
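
/*
 * Bind an established connection fd to a freshly created vhost device and
 * register the fd with the fdset, so vhost_user_read_cb() runs whenever
 * the peer sends a message. Used by both server and client modes.
 */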

static void
vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
{
	int vid;
	size_t size;
	struct vhost_user_connection *conn;
	int ret;

	conn = malloc(sizeof(*conn));
	if (conn == NULL) {
		close(fd);
		return;
	}

	vid = vhost_new_device();
	if (vid == -1) {
		close(fd);
		free(conn);
		return;
	}

	size = strnlen(vsocket->path, PATH_MAX);
	vhost_set_ifname(vid, vsocket->path, size);

	if (vsocket->dequeue_zero_copy)
		vhost_enable_dequeue_zero_copy(vid);

	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);

	vsocket->connfd = fd;
	conn->vsocket = vsocket;
	conn->vid = vid;
	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
			NULL, conn);
	if (ret < 0) {
		vsocket->connfd = -1;
		free(conn);
		close(fd);
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add fd %d into vhost server fdset\n",
			fd);
	}
}

/* callback when there is a new vhost-user connection from a client */
static void
vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
{
	struct vhost_user_socket *vsocket = dat;

	fd = accept(fd, NULL, NULL);
	if (fd < 0)
		return;

	RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd);
	vhost_user_add_connection(fd, vsocket);
}
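
/*
 * Called from the dispatch loop when the connection fd is readable.
 * A negative return from vhost_user_msg_handler() means the connection
 * was closed or a message could not be handled: tear the device down and,
 * in client mode with reconnect enabled, start a new connection attempt.
 */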

static void
vhost_user_read_cb(int connfd, void *dat, int *remove)
{
	struct vhost_user_connection *conn = dat;
	struct vhost_user_socket *vsocket = conn->vsocket;
	int ret;

	ret = vhost_user_msg_handler(conn->vid, connfd);
	if (ret < 0) {
		vsocket->connfd = -1;
		close(connfd);
		*remove = 1;
		vhost_destroy_device(conn->vid);
		free(conn);

		if (vsocket->reconnect)
			vhost_user_create_client(vsocket);
	}
}

static int
create_unix_socket(const char *path, struct sockaddr_un *un, bool is_server)
{
	int fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n",
		is_server ? "server" : "client", fd);

	if (!is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"vhost-user: can't set nonblocking mode for socket, fd: "
			"%d (%s)\n", fd, strerror(errno));
		close(fd);
		return -1;
	}

	memset(un, 0, sizeof(*un));
	un->sun_family = AF_UNIX;
	strncpy(un->sun_path, path, sizeof(un->sun_path));
	un->sun_path[sizeof(un->sun_path) - 1] = '\0';

	return fd;
}
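
/*
 * Server mode: bind and listen on vsocket->path; incoming connections are
 * accepted asynchronously by vhost_user_server_new_connection() through
 * the fdset.
 */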

static int
vhost_user_create_server(struct vhost_user_socket *vsocket)
{
	int fd;
	int ret;
	struct sockaddr_un un;
	const char *path = vsocket->path;

	fd = create_unix_socket(path, &un, vsocket->is_server);
	if (fd < 0)
		return -1;

	ret = bind(fd, (struct sockaddr *)&un, sizeof(un));
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to bind to %s: %s; remove it and try again\n",
			path, strerror(errno));
		goto err;
	}
	RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path);

	ret = listen(fd, MAX_VIRTIO_BACKLOG);
	if (ret < 0)
		goto err;

	vsocket->listenfd = fd;
	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
			NULL, vsocket);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add listen fd %d to vhost server fdset\n",
			fd);
		goto err;
	}

	return 0;

err:
	close(fd);
	return -1;
}
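
/*
 * Client-mode reconnect machinery: sockets whose connect() has not yet
 * completed are queued on reconn_list and retried roughly once per second
 * by the vhost_user_client_reconnect() thread.
 */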

struct vhost_user_reconnect {
	struct sockaddr_un un;
	int fd;
	struct vhost_user_socket *vsocket;

	TAILQ_ENTRY(vhost_user_reconnect) next;
};

TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
struct vhost_user_reconnect_list {
	struct vhost_user_reconnect_tailq_list head;
	pthread_mutex_t mutex;
};

static struct vhost_user_reconnect_list reconn_list;
static pthread_t reconn_tid;
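
/*
 * Finish a (possibly non-blocking) connect(). Returns 0 on success with
 * O_NONBLOCK cleared, -1 if the connection is not established yet (the
 * caller may retry), or -2 on an unrecoverable error (the caller should
 * close the fd).
 */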
static int
vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
{
	int ret, flags;

	ret = connect(fd, un, sz);
	if (ret < 0 && errno != EISCONN)
		return -1;

	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't get flags for connfd %d\n", fd);
		return -2;
	}
	if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't disable nonblocking on fd %d\n", fd);
		return -2;
	}
	return 0;
}

static void *
vhost_user_client_reconnect(void *arg __rte_unused)
{
	int ret;
	struct vhost_user_reconnect *reconn, *next;

	while (1) {
		pthread_mutex_lock(&reconn_list.mutex);

		/*
		 * An equivalent implementation of TAILQ_FOREACH_SAFE,
		 * which does not exist on all platforms.
		 */
		for (reconn = TAILQ_FIRST(&reconn_list.head);
		     reconn != NULL; reconn = next) {
			next = TAILQ_NEXT(reconn, next);

			ret = vhost_user_connect_nonblock(reconn->fd,
					(struct sockaddr *)&reconn->un,
					sizeof(reconn->un));
			if (ret == -2) {
				close(reconn->fd);
				RTE_LOG(ERR, VHOST_CONFIG,
					"reconnection for fd %d failed\n",
					reconn->fd);
				goto remove_fd;
			}
			if (ret == -1)
				continue;

			RTE_LOG(INFO, VHOST_CONFIG,
				"%s: connected\n", reconn->vsocket->path);
			vhost_user_add_connection(reconn->fd, reconn->vsocket);
remove_fd:
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
			free(reconn);
		}

		pthread_mutex_unlock(&reconn_list.mutex);
		sleep(1);
	}

	return NULL;
}

static int
vhost_user_reconnect_init(void)
{
	int ret;

	pthread_mutex_init(&reconn_list.mutex, NULL);
	TAILQ_INIT(&reconn_list.head);

	ret = pthread_create(&reconn_tid, NULL,
			     vhost_user_client_reconnect, NULL);
	if (ret < 0)
		RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");

	return ret;
}

static int
vhost_user_create_client(struct vhost_user_socket *vsocket)
{
	int fd;
	int ret;
	struct sockaddr_un un;
	const char *path = vsocket->path;
	struct vhost_user_reconnect *reconn;

	fd = create_unix_socket(path, &un, vsocket->is_server);
	if (fd < 0)
		return -1;

	ret = vhost_user_connect_nonblock(fd, (struct sockaddr *)&un,
					  sizeof(un));
	if (ret == 0) {
		vhost_user_add_connection(fd, vsocket);
		return 0;
	}

	RTE_LOG(WARNING, VHOST_CONFIG,
		"failed to connect to %s: %s\n",
		path, strerror(errno));

	if (ret == -2 || !vsocket->reconnect) {
		close(fd);
		return -1;
	}

	RTE_LOG(INFO, VHOST_CONFIG, "%s: reconnecting...\n", path);
	reconn = malloc(sizeof(*reconn));
	if (reconn == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to allocate memory for reconnect\n");
		close(fd);
		return -1;
	}
	reconn->un = un;
	reconn->fd = fd;
	reconn->vsocket = vsocket;
	pthread_mutex_lock(&reconn_list.mutex);
	TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
	pthread_mutex_unlock(&reconn_list.mutex);

	return 0;
}

/*
 * Register a new vhost-user socket; here we could act as server
 * (the default case), or client (when the RTE_VHOST_USER_CLIENT
 * flag is set).
 */
int
rte_vhost_driver_register(const char *path, uint64_t flags)
{
	int ret = -1;
	struct vhost_user_socket *vsocket;

	if (!path)
		return -1;

	pthread_mutex_lock(&vhost_user.mutex);

	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"error: the number of vhost sockets reaches maximum\n");
		goto out;
	}

	vsocket = malloc(sizeof(struct vhost_user_socket));
	if (!vsocket)
		goto out;
	memset(vsocket, 0, sizeof(struct vhost_user_socket));
	vsocket->path = strdup(path);
	vsocket->connfd = -1;
	vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
		if (vsocket->reconnect && reconn_tid == 0) {
			if (vhost_user_reconnect_init() < 0) {
				free(vsocket->path);
				free(vsocket);
				goto out;
			}
		}
		ret = vhost_user_create_client(vsocket);
	} else {
		vsocket->is_server = true;
		ret = vhost_user_create_server(vsocket);
	}
	if (ret < 0) {
		free(vsocket->path);
		free(vsocket);
		goto out;
	}

	vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;

out:
	pthread_mutex_unlock(&vhost_user.mutex);

	return ret;
}
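
/*
 * A minimal usage sketch (illustrative only; the socket path is made up):
 *
 *	rte_vhost_driver_register("/tmp/sock0", RTE_VHOST_USER_CLIENT);
 *	rte_vhost_driver_session_start();
 *
 * rte_vhost_driver_session_start() never returns under normal operation,
 * so applications usually run it from a dedicated thread.
 */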

static bool
vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
{
	int found = false;
	struct vhost_user_reconnect *reconn, *next;

	pthread_mutex_lock(&reconn_list.mutex);

	for (reconn = TAILQ_FIRST(&reconn_list.head);
	     reconn != NULL; reconn = next) {
		next = TAILQ_NEXT(reconn, next);

		if (reconn->vsocket == vsocket) {
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
			close(reconn->fd);
			free(reconn);
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&reconn_list.mutex);
	return found;
}
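
/*
 * Note: the VM(s) attached to this socket are expected to have been shut
 * down before rte_vhost_driver_unregister() is called; fdset_del() also
 * makes sure an fd is not removed while its callback is still executing.
 */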

/**
 * Unregister the specified vhost socket
 */
int
rte_vhost_driver_unregister(const char *path)
{
	int i;
	int count;
	struct vhost_user_connection *conn;

	pthread_mutex_lock(&vhost_user.mutex);

	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];

		if (!strcmp(vsocket->path, path)) {
			if (vsocket->is_server) {
				fdset_del(&vhost_user.fdset, vsocket->listenfd);
				close(vsocket->listenfd);
				unlink(path);
			} else if (vsocket->reconnect) {
				vhost_user_remove_reconnect(vsocket);
			}

			conn = fdset_del(&vhost_user.fdset, vsocket->connfd);
			if (conn) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"free connfd = %d for device '%s'\n",
					vsocket->connfd, path);
				close(vsocket->connfd);
				vhost_destroy_device(conn->vid);
				free(conn);
			}

			free(vsocket->path);
			free(vsocket);

			count = --vhost_user.vsocket_cnt;
			vhost_user.vsockets[i] = vhost_user.vsockets[count];
			vhost_user.vsockets[count] = NULL;
			pthread_mutex_unlock(&vhost_user.mutex);

			return 0;
		}
	}
	pthread_mutex_unlock(&vhost_user.mutex);

	return -1;
}

int
rte_vhost_driver_session_start(void)
{
	fdset_event_dispatch(&vhost_user.fdset);
	return 0;
}