/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
|
|
|
|
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/time.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <unistd.h>
|
2016-12-21 09:45:13 +00:00
|
|
|
#include <string.h>
|
2015-02-23 17:36:30 +00:00
|
|
|
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
#include <rte_common.h>
|
2015-02-23 17:36:30 +00:00
|
|
|
#include <rte_log.h>
|
|
|
|
|
|
|
|
#include "fd_man.h"
|
|
|
|
|
2018-03-28 05:49:25 +00:00
|
|
|
|
|
|
|
#define RTE_LOGTYPE_VHOST_FDMAN RTE_LOGTYPE_USER1
|
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
#define FDPOLLERR (POLLERR | POLLHUP | POLLNVAL)
|
|
|
|
|
2015-02-23 17:36:30 +00:00
|
|
|
static int
|
2016-12-21 09:45:13 +00:00
|
|
|
get_last_valid_idx(struct fdset *pfdset, int last_valid_idx)
|
2015-02-23 17:36:30 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
for (i = last_valid_idx; i >= 0 && pfdset->fd[i].fd == -1; i--)
|
2015-02-23 17:36:30 +00:00
|
|
|
;
|
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
return i;
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
static void
|
|
|
|
fdset_move(struct fdset *pfdset, int dst, int src)
|
2015-02-23 17:36:30 +00:00
|
|
|
{
|
2016-12-21 09:45:13 +00:00
|
|
|
pfdset->fd[dst] = pfdset->fd[src];
|
|
|
|
pfdset->rwfds[dst] = pfdset->rwfds[src];
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
static void
|
2017-02-21 14:25:30 +00:00
|
|
|
fdset_shrink_nolock(struct fdset *pfdset)
|
2015-02-23 17:36:30 +00:00
|
|
|
{
|
2016-12-21 09:45:13 +00:00
|
|
|
int i;
|
|
|
|
int last_valid_idx = get_last_valid_idx(pfdset, pfdset->num - 1);
|
2015-02-23 17:36:30 +00:00
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
for (i = 0; i < last_valid_idx; i++) {
|
|
|
|
if (pfdset->fd[i].fd != -1)
|
|
|
|
continue;
|
2016-07-06 12:24:58 +00:00
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
fdset_move(pfdset, i, last_valid_idx);
|
|
|
|
last_valid_idx = get_last_valid_idx(pfdset, last_valid_idx - 1);
|
|
|
|
}
|
|
|
|
pfdset->num = last_valid_idx + 1;
|
2017-02-21 14:25:30 +00:00
|
|
|
}
|
2016-12-21 09:45:13 +00:00
|
|
|
|
2017-02-21 14:25:30 +00:00
|
|
|
/*
|
|
|
|
* Find deleted fd entries and remove them
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
fdset_shrink(struct fdset *pfdset)
|
|
|
|
{
|
|
|
|
pthread_mutex_lock(&pfdset->fd_mutex);
|
|
|
|
fdset_shrink_nolock(pfdset);
|
2016-12-21 09:45:13 +00:00
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2016-12-21 09:45:13 +00:00
|
|
|
* Returns the index in the fdset for a given fd.
|
2015-02-23 17:36:30 +00:00
|
|
|
* @return
|
2016-12-21 09:45:13 +00:00
|
|
|
* index for the fd, or -1 if fd isn't in the fdset.
|
2015-02-23 17:36:30 +00:00
|
|
|
*/
|
|
|
|
static int
|
2016-12-21 09:45:13 +00:00
|
|
|
fdset_find_fd(struct fdset *pfdset, int fd)
|
2015-02-23 17:36:30 +00:00
|
|
|
{
|
2016-12-21 09:45:13 +00:00
|
|
|
int i;
|
2015-02-23 17:36:30 +00:00
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
for (i = 0; i < pfdset->num && pfdset->fd[i].fd != fd; i++)
|
|
|
|
;
|
2015-02-23 17:36:30 +00:00
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
return i == pfdset->num ? -1 : i;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fdset_add_fd(struct fdset *pfdset, int idx, int fd,
|
|
|
|
fd_cb rcb, fd_cb wcb, void *dat)
|
|
|
|
{
|
|
|
|
struct fdentry *pfdentry = &pfdset->fd[idx];
|
|
|
|
struct pollfd *pfd = &pfdset->rwfds[idx];
|
|
|
|
|
|
|
|
pfdentry->fd = fd;
|
|
|
|
pfdentry->rcb = rcb;
|
|
|
|
pfdentry->wcb = wcb;
|
|
|
|
pfdentry->dat = dat;
|
|
|
|
|
|
|
|
pfd->fd = fd;
|
|
|
|
pfd->events = rcb ? POLLIN : 0;
|
|
|
|
pfd->events |= wcb ? POLLOUT : 0;
|
|
|
|
pfd->revents = 0;
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
fdset_init(struct fdset *pfdset)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (pfdset == NULL)
|
|
|
|
return;
|
|
|
|
|
2016-07-21 12:55:36 +00:00
|
|
|
for (i = 0; i < MAX_FDS; i++) {
|
2015-02-23 17:36:30 +00:00
|
|
|
pfdset->fd[i].fd = -1;
|
2016-07-21 12:55:36 +00:00
|
|
|
pfdset->fd[i].dat = NULL;
|
|
|
|
}
|
2015-02-23 17:36:30 +00:00
|
|
|
pfdset->num = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Register the fd in the fdset with read/write handler and context.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (pfdset == NULL || fd == -1)
|
|
|
|
return -1;
|
|
|
|
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
pthread_mutex_lock(&pfdset->fd_mutex);
|
2016-12-21 09:45:13 +00:00
|
|
|
i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
|
|
|
|
if (i == -1) {
|
2018-12-06 16:00:07 +00:00
|
|
|
pthread_mutex_lock(&pfdset->fd_pooling_mutex);
|
2017-02-21 14:25:30 +00:00
|
|
|
fdset_shrink_nolock(pfdset);
|
2018-12-06 16:00:07 +00:00
|
|
|
pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
|
2017-02-21 14:25:30 +00:00
|
|
|
i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
|
|
|
|
if (i == -1) {
|
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
|
|
|
return -2;
|
|
|
|
}
|
2015-12-10 17:57:19 +00:00
|
|
|
}
|
2015-02-23 17:36:30 +00:00
|
|
|
|
2016-12-21 09:45:13 +00:00
|
|
|
fdset_add_fd(pfdset, i, fd, rcb, wcb, dat);
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
|
|
|
|
2015-02-23 17:36:30 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Unregister the fd from the fdset.
|
2016-07-21 12:55:36 +00:00
|
|
|
* Returns context of a given fd or NULL.
|
2015-02-23 17:36:30 +00:00
|
|
|
*/
|
2016-07-21 12:55:36 +00:00
|
|
|
void *
|
2015-02-23 17:36:30 +00:00
|
|
|
fdset_del(struct fdset *pfdset, int fd)
|
|
|
|
{
|
|
|
|
int i;
|
2016-07-21 12:55:36 +00:00
|
|
|
void *dat = NULL;
|
2015-02-23 17:36:30 +00:00
|
|
|
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
if (pfdset == NULL || fd == -1)
|
2016-07-21 12:55:36 +00:00
|
|
|
return NULL;
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
|
2015-03-06 05:52:43 +00:00
|
|
|
do {
|
|
|
|
pthread_mutex_lock(&pfdset->fd_mutex);
|
|
|
|
|
|
|
|
i = fdset_find_fd(pfdset, fd);
|
|
|
|
if (i != -1 && pfdset->fd[i].busy == 0) {
|
|
|
|
/* busy indicates r/wcb is executing! */
|
2016-07-21 12:55:36 +00:00
|
|
|
dat = pfdset->fd[i].dat;
|
2015-03-06 05:52:43 +00:00
|
|
|
pfdset->fd[i].fd = -1;
|
|
|
|
pfdset->fd[i].rcb = pfdset->fd[i].wcb = NULL;
|
2016-07-21 12:55:36 +00:00
|
|
|
pfdset->fd[i].dat = NULL;
|
2015-03-06 05:52:43 +00:00
|
|
|
i = -1;
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
}
|
2015-03-06 05:52:43 +00:00
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
|
|
|
} while (i != -1);
|
2016-07-21 12:55:36 +00:00
|
|
|
|
|
|
|
return dat;
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
|
|
|
|
2018-04-27 15:19:43 +00:00
|
|
|
/**
|
|
|
|
* Unregister the fd from the fdset.
|
|
|
|
*
|
|
|
|
* If parameters are invalid, return directly -2.
|
|
|
|
* And check whether fd is busy, if yes, return -1.
|
|
|
|
* Otherwise, try to delete the fd from fdset and
|
|
|
|
* return true.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
fdset_try_del(struct fdset *pfdset, int fd)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (pfdset == NULL || fd == -1)
|
|
|
|
return -2;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&pfdset->fd_mutex);
|
|
|
|
i = fdset_find_fd(pfdset, fd);
|
|
|
|
if (i != -1 && pfdset->fd[i].busy) {
|
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i != -1) {
|
|
|
|
pfdset->fd[i].fd = -1;
|
|
|
|
pfdset->fd[i].rcb = pfdset->fd[i].wcb = NULL;
|
|
|
|
pfdset->fd[i].dat = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
2015-06-30 09:20:47 +00:00
|
|
|
|
2015-02-23 17:36:30 +00:00
|
|
|
/**
|
|
|
|
* This functions runs in infinite blocking loop until there is no fd in
|
|
|
|
* pfdset. It calls corresponding r/w handler if there is event on the fd.
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
*
|
|
|
|
* Before the callback is called, we set the flag to busy status; If other
|
|
|
|
* thread(now rte_vhost_driver_unregister) calls fdset_del concurrently, it
|
|
|
|
* will wait until the flag is reset to zero(which indicates the callback is
|
|
|
|
* finished), then it could free the context after fdset_del.
|
2015-02-23 17:36:30 +00:00
|
|
|
*/
|
vhost: introduce API to start a specific driver
We used to use rte_vhost_driver_session_start() to trigger the vhost-user
session. It takes no argument, thus it's a global trigger. And it could
be problematic.
The issue is, currently, rte_vhost_driver_register(path, flags) actually
tries to put it into the session loop (by fdset_add). However, it needs
a set of APIs to set a vhost-user driver properly:
* rte_vhost_driver_register(path, flags);
* rte_vhost_driver_set_features(path, features);
* rte_vhost_driver_callback_register(path, vhost_device_ops);
If a new vhost-user driver is registered after the trigger (think OVS-DPDK
that could add a port dynamically from cmdline), the current code will
effectively starts the session for the new driver just after the first
API rte_vhost_driver_register() is invoked, leaving later calls taking
no effect at all.
To handle the case properly, this patch introduce a new API,
rte_vhost_driver_start(path), to trigger a specific vhost-user driver.
To do that, the rte_vhost_driver_register(path, flags) is simplified
to create the socket only and let rte_vhost_driver_start(path) to
actually put it into the session loop.
Meanwhile, the rte_vhost_driver_session_start is removed: we could hide
the session thread internally (create the thread if it has not been
created). This would also simplify the application.
NOTE: the API order in prog guide is slightly adjusted for showing the
correct invoke order.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
2017-04-01 07:22:56 +00:00
|
|
|
void *
|
|
|
|
fdset_event_dispatch(void *arg)
|
2015-02-23 17:36:30 +00:00
|
|
|
{
|
2016-12-21 09:45:13 +00:00
|
|
|
int i;
|
|
|
|
struct pollfd *pfd;
|
2015-02-23 17:36:30 +00:00
|
|
|
struct fdentry *pfdentry;
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
fd_cb rcb, wcb;
|
|
|
|
void *dat;
|
2016-12-21 09:45:13 +00:00
|
|
|
int fd, numfds;
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
int remove1, remove2;
|
2016-12-21 09:45:13 +00:00
|
|
|
int need_shrink;
|
vhost: introduce API to start a specific driver
We used to use rte_vhost_driver_session_start() to trigger the vhost-user
session. It takes no argument, thus it's a global trigger. And it could
be problematic.
The issue is, currently, rte_vhost_driver_register(path, flags) actually
tries to put it into the session loop (by fdset_add). However, it needs
a set of APIs to set a vhost-user driver properly:
* rte_vhost_driver_register(path, flags);
* rte_vhost_driver_set_features(path, features);
* rte_vhost_driver_callback_register(path, vhost_device_ops);
If a new vhost-user driver is registered after the trigger (think OVS-DPDK
that could add a port dynamically from cmdline), the current code will
effectively starts the session for the new driver just after the first
API rte_vhost_driver_register() is invoked, leaving later calls taking
no effect at all.
To handle the case properly, this patch introduce a new API,
rte_vhost_driver_start(path), to trigger a specific vhost-user driver.
To do that, the rte_vhost_driver_register(path, flags) is simplified
to create the socket only and let rte_vhost_driver_start(path) to
actually put it into the session loop.
Meanwhile, the rte_vhost_driver_session_start is removed: we could hide
the session thread internally (create the thread if it has not been
created). This would also simplify the application.
NOTE: the API order in prog guide is slightly adjusted for showing the
correct invoke order.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
2017-04-01 07:22:56 +00:00
|
|
|
struct fdset *pfdset = arg;
|
2017-09-22 12:17:40 +00:00
|
|
|
int val;
|
2015-02-23 17:36:30 +00:00
|
|
|
|
|
|
|
if (pfdset == NULL)
|
vhost: introduce API to start a specific driver
We used to use rte_vhost_driver_session_start() to trigger the vhost-user
session. It takes no argument, thus it's a global trigger. And it could
be problematic.
The issue is, currently, rte_vhost_driver_register(path, flags) actually
tries to put it into the session loop (by fdset_add). However, it needs
a set of APIs to set a vhost-user driver properly:
* rte_vhost_driver_register(path, flags);
* rte_vhost_driver_set_features(path, features);
* rte_vhost_driver_callback_register(path, vhost_device_ops);
If a new vhost-user driver is registered after the trigger (think OVS-DPDK
that could add a port dynamically from cmdline), the current code will
effectively starts the session for the new driver just after the first
API rte_vhost_driver_register() is invoked, leaving later calls taking
no effect at all.
To handle the case properly, this patch introduce a new API,
rte_vhost_driver_start(path), to trigger a specific vhost-user driver.
To do that, the rte_vhost_driver_register(path, flags) is simplified
to create the socket only and let rte_vhost_driver_start(path) to
actually put it into the session loop.
Meanwhile, the rte_vhost_driver_session_start is removed: we could hide
the session thread internally (create the thread if it has not been
created). This would also simplify the application.
NOTE: the API order in prog guide is slightly adjusted for showing the
correct invoke order.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
2017-04-01 07:22:56 +00:00
|
|
|
return NULL;
|
2015-02-23 17:36:30 +00:00
|
|
|
|
|
|
|
while (1) {
|
|
|
|
|
2015-06-30 09:20:50 +00:00
|
|
|
/*
|
2016-12-21 09:45:13 +00:00
|
|
|
* When poll is blocked, other threads might unregister
|
2015-06-30 09:20:50 +00:00
|
|
|
* listenfds from and register new listenfds into fdset.
|
2016-12-21 09:45:13 +00:00
|
|
|
* When poll returns, the entries for listenfds in the fdset
|
2015-06-30 09:20:50 +00:00
|
|
|
* might have been updated. It is ok if there is unwanted call
|
|
|
|
* for new listenfds.
|
|
|
|
*/
|
2016-12-21 09:45:13 +00:00
|
|
|
pthread_mutex_lock(&pfdset->fd_mutex);
|
|
|
|
numfds = pfdset->num;
|
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
2015-02-23 17:36:30 +00:00
|
|
|
|
2018-12-06 16:00:07 +00:00
|
|
|
pthread_mutex_lock(&pfdset->fd_pooling_mutex);
|
2017-09-22 12:17:40 +00:00
|
|
|
val = poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
|
2018-12-06 16:00:07 +00:00
|
|
|
pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
|
2017-09-22 12:17:40 +00:00
|
|
|
if (val < 0)
|
|
|
|
continue;
|
2016-12-21 09:45:13 +00:00
|
|
|
|
|
|
|
need_shrink = 0;
|
|
|
|
for (i = 0; i < numfds; i++) {
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
pthread_mutex_lock(&pfdset->fd_mutex);
|
2016-12-21 09:45:13 +00:00
|
|
|
|
2015-02-23 17:36:30 +00:00
|
|
|
pfdentry = &pfdset->fd[i];
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
fd = pfdentry->fd;
|
2016-12-21 09:45:13 +00:00
|
|
|
pfd = &pfdset->rwfds[i];
|
|
|
|
|
|
|
|
if (fd < 0) {
|
|
|
|
need_shrink = 1;
|
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!pfd->revents) {
|
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
remove1 = remove2 = 0;
|
|
|
|
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
rcb = pfdentry->rcb;
|
|
|
|
wcb = pfdentry->wcb;
|
|
|
|
dat = pfdentry->dat;
|
|
|
|
pfdentry->busy = 1;
|
2016-12-21 09:45:13 +00:00
|
|
|
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
pthread_mutex_unlock(&pfdset->fd_mutex);
|
2016-12-21 09:45:13 +00:00
|
|
|
|
|
|
|
if (rcb && pfd->revents & (POLLIN | FDPOLLERR))
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
rcb(fd, dat, &remove1);
|
2016-12-21 09:45:13 +00:00
|
|
|
if (wcb && pfd->revents & (POLLOUT | FDPOLLERR))
|
vhost: support dynamically registering server
* support calling rte_vhost_driver_register after rte_vhost_driver_session_start
* add mutext to protect fdset from concurrent access
* add busy flag in fdentry. this flag is set before cb and cleared after cb is finished.
mutex lock scenario in vhost:
* event_dispatch(in rte_vhost_driver_session_start) runs in a separate thread, infinitely
processing vhost messages through cb(callback).
* event_dispatch acquires the lock, get the cb and its context, mark the busy flag,
and releases the mutex.
* vserver_new_vq_conn cb calls fdset_add, which acquires the mutex and add new fd into fdset.
* vserver_message_handler cb frees data context, marks remove flag to request to delete
connfd(connection fd) from fdset.
* after cb returns, event_dispatch
1. clears busy flag.
2. if there is remove request, call fdset_del, which acquires mutex, checks busy flag, and
removes connfd from fdset.
* rte_vhost_driver_unregister(not implemented) runs in another thread, acquires the mutex,
calls fdset_del to remove fd(listenerfd) from fdset. Then it could free data context.
The above steps ensures fd data context isn't freed when cb is using.
VM(s) should have been shutdown before rte_vhost_driver_unregister.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Tetsuya Mukawa <mukawa@igel.co.jp>
2015-02-23 17:36:33 +00:00
|
|
|
wcb(fd, dat, &remove2);
|
|
|
|
pfdentry->busy = 0;
|
|
|
|
/*
|
|
|
|
* fdset_del needs to check busy flag.
|
|
|
|
* We don't allow fdset_del to be called in callback
|
|
|
|
* directly.
|
|
|
|
*/
|
2015-06-30 09:20:47 +00:00
|
|
|
/*
|
|
|
|
* When we are to clean up the fd from fdset,
|
|
|
|
* because the fd is closed in the cb,
|
|
|
|
* the old fd val could be reused by when creates new
|
|
|
|
* listen fd in another thread, we couldn't call
|
2018-04-27 15:19:45 +00:00
|
|
|
* fdset_del.
|
2015-06-30 09:20:47 +00:00
|
|
|
*/
|
2016-12-21 09:45:13 +00:00
|
|
|
if (remove1 || remove2) {
|
|
|
|
pfdentry->fd = -1;
|
|
|
|
need_shrink = 1;
|
|
|
|
}
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
2016-12-21 09:45:13 +00:00
|
|
|
|
|
|
|
if (need_shrink)
|
|
|
|
fdset_shrink(pfdset);
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
vhost: introduce API to start a specific driver
We used to use rte_vhost_driver_session_start() to trigger the vhost-user
session. It takes no argument, thus it's a global trigger. And it could
be problematic.
The issue is, currently, rte_vhost_driver_register(path, flags) actually
tries to put it into the session loop (by fdset_add). However, it needs
a set of APIs to set a vhost-user driver properly:
* rte_vhost_driver_register(path, flags);
* rte_vhost_driver_set_features(path, features);
* rte_vhost_driver_callback_register(path, vhost_device_ops);
If a new vhost-user driver is registered after the trigger (think OVS-DPDK
that could add a port dynamically from cmdline), the current code will
effectively starts the session for the new driver just after the first
API rte_vhost_driver_register() is invoked, leaving later calls taking
no effect at all.
To handle the case properly, this patch introduces a new API,
rte_vhost_driver_start(path), to trigger a specific vhost-user driver.
To do that, the rte_vhost_driver_register(path, flags) is simplified
to create the socket only and let rte_vhost_driver_start(path) to
actually put it into the session loop.
Meanwhile, the rte_vhost_driver_session_start is removed: we could hide
the session thread internally (create the thread if it has not been
created). This would also simplify the application.
NOTE: the API order in prog guide is slightly adjusted for showing the
correct invoke order.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
2017-04-01 07:22:56 +00:00
|
|
|
|
|
|
|
return NULL;
|
2015-02-23 17:36:30 +00:00
|
|
|
}
|
2018-03-28 05:49:25 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
fdset_pipe_read_cb(int readfd, void *dat __rte_unused,
|
|
|
|
int *remove __rte_unused)
|
|
|
|
{
|
|
|
|
char charbuf[16];
|
|
|
|
int r = read(readfd, charbuf, sizeof(charbuf));
|
|
|
|
/*
|
|
|
|
* Just an optimization, we don't care if read() failed
|
|
|
|
* so ignore explicitly its return value to make the
|
|
|
|
* compiler happy
|
|
|
|
*/
|
|
|
|
RTE_SET_USED(r);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
fdset_pipe_uninit(struct fdset *fdset)
|
|
|
|
{
|
|
|
|
fdset_del(fdset, fdset->u.readfd);
|
|
|
|
close(fdset->u.readfd);
|
|
|
|
close(fdset->u.writefd);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
fdset_pipe_init(struct fdset *fdset)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (pipe(fdset->u.pipefd) < 0) {
|
|
|
|
RTE_LOG(ERR, VHOST_FDMAN,
|
|
|
|
"failed to create pipe for vhost fdset\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = fdset_add(fdset, fdset->u.readfd,
|
|
|
|
fdset_pipe_read_cb, NULL, NULL);
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
RTE_LOG(ERR, VHOST_FDMAN,
|
|
|
|
"failed to add pipe readfd %d into vhost server fdset\n",
|
|
|
|
fdset->u.readfd);
|
|
|
|
|
|
|
|
fdset_pipe_uninit(fdset);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
fdset_pipe_notify(struct fdset *fdset)
|
|
|
|
{
|
|
|
|
int r = write(fdset->u.writefd, "1", 1);
|
|
|
|
/*
|
|
|
|
* Just an optimization, we don't care if write() failed
|
|
|
|
* so ignore explicitly its return value to make the
|
|
|
|
* compiler happy
|
|
|
|
*/
|
|
|
|
RTE_SET_USED(r);
|
|
|
|
|
|
|
|
}
|