eal/linux: add function to allow interruptible epoll

The existing definition of rte_epoll_wait retries if interrupted
by a signal. This behavior makes it hard to use rte_epoll_wait
for applications that want to use signals to do things like
exiting the polling loop and shutting down.

Since changing existing semantic might break applications, add
a new rte_epoll_wait_interruptible() function that does the
same thing as rte_epoll_wait but will return -1 and errno of EINTR
if it receives a signal.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Harman Kalra <hkalra@marvell.com>
This commit is contained in:
Stephen Hemminger 2020-09-03 16:28:22 -07:00 committed by Thomas Monjalon
parent 0e8704a453
commit bb548625c6
4 changed files with 59 additions and 5 deletions

View File

@ -684,6 +684,18 @@ rte_epoll_wait(int epfd, struct rte_epoll_event *events,
return -ENOTSUP;
}
/* Stub: epoll is Linux-only, so interruptible epoll waiting is
 * unsupported on this platform.
 */
int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout)
{
	/* Silence unused-parameter warnings; nothing to do here. */
	RTE_SET_USED(timeout);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(events);
	RTE_SET_USED(epfd);

	return -ENOTSUP;
}
int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{

View File

@ -95,6 +95,7 @@ struct rte_intr_handle {
/**
* It waits for events on the epoll instance.
* Retries if signal received.
*
* @param epfd
* Epoll instance fd on which the caller wait for events.
@ -113,6 +114,28 @@ int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout);
/**
 * It waits for events on the epoll instance.
 * Does not retry if a signal is received; in that case it returns -1
 * with errno set to EINTR, unlike rte_epoll_wait() which retries.
 *
 * @param epfd
 *   Epoll instance fd on which the caller waits for events.
 * @param events
 *   Memory area that contains the events that will be available for the caller.
 * @param maxevents
 *   Up to maxevents are returned; must be greater than zero.
 * @param timeout
 *   Specifying a timeout of -1 causes blocking indefinitely.
 *   Specifying a timeout equal to zero causes an immediate return.
 * @return
 *   - On success, returns the number of available events.
 *   - On failure, a negative value (and EINTR in errno if interrupted).
 */
__rte_experimental
int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout);
/**
* It performs control operations on epoll instance referred by the epfd.
* It requests that the operation op be performed for the target fd.

View File

@ -1275,9 +1275,9 @@ rte_intr_tls_epfd(void)
return RTE_PER_LCORE(_epfd);
}
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout)
static int
eal_epoll_wait(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout, bool interruptible)
{
struct epoll_event evs[maxevents];
int rc;
@ -1298,8 +1298,12 @@ rte_epoll_wait(int epfd, struct rte_epoll_event *events,
rc = eal_epoll_process_event(evs, rc, events);
break;
} else if (rc < 0) {
if (errno == EINTR)
continue;
if (errno == EINTR) {
if (interruptible)
return -1;
else
continue;
}
/* epoll_wait fail */
RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
strerror(errno));
@ -1314,6 +1318,20 @@ rte_epoll_wait(int epfd, struct rte_epoll_event *events,
return rc;
}
/* Public wrapper: wait for epoll events, transparently retrying the
 * underlying epoll_wait() whenever it is interrupted by a signal.
 */
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout)
{
	const bool interruptible = false;

	return eal_epoll_wait(epfd, events, maxevents, timeout,
			      interruptible);
}
/* Public wrapper: wait for epoll events, but propagate signal
 * interruption (-1 / errno == EINTR) to the caller instead of retrying.
 */
int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout)
{
	const bool interruptible = true;

	return eal_epoll_wait(epfd, events, maxevents, timeout,
			      interruptible);
}
static inline void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{

View File

@ -398,6 +398,7 @@ EXPERIMENTAL {
# added in 20.11
__rte_eal_trace_generic_size_t;
rte_epoll_wait_interruptible;
rte_service_lcore_may_be_active;
};