Fix a descriptor leak in devd. Clients reading /var/run/devd.pipe can close
their socket connection any time, and devd only notices that when it gets an
error trying to write an event to the client.  On a system with no device
change activity, clients could connect and disappear repeatedly without devd
noticing, leading to an ever-growing list of open socket descriptors in devd.

Now devd uses poll(2) looking for POLLHUP on all existing clients every time
a new client connection is established, and also periodically (once a minute)
to proactively find zombie clients and reap the socket descriptors.  It also
now has a connection limit, configurable with a new -l <num> command line arg.
When the maximum number of connections is reached it stops accepting new
connections until some current clients drop off.

Reviewed by:	imp
Approved by:	cognet (mentor)
This commit is contained in:
Ian Lepore 2013-01-30 15:21:18 +00:00
parent 96c95412ca
commit e1334f935f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=246121
2 changed files with 95 additions and 14 deletions

View File

@ -35,6 +35,7 @@
.Nm
.Op Fl Ddn
.Op Fl f Ar file
.Op Fl l Ar num
.Sh DESCRIPTION
The
.Nm
@ -55,6 +56,12 @@ instead of the default
If option
.Fl f
is specified more than once, the last file specified is used.
.It Fl l Ar num
Limit concurrent
.Pa /var/run/devd.pipe
connections to
.Ar num .
The default connection limit is 10.
.It Fl n
Do not process all pending events before becoming a daemon.
Instead, call daemon right away.

View File

@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
#include <fcntl.h>
#include <libutil.h>
#include <paths.h>
#include <poll.h>
#include <regex.h>
#include <signal.h>
#include <stdlib.h>
@ -814,23 +815,58 @@ create_socket(const char *name)
return (fd);
}
unsigned int max_clients = 10; /* Default, can be overridden on cmdline. */
unsigned int num_clients;      /* Current number of connected clients. */
list<int> clients;             /* Open client socket descriptors. */
void
notify_clients(const char *data, int len)
{
list<int> bad;
list<int>::const_iterator i;
list<int>::iterator i;
for (i = clients.begin(); i != clients.end(); ++i) {
if (write(*i, data, len) <= 0) {
bad.push_back(*i);
/*
* Deliver the data to all clients. Throw clients overboard at the
* first sign of trouble. This reaps clients who've died or closed
* their sockets, and also clients who are alive but failing to keep up
* (or who are maliciously not reading, to consume buffer space in
* kernel memory or tie up the limited number of available connections).
*/
for (i = clients.begin(); i != clients.end(); ) {
if (write(*i, data, len) != len) {
--num_clients;
close(*i);
}
i = clients.erase(i);
} else
++i;
}
}
for (i = bad.begin(); i != bad.end(); ++i)
clients.erase(find(clients.begin(), clients.end(), *i));
/*
 * Reap clients whose connections have gone away.
 *
 * Poll every client socket with a zero timeout and no requested events;
 * poll(2) still reports POLLHUP when the peer has closed its end.
 * Normally a dead client is noticed when a write to it fails, but on a
 * system with no device-change activity that may never happen, so this
 * proactive sweep prevents an ever-growing list of zombie descriptors.
 */
void
check_clients(void)
{
	int s;
	struct pollfd pfd;
	list<int>::iterator i;

	pfd.events = 0;
	for (i = clients.begin(); i != clients.end(); ) {
		pfd.fd = *i;
		s = poll(&pfd, 1, 0);
		/*
		 * Drop the client on a real poll error or on POLLHUP.
		 * poll(2) returns -1 on error and reports the cause in
		 * errno, so the EINTR check must be against errno, not
		 * against the return value (s == -1 is never EINTR, so the
		 * old "s != EINTR" test reaped live clients whenever poll
		 * was interrupted by a signal).
		 */
		if ((s < 0 && errno != EINTR) ||
		    (s > 0 && (pfd.revents & POLLHUP))) {
			--num_clients;
			close(*i);
			i = clients.erase(i);
		} else
			++i;
	}
}
void
@ -838,9 +874,18 @@ new_client(int fd)
{
int s;
/*
* First go reap any zombie clients, then accept the connection, and
* shut down the read side to stop clients from consuming kernel memory
* by sending large buffers full of data we'll never read.
*/
check_clients();
s = accept(fd, NULL, NULL);
if (s != -1)
if (s != -1) {
shutdown(s, SHUT_RD);
clients.push_back(s);
++num_clients;
}
}
static void
@ -851,6 +896,7 @@ event_loop(void)
char buffer[DEVCTL_MAXBUF];
int once = 0;
int server_fd, max_fd;
int accepting;
timeval tv;
fd_set fds;
@ -858,6 +904,7 @@ event_loop(void)
if (fd == -1)
err(1, "Can't open devctl device %s", PATH_DEVCTL);
server_fd = create_socket(PIPE);
accepting = 1;
max_fd = max(fd, server_fd) + 1;
while (1) {
if (romeo_must_die)
@ -880,15 +927,38 @@ event_loop(void)
once++;
}
}
/*
* When we've already got the max number of clients, stop
* accepting new connections (don't put server_fd in the set),
* shrink the accept() queue to reject connections quickly, and
* poll the existing clients more often, so that we notice more
* quickly when any of them disappear to free up client slots.
*/
FD_ZERO(&fds);
FD_SET(fd, &fds);
FD_SET(server_fd, &fds);
rv = select(max_fd, &fds, NULL, NULL, NULL);
if (num_clients < max_clients) {
if (!accepting) {
listen(server_fd, max_clients);
accepting = 1;
}
FD_SET(server_fd, &fds);
tv.tv_sec = 60;
tv.tv_usec = 0;
} else {
if (accepting) {
listen(server_fd, 0);
accepting = 0;
}
tv.tv_sec = 2;
tv.tv_usec = 0;
}
rv = select(max_fd, &fds, NULL, NULL, &tv);
if (rv == -1) {
if (errno == EINTR)
continue;
err(1, "select");
}
} else if (rv == 0)
check_clients();
if (FD_ISSET(fd, &fds)) {
rv = read(fd, buffer, sizeof(buffer) - 1);
if (rv > 0) {
@ -1007,7 +1077,8 @@ gensighand(int)
static void
usage()
{
fprintf(stderr, "usage: %s [-Ddn] [-f file]\n", getprogname());
fprintf(stderr, "usage: %s [-Ddn] [-l connlimit] [-f file]\n",
getprogname());
exit(1);
}
@ -1036,7 +1107,7 @@ main(int argc, char **argv)
int ch;
check_devd_enabled();
while ((ch = getopt(argc, argv, "Ddf:n")) != -1) {
while ((ch = getopt(argc, argv, "Ddf:l:n")) != -1) {
switch (ch) {
case 'D':
Dflag++;
@ -1047,6 +1118,9 @@ main(int argc, char **argv)
case 'f':
configfile = optarg;
break;
case 'l':
max_clients = MAX(1, strtoul(optarg, NULL, 0));
break;
case 'n':
nflag++;
break;