diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c index 7f39bd889a2c..0f03779beb12 100644 --- a/sys/kern/kern_mbuf.c +++ b/sys/kern/kern_mbuf.c @@ -110,7 +110,6 @@ tunable_mbinit(void *dummy) } SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL); -SYSCTL_DECL(_kern_ipc); /* XXX: These should be tuneables. Can't change UMA limits on the fly. */ static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS) diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c index 3b631988d4bf..b873a8b02848 100644 --- a/sys/kern/sys_pipe.c +++ b/sys/kern/sys_pipe.c @@ -181,8 +181,6 @@ static int pipeallocfail; static int piperesizefail; static int piperesizeallowed = 1; -SYSCTL_DECL(_kern_ipc); - SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN, &maxpipekva, 0, "Pipe KVA limit"); SYSCTL_INT(_kern_ipc, OID_AUTO, pipes, CTLFLAG_RD, diff --git a/sys/kern/sysv_msg.c b/sys/kern/sysv_msg.c index 60a943171a6e..4dea9e14c437 100644 --- a/sys/kern/sysv_msg.c +++ b/sys/kern/sysv_msg.c @@ -1311,7 +1311,6 @@ sysctl_msqids(SYSCTL_HANDLER_ARGS) sizeof(struct msqid_kernel) * msginfo.msgmni)); } -SYSCTL_DECL(_kern_ipc); SYSCTL_INT(_kern_ipc, OID_AUTO, msgmax, CTLFLAG_RD, &msginfo.msgmax, 0, "Maximum message size"); SYSCTL_INT(_kern_ipc, OID_AUTO, msgmni, CTLFLAG_RDTUN, &msginfo.msgmni, 0, diff --git a/sys/kern/sysv_sem.c b/sys/kern/sysv_sem.c index b32ec0f09c6e..12e47aacd8bb 100644 --- a/sys/kern/sysv_sem.c +++ b/sys/kern/sysv_sem.c @@ -196,7 +196,6 @@ struct seminfo seminfo = { SEMAEM /* adjust on exit max value */ }; -SYSCTL_DECL(_kern_ipc); SYSCTL_INT(_kern_ipc, OID_AUTO, semmap, CTLFLAG_RW, &seminfo.semmap, 0, "Number of entries in the semaphore map"); SYSCTL_INT(_kern_ipc, OID_AUTO, semmni, CTLFLAG_RDTUN, &seminfo.semmni, 0, diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c index d7858915be13..175bc67cff30 100644 --- a/sys/kern/sysv_shm.c +++ b/sys/kern/sysv_shm.c @@ -180,7 +180,6 @@ struct shminfo shminfo = { static int shm_use_phys; static int shm_allow_removed; -SYSCTL_DECL(_kern_ipc); SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "Maximum shared memory segment size"); SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c index 5c4c5bc51a86..b58be2d3be84 100644 --- a/sys/kern/uipc_mbuf.c +++ b/sys/kern/uipc_mbuf.c @@ -64,7 +64,6 @@ int m_defragrandomfailures; /* * sysctl(8) exported objects */ -SYSCTL_DECL(_kern_ipc); SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD, &max_linkhdr, 0, "Size of largest link layer header"); SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD, diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c index b18ff4a1591c..a39c685372a5 100644 --- a/sys/kern/uipc_sockbuf.c +++ b/sys/kern/uipc_sockbuf.c @@ -57,8 +57,6 @@ __FBSDID("$FreeBSD$"); #include <sys/sysctl.h> #include <sys/systm.h> -int maxsockets; - void (*aio_swake)(struct socket *, struct sockbuf *); /* @@ -71,12 +69,6 @@ static u_long sb_max_adj = static u_long sb_efficiency = 8; /* parameter for sbreserve() */ -#ifdef REGRESSION -static int regression_sonewconn_earlytest = 1; -SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW, - ®ression_sonewconn_earlytest, 0, "Perform early sonewconn limit test"); -#endif - /* * Procedures to manipulate state flags of socket * and do appropriate wakeups. 
Normal sequence from the @@ -201,97 +193,6 @@ soisdisconnected(so) wakeup(&so->so_timeo); } -/* - * When an attempt at a new connection is noted on a socket - * which accepts connections, sonewconn is called. If the - * connection is possible (subject to space constraints, etc.) - * then we allocate a new structure, propoerly linked into the - * data structure of the original socket, and return this. - * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED. - * - * note: the ref count on the socket is 0 on return - */ -struct socket * -sonewconn(head, connstatus) - register struct socket *head; - int connstatus; -{ - register struct socket *so; - int over; - - ACCEPT_LOCK(); - over = (head->so_qlen > 3 * head->so_qlimit / 2); - ACCEPT_UNLOCK(); -#ifdef REGRESSION - if (regression_sonewconn_earlytest && over) -#else - if (over) -#endif - return (NULL); - so = soalloc(M_NOWAIT); - if (so == NULL) - return (NULL); - if ((head->so_options & SO_ACCEPTFILTER) != 0) - connstatus = 0; - so->so_head = head; - so->so_type = head->so_type; - so->so_options = head->so_options &~ SO_ACCEPTCONN; - so->so_linger = head->so_linger; - so->so_state = head->so_state | SS_NOFDREF; - so->so_proto = head->so_proto; - so->so_timeo = head->so_timeo; - so->so_cred = crhold(head->so_cred); -#ifdef MAC - SOCK_LOCK(head); - mac_create_socket_from_socket(head, so); - SOCK_UNLOCK(head); -#endif - knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv), - NULL, NULL, NULL); - knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd), - NULL, NULL, NULL); - if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) || - (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) { - sodealloc(so); - return (NULL); - } - so->so_state |= connstatus; - ACCEPT_LOCK(); - if (connstatus) { - TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); - so->so_qstate |= SQ_COMP; - head->so_qlen++; - } else { - /* - * Keep removing sockets from the head until there's room for - * us to insert on the tail. In pre-locking revisions, this - * was a simple if(), but as we could be racing with other - * threads and soabort() requires dropping locks, we must - * loop waiting for the condition to be true. - */ - while (head->so_incqlen > head->so_qlimit) { - struct socket *sp; - sp = TAILQ_FIRST(&head->so_incomp); - TAILQ_REMOVE(&head->so_incomp, sp, so_list); - head->so_incqlen--; - sp->so_qstate &= ~SQ_INCOMP; - sp->so_head = NULL; - ACCEPT_UNLOCK(); - soabort(sp); - ACCEPT_LOCK(); - } - TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); - so->so_qstate |= SQ_INCOMP; - head->so_incqlen++; - } - ACCEPT_UNLOCK(); - if (connstatus) { - sorwakeup(head); - wakeup_one(&head->so_timeo); - } - return (so); -} - /* * Socantsendmore indicates that no more data will be sent on the * socket; it would normally be applied to a socket when the user @@ -1498,49 +1399,10 @@ sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb) xsb->sb_timeo = sb->sb_timeo; } -/* - * Here is the definition of some of the basic objects in the kern.ipc - * branch of the MIB. - */ -SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC"); - /* This takes the place of kern.maxsockbuf, which moved to kern.ipc. 
*/ static int dummy; SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, ""); SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW, &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size"); -static int -sysctl_maxsockets(SYSCTL_HANDLER_ARGS) -{ - int error, newmaxsockets; - - newmaxsockets = maxsockets; - error = sysctl_handle_int(oidp, &newmaxsockets, sizeof(int), req); - if (error == 0 && req->newptr) { - if (newmaxsockets > maxsockets) { - maxsockets = newmaxsockets; - if (maxsockets > ((maxfiles / 4) * 3)) { - maxfiles = (maxsockets * 5) / 4; - maxfilesperproc = (maxfiles * 9) / 10; - } - EVENTHANDLER_INVOKE(maxsockets_change); - } else - error = EINVAL; - } - return (error); -} -SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW, - &maxsockets, 0, sysctl_maxsockets, "IU", - "Maximum number of sockets avaliable"); SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW, &sb_efficiency, 0, ""); - -/* - * Initialise maxsockets - */ -static void init_maxsockets(void *ignored) -{ - TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets); - maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters)); -} -SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL); diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c index f638f9132e7a..fa91a0c4c541 100644 --- a/sys/kern/uipc_socket.c +++ b/sys/kern/uipc_socket.c @@ -109,6 +109,7 @@ __FBSDID("$FreeBSD$"); #include <sys/file.h> /* for struct knote */ #include <sys/kernel.h> #include <sys/event.h> +#include <sys/eventhandler.h> #include <sys/poll.h> #include <sys/proc.h> #include <sys/protosw.h> @@ -148,11 +149,11 @@ static struct filterops sowrite_filtops = uma_zone_t socket_zone; so_gen_t so_gencnt; /* generation count for sockets */ +int maxsockets; + MALLOC_DEFINE(M_SONAME, "soname", "socket name"); MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); -SYSCTL_DECL(_kern_ipc); - static int somaxconn = SOMAXCONN; static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS); /* XXX: we dont have SYSCTL_USHORT */ @@ -188,6 +189,43 @@ MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF); static struct mtx so_global_mtx; MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF); +SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC"); + +static int +sysctl_maxsockets(SYSCTL_HANDLER_ARGS) +{ + int error, newmaxsockets; + + newmaxsockets = maxsockets; + error = sysctl_handle_int(oidp, &newmaxsockets, sizeof(int), req); + if (error == 0 && req->newptr) { + if (newmaxsockets > maxsockets) { + maxsockets = newmaxsockets; + if (maxsockets > ((maxfiles / 4) * 3)) { + maxfiles = (maxsockets * 5) / 4; + maxfilesperproc = (maxfiles * 9) / 10; + } + EVENTHANDLER_INVOKE(maxsockets_change); + } else + error = EINVAL; + } + return (error); +} + +SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW, + &maxsockets, 0, sysctl_maxsockets, "IU", + "Maximum number of sockets avaliable"); + +/* + * Initialise maxsockets + */ +static void init_maxsockets(void *ignored) +{ + TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets); + maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters)); +} +SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL); + /* * Socket operation routines. * These routines are called by the routines in @@ -204,7 +242,7 @@ MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF); * * soalloc() returns a socket with a ref count of 0. 
*/ -struct socket * +static struct socket * soalloc(int mflags) { struct socket *so; @@ -228,6 +266,39 @@ soalloc(int mflags) return (so); } +static void +sodealloc(struct socket *so) +{ + + KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count)); + KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL")); + + mtx_lock(&so_global_mtx); + so->so_gencnt = ++so_gencnt; + mtx_unlock(&so_global_mtx); + if (so->so_rcv.sb_hiwat) + (void)chgsbsize(so->so_cred->cr_uidinfo, + &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY); + if (so->so_snd.sb_hiwat) + (void)chgsbsize(so->so_cred->cr_uidinfo, + &so->so_snd.sb_hiwat, 0, RLIM_INFINITY); +#ifdef INET + /* remove acccept filter if one is present. */ + if (so->so_accf != NULL) + do_setopt_accept_filter(so, NULL); +#endif +#ifdef MAC + mac_destroy_socket(so); +#endif + crfree(so->so_cred); + SOCKBUF_LOCK_DESTROY(&so->so_snd); + SOCKBUF_LOCK_DESTROY(&so->so_rcv); + uma_zfree(socket_zone, so); + mtx_lock(&so_global_mtx); + --numopensockets; + mtx_unlock(&so_global_mtx); +} + /* * socreate returns a socket with a ref count of 1. The socket should be * closed with soclose(). @@ -292,6 +363,103 @@ socreate(dom, aso, type, proto, cred, td) return (0); } +#ifdef REGRESSION +static int regression_sonewconn_earlytest = 1; +SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW, + ®ression_sonewconn_earlytest, 0, "Perform early sonewconn limit test"); +#endif + +/* + * When an attempt at a new connection is noted on a socket + * which accepts connections, sonewconn is called. If the + * connection is possible (subject to space constraints, etc.) + * then we allocate a new structure, propoerly linked into the + * data structure of the original socket, and return this. + * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED. + * + * note: the ref count on the socket is 0 on return + */ +struct socket * +sonewconn(head, connstatus) + register struct socket *head; + int connstatus; +{ + register struct socket *so; + int over; + + ACCEPT_LOCK(); + over = (head->so_qlen > 3 * head->so_qlimit / 2); + ACCEPT_UNLOCK(); +#ifdef REGRESSION + if (regression_sonewconn_earlytest && over) +#else + if (over) +#endif + return (NULL); + so = soalloc(M_NOWAIT); + if (so == NULL) + return (NULL); + if ((head->so_options & SO_ACCEPTFILTER) != 0) + connstatus = 0; + so->so_head = head; + so->so_type = head->so_type; + so->so_options = head->so_options &~ SO_ACCEPTCONN; + so->so_linger = head->so_linger; + so->so_state = head->so_state | SS_NOFDREF; + so->so_proto = head->so_proto; + so->so_timeo = head->so_timeo; + so->so_cred = crhold(head->so_cred); +#ifdef MAC + SOCK_LOCK(head); + mac_create_socket_from_socket(head, so); + SOCK_UNLOCK(head); +#endif + knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv), + NULL, NULL, NULL); + knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd), + NULL, NULL, NULL); + if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) || + (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) { + sodealloc(so); + return (NULL); + } + so->so_state |= connstatus; + ACCEPT_LOCK(); + if (connstatus) { + TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); + so->so_qstate |= SQ_COMP; + head->so_qlen++; + } else { + /* + * Keep removing sockets from the head until there's room for + * us to insert on the tail. In pre-locking revisions, this + * was a simple if(), but as we could be racing with other + * threads and soabort() requires dropping locks, we must + * loop waiting for the condition to be true. 
+ */ + while (head->so_incqlen > head->so_qlimit) { + struct socket *sp; + sp = TAILQ_FIRST(&head->so_incomp); + TAILQ_REMOVE(&head->so_incomp, sp, so_list); + head->so_incqlen--; + sp->so_qstate &= ~SQ_INCOMP; + sp->so_head = NULL; + ACCEPT_UNLOCK(); + soabort(sp); + ACCEPT_LOCK(); + } + TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); + so->so_qstate |= SQ_INCOMP; + head->so_incqlen++; + } + ACCEPT_UNLOCK(); + if (connstatus) { + sorwakeup(head); + wakeup_one(&head->so_timeo); + } + return (so); +} + int sobind(so, nam, td) struct socket *so; @@ -302,39 +470,6 @@ sobind(so, nam, td) return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td)); } -void -sodealloc(struct socket *so) -{ - - KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count)); - KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL")); - - mtx_lock(&so_global_mtx); - so->so_gencnt = ++so_gencnt; - mtx_unlock(&so_global_mtx); - if (so->so_rcv.sb_hiwat) - (void)chgsbsize(so->so_cred->cr_uidinfo, - &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY); - if (so->so_snd.sb_hiwat) - (void)chgsbsize(so->so_cred->cr_uidinfo, - &so->so_snd.sb_hiwat, 0, RLIM_INFINITY); -#ifdef INET - /* remove acccept filter if one is present. */ - if (so->so_accf != NULL) - do_setopt_accept_filter(so, NULL); -#endif -#ifdef MAC - mac_destroy_socket(so); -#endif - crfree(so->so_cred); - SOCKBUF_LOCK_DESTROY(&so->so_snd); - SOCKBUF_LOCK_DESTROY(&so->so_rcv); - uma_zfree(socket_zone, so); - mtx_lock(&so_global_mtx); - --numopensockets; - mtx_unlock(&so_global_mtx); -} - /* * solisten() transitions a socket from a non-listening state to a listening * state, but can also be used to update the listen queue depth on an diff --git a/sys/kern/uipc_socket2.c b/sys/kern/uipc_socket2.c index b18ff4a1591c..a39c685372a5 100644 --- a/sys/kern/uipc_socket2.c +++ b/sys/kern/uipc_socket2.c @@ -57,8 +57,6 @@ __FBSDID("$FreeBSD$"); #include <sys/sysctl.h> #include <sys/systm.h> -int maxsockets; - void (*aio_swake)(struct socket *, struct sockbuf *); /* @@ -71,12 +69,6 @@ static u_long sb_max_adj = static u_long sb_efficiency = 8; /* parameter for sbreserve() */ -#ifdef REGRESSION -static int regression_sonewconn_earlytest = 1; -SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW, - ®ression_sonewconn_earlytest, 0, "Perform early sonewconn limit test"); -#endif - /* * Procedures to manipulate state flags of socket * and do appropriate wakeups. Normal sequence from the @@ -201,97 +193,6 @@ soisdisconnected(so) wakeup(&so->so_timeo); } -/* - * When an attempt at a new connection is noted on a socket - * which accepts connections, sonewconn is called. If the - * connection is possible (subject to space constraints, etc.) - * then we allocate a new structure, propoerly linked into the - * data structure of the original socket, and return this. - * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED. 
- * - * note: the ref count on the socket is 0 on return - */ -struct socket * -sonewconn(head, connstatus) - register struct socket *head; - int connstatus; -{ - register struct socket *so; - int over; - - ACCEPT_LOCK(); - over = (head->so_qlen > 3 * head->so_qlimit / 2); - ACCEPT_UNLOCK(); -#ifdef REGRESSION - if (regression_sonewconn_earlytest && over) -#else - if (over) -#endif - return (NULL); - so = soalloc(M_NOWAIT); - if (so == NULL) - return (NULL); - if ((head->so_options & SO_ACCEPTFILTER) != 0) - connstatus = 0; - so->so_head = head; - so->so_type = head->so_type; - so->so_options = head->so_options &~ SO_ACCEPTCONN; - so->so_linger = head->so_linger; - so->so_state = head->so_state | SS_NOFDREF; - so->so_proto = head->so_proto; - so->so_timeo = head->so_timeo; - so->so_cred = crhold(head->so_cred); -#ifdef MAC - SOCK_LOCK(head); - mac_create_socket_from_socket(head, so); - SOCK_UNLOCK(head); -#endif - knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv), - NULL, NULL, NULL); - knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd), - NULL, NULL, NULL); - if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) || - (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) { - sodealloc(so); - return (NULL); - } - so->so_state |= connstatus; - ACCEPT_LOCK(); - if (connstatus) { - TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); - so->so_qstate |= SQ_COMP; - head->so_qlen++; - } else { - /* - * Keep removing sockets from the head until there's room for - * us to insert on the tail. In pre-locking revisions, this - * was a simple if(), but as we could be racing with other - * threads and soabort() requires dropping locks, we must - * loop waiting for the condition to be true. - */ - while (head->so_incqlen > head->so_qlimit) { - struct socket *sp; - sp = TAILQ_FIRST(&head->so_incomp); - TAILQ_REMOVE(&head->so_incomp, sp, so_list); - head->so_incqlen--; - sp->so_qstate &= ~SQ_INCOMP; - sp->so_head = NULL; - ACCEPT_UNLOCK(); - soabort(sp); - ACCEPT_LOCK(); - } - TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); - so->so_qstate |= SQ_INCOMP; - head->so_incqlen++; - } - ACCEPT_UNLOCK(); - if (connstatus) { - sorwakeup(head); - wakeup_one(&head->so_timeo); - } - return (so); -} - /* * Socantsendmore indicates that no more data will be sent on the * socket; it would normally be applied to a socket when the user @@ -1498,49 +1399,10 @@ sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb) xsb->sb_timeo = sb->sb_timeo; } -/* - * Here is the definition of some of the basic objects in the kern.ipc - * branch of the MIB. - */ -SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC"); - /* This takes the place of kern.maxsockbuf, which moved to kern.ipc. 
*/ static int dummy; SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, ""); SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW, &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size"); -static int -sysctl_maxsockets(SYSCTL_HANDLER_ARGS) -{ - int error, newmaxsockets; - - newmaxsockets = maxsockets; - error = sysctl_handle_int(oidp, &newmaxsockets, sizeof(int), req); - if (error == 0 && req->newptr) { - if (newmaxsockets > maxsockets) { - maxsockets = newmaxsockets; - if (maxsockets > ((maxfiles / 4) * 3)) { - maxfiles = (maxsockets * 5) / 4; - maxfilesperproc = (maxfiles * 9) / 10; - } - EVENTHANDLER_INVOKE(maxsockets_change); - } else - error = EINVAL; - } - return (error); -} -SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW, - &maxsockets, 0, sysctl_maxsockets, "IU", - "Maximum number of sockets avaliable"); SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW, &sb_efficiency, 0, ""); - -/* - * Initialise maxsockets - */ -static void init_maxsockets(void *ignored) -{ - TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets); - maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters)); -} -SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL); diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c index 5cd6f9bed6b5..1826fb77e20f 100644 --- a/sys/kern/uipc_syscalls.c +++ b/sys/kern/uipc_syscalls.c @@ -92,7 +92,6 @@ int nsfbufs; int nsfbufspeak; int nsfbufsused; -SYSCTL_DECL(_kern_ipc); SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0, "Maximum number of sendfile(2) sf_bufs available"); SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0, diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h index b1d7dd72239b..5e4cd5a32fa3 100644 --- a/sys/sys/socketvar.h +++ b/sys/sys/socketvar.h @@ -496,7 +496,6 @@ int sbwait(struct sockbuf *sb); int sb_lock(struct sockbuf *sb); void soabort(struct socket *so); int soaccept(struct socket *so, struct sockaddr **nam); -struct socket *soalloc(int mflags); int socheckuid(struct socket *so, uid_t uid); int sobind(struct socket *so, struct sockaddr *nam, struct thread *td); void socantrcvmore(struct socket *so); @@ -509,7 +508,6 @@ int soconnect2(struct socket *so1, struct socket *so2); int socow_setup(struct mbuf *m0, struct uio *uio); int socreate(int dom, struct socket **aso, int type, int proto, struct ucred *cred, struct thread *td); -void sodealloc(struct socket *so); int sodisconnect(struct socket *so); struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags); void sofree(struct socket *so); diff --git a/sys/sys/sysctl.h b/sys/sys/sysctl.h index 6116219e4759..f4f52edf721a 100644 --- a/sys/sys/sysctl.h +++ b/sys/sys/sysctl.h @@ -591,6 +591,7 @@ TAILQ_HEAD(sysctl_ctx_list, sysctl_ctx_entry); */ extern struct sysctl_oid_list sysctl__children; SYSCTL_DECL(_kern); +SYSCTL_DECL(_kern_ipc); SYSCTL_DECL(_sysctl); SYSCTL_DECL(_vm); SYSCTL_DECL(_vfs);
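
For reference (not part of the patch): a minimal userland sketch of exercising the kern.ipc.maxsockets sysctl that this change turns into a tunable with a custom handler. It assumes a FreeBSD system with the change applied; the file name t_maxsockets.c and the printed messages are illustrative. The program reads the current limit with sysctlbyname(3) and then shows that the handler refuses to shrink it.

/* t_maxsockets.c -- illustrative only; build with: cc -o t_maxsockets t_maxsockets.c */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	int cur, smaller;
	size_t len = sizeof(cur);

	/* Read the limit exported by sysctl_maxsockets(). */
	if (sysctlbyname("kern.ipc.maxsockets", &cur, &len, NULL, 0) == -1) {
		perror("sysctlbyname(kern.ipc.maxsockets)");
		return (1);
	}
	printf("kern.ipc.maxsockets = %d\n", cur);

	/*
	 * The handler only accepts increases, so this write is expected to
	 * fail: EINVAL from sysctl_maxsockets(), or EPERM if unprivileged.
	 */
	smaller = cur - 1;
	if (sysctlbyname("kern.ipc.maxsockets", NULL, NULL, &smaller,
	    sizeof(smaller)) == -1)
		printf("lowering rejected: %s\n", strerror(errno));
	return (0);
}

The grow-only behaviour mirrors sysctl_maxsockets() as moved into uipc_socket.c above: raising the limit may also bump maxfiles and maxfilesperproc and fires the maxsockets_change eventhandler so interested subsystems can react.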