6164d7c280
Introduce tcp_hostcache and remove the TCP-specific metrics from the routing table. Move all usage and references in the TCP stack from the routing table metrics to the TCP hostcache. The hostcache caches measured parameters of past TCP sessions to provide better initial start values for subsequent connections from or to the same source or destination. Depending on the network parameters to/from the remote host, this can lead to significant speedups for new TCP connections after the first one, because they inherit and shortcut the learning curve. tcp_hostcache is designed for multiple concurrent access in SMP environments with high contention, and it is hash indexed by remote IP address. It removes significant locking requirements from the TCP stack with regard to the routing table.

Reviewed by:    sam (mentor), bms
Reviewed by:    -net, -current, core@kame.net (IPv6 parts)
Approved by:    re (scottl)
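The hostcache described in this commit message is, at its core, a hash table keyed by the remote IP address whose entries carry measured per-host parameters (RTT, ssthresh, path MTU, and so on) that a new connection can consult for better initial values. As a rough illustration of that idea only -- this is not the FreeBSD tcp_hostcache code, and every name in it (hc_entry, hc_hash, hc_lookup, hc_insert) is invented for the example -- a minimal userland sketch might look like:

/*
 * Illustrative sketch of a host cache hash indexed by remote IPv4 address.
 * Not the kernel implementation: no locking, no entry expiry, userland only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HC_HASHSIZE 512                 /* number of buckets, power of two */

struct hc_entry {
        struct hc_entry *hc_next;       /* bucket chain */
        uint32_t         hc_addr;       /* remote IPv4 address (the key) */
        uint32_t         hc_rtt;        /* smoothed RTT from past sessions */
        uint32_t         hc_ssthresh;   /* slow-start threshold */
        uint32_t         hc_mtu;        /* discovered path MTU */
};

static struct hc_entry *hc_buckets[HC_HASHSIZE];

static unsigned
hc_hash(uint32_t addr)
{
        /* cheap multiplicative hash over the remote address */
        return ((addr * 2654435761u) & (HC_HASHSIZE - 1));
}

static struct hc_entry *
hc_lookup(uint32_t addr)
{
        struct hc_entry *e;

        /* walk the bucket chain for this address */
        for (e = hc_buckets[hc_hash(addr)]; e != NULL; e = e->hc_next)
                if (e->hc_addr == addr)
                        return (e);
        return (NULL);
}

static struct hc_entry *
hc_insert(uint32_t addr)
{
        unsigned slot = hc_hash(addr);
        struct hc_entry *e = calloc(1, sizeof(*e));

        if (e != NULL) {
                e->hc_addr = addr;
                e->hc_next = hc_buckets[slot];
                hc_buckets[slot] = e;
        }
        return (e);
}

int
main(void)
{
        /* cache measurements for the example address 192.0.2.1, read back */
        struct hc_entry *e = hc_insert(0xc0000201u);

        if (e == NULL)
                return (1);
        e->hc_rtt = 120;
        e->hc_ssthresh = 65535;
        e = hc_lookup(0xc0000201u);
        printf("rtt %u ssthresh %u\n", (unsigned)e->hc_rtt,
            (unsigned)e->hc_ssthresh);
        return (0);
}

The real kernel code adds per-bucket locking and entry expiry to get the SMP and contention behaviour the commit message describes; this sketch deliberately omits both.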
845 lines
20 KiB
C
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)raw_ip.c    8.7 (Berkeley) 5/15/95
 * $FreeBSD$
 */

#include "opt_inet6.h"
|
|
#include "opt_ipsec.h"
|
|
#include "opt_mac.h"
|
|
#include "opt_random_ip_id.h"
|
|
|
|
#include <sys/param.h>
|
|
#include <sys/kernel.h>
|
|
#include <sys/lock.h>
|
|
#include <sys/mac.h>
|
|
#include <sys/malloc.h>
|
|
#include <sys/mbuf.h>
|
|
#include <sys/proc.h>
|
|
#include <sys/protosw.h>
|
|
#include <sys/signalvar.h>
|
|
#include <sys/socket.h>
|
|
#include <sys/socketvar.h>
|
|
#include <sys/sx.h>
|
|
#include <sys/sysctl.h>
|
|
#include <sys/systm.h>
|
|
|
|
#include <vm/uma.h>
|
|
|
|
#include <net/if.h>
|
|
#include <net/route.h>
|
|
|
|
#include <netinet/in.h>
|
|
#include <netinet/in_systm.h>
|
|
#include <netinet/in_pcb.h>
|
|
#include <netinet/in_var.h>
|
|
#include <netinet/ip.h>
|
|
#include <netinet/ip_var.h>
|
|
#include <netinet/ip_mroute.h>
|
|
|
|
#include <netinet/ip_fw.h>
|
|
#include <netinet/ip_dummynet.h>
|
|
|
|
#ifdef FAST_IPSEC
|
|
#include <netipsec/ipsec.h>
|
|
#endif /*FAST_IPSEC*/
|
|
|
|
#ifdef IPSEC
|
|
#include <netinet6/ipsec.h>
|
|
#endif /*IPSEC*/
|
|
|
|
struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr;
ip_dn_ctl_t *ip_dn_ctl_ptr;

/*
 * hooks for multicast routing. They all default to NULL,
 * so leave them not initialized and rely on BSS being set to 0.
 */

/* The socket used to communicate with the multicast routing daemon. */
struct socket *ip_mrouter;

/* The various mrouter and rsvp functions */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
                struct ip_moptions *);
int (*mrt_ioctl)(int, caddr_t);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);

/*
 * Nominal space allocated to a raw ip socket.
 */
#define RIPSNDQ         8192
#define RIPRCVQ         8192

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init()
{
        INP_INFO_LOCK_INIT(&ripcbinfo, "rip");
        LIST_INIT(&ripcb);
        ripcbinfo.listhead = &ripcb;
        /*
         * XXX We don't use the hash list for raw IP, but it's easier
         * to allocate a one entry hash list than it is to check all
         * over the place for hashbase == NULL.
         */
        ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
        ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);
        ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };

static int
raw_append(struct inpcb *last, struct ip *ip, struct mbuf *n)
{
        int policyfail = 0;

#ifdef IPSEC
        /* check AH/ESP integrity. */
        if (ipsec4_in_reject_so(n, last->inp_socket)) {
                policyfail = 1;
                ipsecstat.in_polvio++;
                /* do not inject data to pcb */
        }
#endif /*IPSEC*/
#ifdef FAST_IPSEC
        /* check AH/ESP integrity. */
        if (ipsec4_in_reject(n, last)) {
                policyfail = 1;
                /* do not inject data to pcb */
        }
#endif /*FAST_IPSEC*/
#ifdef MAC
        if (!policyfail && mac_check_inpcb_deliver(last, n) != 0)
                policyfail = 1;
#endif
        if (!policyfail) {
                struct mbuf *opts = NULL;

                if ((last->inp_flags & INP_CONTROLOPTS) ||
                    (last->inp_socket->so_options & SO_TIMESTAMP))
                        ip_savecontrol(last, &opts, ip, n);
                if (sbappendaddr(&last->inp_socket->so_rcv,
                    (struct sockaddr *)&ripsrc, n, opts) == 0) {
                        /* should notify about lost packet */
                        m_freem(n);
                        if (opts)
                                m_freem(opts);
                } else
                        sorwakeup(last->inp_socket);
        } else
                m_freem(n);
        return policyfail;
}

/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
        struct ip *ip = mtod(m, struct ip *);
        int proto = ip->ip_p;
        struct inpcb *inp, *last;

        INP_INFO_RLOCK(&ripcbinfo);
        ripsrc.sin_addr = ip->ip_src;
        last = NULL;
        LIST_FOREACH(inp, &ripcb, inp_list) {
                INP_LOCK(inp);
                if (inp->inp_ip_p && inp->inp_ip_p != proto) {
        docontinue:
                        INP_UNLOCK(inp);
                        continue;
                }
#ifdef INET6
                if ((inp->inp_vflag & INP_IPV4) == 0)
                        goto docontinue;
#endif
                if (inp->inp_laddr.s_addr &&
                    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
                        goto docontinue;
                if (inp->inp_faddr.s_addr &&
                    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
                        goto docontinue;
                if (last) {
                        struct mbuf *n;

                        n = m_copy(m, 0, (int)M_COPYALL);
                        if (n != NULL)
                                (void) raw_append(last, ip, n);
                        /* XXX count dropped packet */
                        INP_UNLOCK(last);
                }
                last = inp;
        }
        if (last != NULL) {
                if (raw_append(last, ip, m) != 0)
                        ipstat.ips_delivered--;
                INP_UNLOCK(last);
        } else {
                m_freem(m);
                ipstat.ips_noproto++;
                ipstat.ips_delivered--;
        }
        INP_INFO_RUNLOCK(&ripcbinfo);
}

/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
        struct ip *ip;
        struct inpcb *inp = sotoinpcb(so);
        int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;

#ifdef MAC
        mac_create_mbuf_from_socket(so, m);
#endif

        /*
         * If the user handed us a complete IP packet, use it.
         * Otherwise, allocate an mbuf for a header and fill it in.
         */
        if ((inp->inp_flags & INP_HDRINCL) == 0) {
                if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
                        m_freem(m);
                        return(EMSGSIZE);
                }
                M_PREPEND(m, sizeof(struct ip), M_TRYWAIT);
                if (m == NULL)
                        return(ENOBUFS);
                ip = mtod(m, struct ip *);
                ip->ip_tos = inp->inp_ip_tos;
                ip->ip_off = 0;
                ip->ip_p = inp->inp_ip_p;
                ip->ip_len = m->m_pkthdr.len;
                ip->ip_src = inp->inp_laddr;
                ip->ip_dst.s_addr = dst;
                ip->ip_ttl = inp->inp_ip_ttl;
        } else {
                if (m->m_pkthdr.len > IP_MAXPACKET) {
                        m_freem(m);
                        return(EMSGSIZE);
                }
                ip = mtod(m, struct ip *);
                /* don't allow both user specified and setsockopt options,
                   and don't allow packet length sizes that will crash */
                if (((ip->ip_hl != (sizeof (*ip) >> 2))
                    && inp->inp_options)
                    || (ip->ip_len > m->m_pkthdr.len)
                    || (ip->ip_len < (ip->ip_hl << 2))) {
                        m_freem(m);
                        return EINVAL;
                }
                if (ip->ip_id == 0)
#ifdef RANDOM_IP_ID
                        ip->ip_id = ip_randomid();
#else
                        ip->ip_id = htons(ip_id++);
#endif
                /* XXX prevent ip_output from overwriting header fields */
                flags |= IP_RAWOUTPUT;
                ipstat.ips_rawout++;
        }

        if (inp->inp_flags & INP_ONESBCAST)
                flags |= IP_SENDONES;

        return (ip_output(m, inp->inp_options, NULL, flags,
            inp->inp_moptions, inp));
}

/*
 * Raw IP socket option processing.
 *
 * Note that access to all of the IP administrative functions here is
 * implicitly protected by suser() as gaining access to a raw socket
 * requires either that the thread pass a suser() check, or that it be
 * passed a raw socket by another thread that has passed a suser() check.
 * If FreeBSD moves to a more fine-grained access control mechanism,
 * additional checks will need to be placed here if the raw IP attachment
 * check is not equivalent to the check required for these
 * administrative operations; in some cases, these checks are already
 * present.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
        struct inpcb *inp = sotoinpcb(so);
        int error, optval;

        if (sopt->sopt_level != IPPROTO_IP)
                return (EINVAL);

        error = 0;

        switch (sopt->sopt_dir) {
        case SOPT_GET:
                switch (sopt->sopt_name) {
                case IP_HDRINCL:
                        optval = inp->inp_flags & INP_HDRINCL;
                        error = sooptcopyout(sopt, &optval, sizeof optval);
                        break;

                case IP_FW_ADD: /* ADD actually returns the body... */
                case IP_FW_GET:
                        if (IPFW_LOADED)
                                error = ip_fw_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;

                case IP_DUMMYNET_GET:
                        if (DUMMYNET_LOADED)
                                error = ip_dn_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;

                case MRT_INIT:
                case MRT_DONE:
                case MRT_ADD_VIF:
                case MRT_DEL_VIF:
                case MRT_ADD_MFC:
                case MRT_DEL_MFC:
                case MRT_VERSION:
                case MRT_ASSERT:
                case MRT_API_SUPPORT:
                case MRT_API_CONFIG:
                case MRT_ADD_BW_UPCALL:
                case MRT_DEL_BW_UPCALL:
                        error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
                                EOPNOTSUPP;
                        break;

                default:
                        error = ip_ctloutput(so, sopt);
                        break;
                }
                break;

        case SOPT_SET:
                switch (sopt->sopt_name) {
                case IP_HDRINCL:
                        error = sooptcopyin(sopt, &optval, sizeof optval,
                                            sizeof optval);
                        if (error)
                                break;
                        if (optval)
                                inp->inp_flags |= INP_HDRINCL;
                        else
                                inp->inp_flags &= ~INP_HDRINCL;
                        break;

                case IP_FW_ADD:
                case IP_FW_DEL:
                case IP_FW_FLUSH:
                case IP_FW_ZERO:
                case IP_FW_RESETLOG:
                        if (IPFW_LOADED)
                                error = ip_fw_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;

                case IP_DUMMYNET_CONFIGURE:
                case IP_DUMMYNET_DEL:
                case IP_DUMMYNET_FLUSH:
                        if (DUMMYNET_LOADED)
                                error = ip_dn_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;

                case IP_RSVP_ON:
                        error = ip_rsvp_init(so);
                        break;

                case IP_RSVP_OFF:
                        error = ip_rsvp_done();
                        break;

                case IP_RSVP_VIF_ON:
                case IP_RSVP_VIF_OFF:
                        error = ip_rsvp_vif ?
                                ip_rsvp_vif(so, sopt) : EINVAL;
                        break;

                case MRT_INIT:
                case MRT_DONE:
                case MRT_ADD_VIF:
                case MRT_DEL_VIF:
                case MRT_ADD_MFC:
                case MRT_DEL_MFC:
                case MRT_VERSION:
                case MRT_ASSERT:
                case MRT_API_SUPPORT:
                case MRT_API_CONFIG:
                case MRT_ADD_BW_UPCALL:
                case MRT_DEL_BW_UPCALL:
                        error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
                                        EOPNOTSUPP;
                        break;

                default:
                        error = ip_ctloutput(so, sopt);
                        break;
                }
                break;
        }

        return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
        struct in_ifaddr *ia;
        struct ifnet *ifp;
        int err;
        int flags;

        switch (cmd) {
        case PRC_IFDOWN:
                TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
                        if (ia->ia_ifa.ifa_addr == sa
                            && (ia->ia_flags & IFA_ROUTE)) {
                                /*
                                 * in_ifscrub kills the interface route.
                                 */
                                in_ifscrub(ia->ia_ifp, ia);
                                /*
                                 * in_ifadown gets rid of all the rest of
                                 * the routes.  This is not quite the right
                                 * thing to do, but at least if we are running
                                 * a routing process they will come back.
                                 */
                                in_ifadown(&ia->ia_ifa, 0);
                                break;
                        }
                }
                break;

        case PRC_IFUP:
                TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
                        if (ia->ia_ifa.ifa_addr == sa)
                                break;
                }
                if (ia == 0 || (ia->ia_flags & IFA_ROUTE))
                        return;
                flags = RTF_UP;
                ifp = ia->ia_ifa.ifa_ifp;

                if ((ifp->if_flags & IFF_LOOPBACK)
                    || (ifp->if_flags & IFF_POINTOPOINT))
                        flags |= RTF_HOST;

                err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
                if (err == 0)
                        ia->ia_flags |= IFA_ROUTE;
                break;
        }
}

u_long rip_sendspace = RIPSNDQ;
u_long rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
        struct inpcb *inp;
        int error;

        /* XXX why not lower? */
        INP_INFO_WLOCK(&ripcbinfo);
        inp = sotoinpcb(so);
        if (inp) {
                /* XXX counter, printf */
                INP_INFO_WUNLOCK(&ripcbinfo);
                return EINVAL;
        }
        if (td && (error = suser(td)) != 0) {
                INP_INFO_WUNLOCK(&ripcbinfo);
                return error;
        }
        if (proto >= IPPROTO_MAX || proto < 0) {
                INP_INFO_WUNLOCK(&ripcbinfo);
                return EPROTONOSUPPORT;
        }

        error = soreserve(so, rip_sendspace, rip_recvspace);
        if (error) {
                INP_INFO_WUNLOCK(&ripcbinfo);
                return error;
        }
        error = in_pcballoc(so, &ripcbinfo, td);
        if (error) {
                INP_INFO_WUNLOCK(&ripcbinfo);
                return error;
        }
        inp = (struct inpcb *)so->so_pcb;
        INP_LOCK(inp);
        INP_INFO_WUNLOCK(&ripcbinfo);
        inp->inp_vflag |= INP_IPV4;
        inp->inp_ip_p = proto;
        inp->inp_ip_ttl = ip_defttl;
        INP_UNLOCK(inp);
        return 0;
}

static void
rip_pcbdetach(struct socket *so, struct inpcb *inp)
{
        INP_INFO_WLOCK_ASSERT(&ripcbinfo);
        INP_LOCK_ASSERT(inp);

        if (so == ip_mrouter && ip_mrouter_done)
                ip_mrouter_done();
        if (ip_rsvp_force_done)
                ip_rsvp_force_done(so);
        if (so == ip_rsvpd)
                ip_rsvp_done();
        in_pcbdetach(inp);
}

static int
rip_detach(struct socket *so)
{
        struct inpcb *inp;

        INP_INFO_WLOCK(&ripcbinfo);
        inp = sotoinpcb(so);
        if (inp == 0) {
                /* XXX counter, printf */
                INP_INFO_WUNLOCK(&ripcbinfo);
                return EINVAL;
        }
        INP_LOCK(inp);
        rip_pcbdetach(so, inp);
        INP_INFO_WUNLOCK(&ripcbinfo);
        return 0;
}

static int
rip_abort(struct socket *so)
{
        struct inpcb *inp;

        INP_INFO_WLOCK(&ripcbinfo);
        inp = sotoinpcb(so);
        if (inp == 0) {
                INP_INFO_WUNLOCK(&ripcbinfo);
                return EINVAL;  /* ??? possible? panic instead? */
        }
        INP_LOCK(inp);
        soisdisconnected(so);
        if (so->so_state & SS_NOFDREF)
                rip_pcbdetach(so, inp);
        else
                INP_UNLOCK(inp);
        INP_INFO_WUNLOCK(&ripcbinfo);
        return 0;
}

static int
rip_disconnect(struct socket *so)
{
        if ((so->so_state & SS_ISCONNECTED) == 0)
                return ENOTCONN;
        return rip_abort(so);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct sockaddr_in *addr = (struct sockaddr_in *)nam;
        struct inpcb *inp;

        if (nam->sa_len != sizeof(*addr))
                return EINVAL;

        if (TAILQ_EMPTY(&ifnet) ||
            (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
            (addr->sin_addr.s_addr &&
             ifa_ifwithaddr((struct sockaddr *)addr) == 0))
                return EADDRNOTAVAIL;

        INP_INFO_WLOCK(&ripcbinfo);
        inp = sotoinpcb(so);
        if (inp == 0) {
                INP_INFO_WUNLOCK(&ripcbinfo);
                return EINVAL;
        }
        INP_LOCK(inp);
        inp->inp_laddr = addr->sin_addr;
        INP_UNLOCK(inp);
        INP_INFO_WUNLOCK(&ripcbinfo);
        return 0;
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
        struct sockaddr_in *addr = (struct sockaddr_in *)nam;
        struct inpcb *inp;

        if (nam->sa_len != sizeof(*addr))
                return EINVAL;
        if (TAILQ_EMPTY(&ifnet))
                return EADDRNOTAVAIL;
        if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
                return EAFNOSUPPORT;

        INP_INFO_WLOCK(&ripcbinfo);
        inp = sotoinpcb(so);
        if (inp == 0) {
                INP_INFO_WUNLOCK(&ripcbinfo);
                return EINVAL;
        }
        INP_LOCK(inp);
        inp->inp_faddr = addr->sin_addr;
        soisconnected(so);
        INP_UNLOCK(inp);
        INP_INFO_WUNLOCK(&ripcbinfo);
        return 0;
}

static int
rip_shutdown(struct socket *so)
{
        struct inpcb *inp;

        INP_INFO_RLOCK(&ripcbinfo);
        inp = sotoinpcb(so);
        if (inp == 0) {
                INP_INFO_RUNLOCK(&ripcbinfo);
                return EINVAL;
        }
        INP_LOCK(inp);
        INP_INFO_RUNLOCK(&ripcbinfo);
        socantsendmore(so);
        INP_UNLOCK(inp);
        return 0;
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
        struct inpcb *inp;
        u_long dst;
        int ret;

        INP_INFO_WLOCK(&ripcbinfo);
        inp = sotoinpcb(so);
        if (so->so_state & SS_ISCONNECTED) {
                if (nam) {
                        INP_INFO_WUNLOCK(&ripcbinfo);
                        m_freem(m);
                        return EISCONN;
                }
                dst = inp->inp_faddr.s_addr;
        } else {
                if (nam == NULL) {
                        INP_INFO_WUNLOCK(&ripcbinfo);
                        m_freem(m);
                        return ENOTCONN;
                }
                dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
        }
        INP_LOCK(inp);
        ret = rip_output(m, so, dst);
        INP_UNLOCK(inp);
        INP_INFO_WUNLOCK(&ripcbinfo);
        return ret;
}

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
        int error, i, n;
        struct inpcb *inp, **inp_list;
        inp_gen_t gencnt;
        struct xinpgen xig;

        /*
         * The process of preparing the TCB list is too time-consuming and
         * resource-intensive to repeat twice on every request.
         */
        if (req->oldptr == 0) {
                n = ripcbinfo.ipi_count;
                req->oldidx = 2 * (sizeof xig)
                        + (n + n/8) * sizeof(struct xinpcb);
                return 0;
        }

        if (req->newptr != 0)
                return EPERM;

        /*
         * OK, now we're committed to doing something.
         */
        INP_INFO_RLOCK(&ripcbinfo);
        gencnt = ripcbinfo.ipi_gencnt;
        n = ripcbinfo.ipi_count;
        INP_INFO_RUNLOCK(&ripcbinfo);

        xig.xig_len = sizeof xig;
        xig.xig_count = n;
        xig.xig_gen = gencnt;
        xig.xig_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xig, sizeof xig);
        if (error)
                return error;

        inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
        if (inp_list == 0)
                return ENOMEM;

        INP_INFO_RLOCK(&ripcbinfo);
        for (inp = LIST_FIRST(ripcbinfo.listhead), i = 0; inp && i < n;
             inp = LIST_NEXT(inp, inp_list)) {
                INP_LOCK(inp);
                if (inp->inp_gencnt <= gencnt &&
                    cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0) {
                        /* XXX held references? */
                        inp_list[i++] = inp;
                }
                INP_UNLOCK(inp);
        }
        INP_INFO_RUNLOCK(&ripcbinfo);
        n = i;

        error = 0;
        for (i = 0; i < n; i++) {
                inp = inp_list[i];
                if (inp->inp_gencnt <= gencnt) {
                        struct xinpcb xi;
                        xi.xi_len = sizeof xi;
                        /* XXX should avoid extra copy */
                        bcopy(inp, &xi.xi_inp, sizeof *inp);
                        if (inp->inp_socket)
                                sotoxsocket(inp->inp_socket, &xi.xi_socket);
                        error = SYSCTL_OUT(req, &xi, sizeof xi);
                }
        }
        if (!error) {
                /*
                 * Give the user an updated idea of our state.
                 * If the generation differs from what we told
                 * her before, she knows that something happened
                 * while we were processing this request, and it
                 * might be necessary to retry.
                 */
                INP_INFO_RLOCK(&ripcbinfo);
                xig.xig_gen = ripcbinfo.ipi_gencnt;
                xig.xig_sogen = so_gencnt;
                xig.xig_count = ripcbinfo.ipi_count;
                INP_INFO_RUNLOCK(&ripcbinfo);
                error = SYSCTL_OUT(req, &xig, sizeof xig);
        }
        free(inp_list, M_TEMP);
        return error;
}

/*
 * This is the wrapper function for in_setsockaddr.  We just pass down
 * the pcbinfo for in_setsockaddr to lock.
 */
static int
rip_sockaddr(struct socket *so, struct sockaddr **nam)
{
        return (in_setsockaddr(so, nam, &ripcbinfo));
}

/*
 * This is the wrapper function for in_setpeeraddr.  We just pass down
 * the pcbinfo for in_setpeeraddr to lock.
 */
static int
rip_peeraddr(struct socket *so, struct sockaddr **nam)
{
        return (in_setpeeraddr(so, nam, &ripcbinfo));
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
            rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
        rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect,
        pru_connect2_notsupp, in_control, rip_detach, rip_disconnect,
        pru_listen_notsupp, rip_peeraddr, pru_rcvd_notsupp,
        pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown,
        rip_sockaddr, sosend, soreceive, sopoll, in_pcbsosetlabel
};