Implement a limit on the number of IPv6 reassembly queues per bucket.

The hashing algorithm should distribute IPv6 reassembly queues across the
available buckets in a relatively even way. However, if a flaw in the
hashing algorithm allows a large number of IPv6 fragment reassembly queues
to end up in a single bucket, a per-bucket limit can help mitigate the
performance impact of that flaw.
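
For illustration only, here is a minimal user-space sketch of the idea behind
the mitigation (an editor's sketch, not the kernel code): reassembly queues are
hashed into a fixed number of buckets, and a new queue is admitted only while
its bucket is under a per-bucket cap, so even a badly skewed distribution can
pin at most that many queues in one bucket. NHASH, MAXFRAGBUCKETSIZE,
bucket_hash(), and admit_queue() are stand-in names, and the deliberately weak
hash merely mimics a hypothetical distribution flaw.

#include <stdint.h>
#include <stdio.h>

#define NHASH			64	/* number of reassembly hash buckets */
#define MAXFRAGBUCKETSIZE	4	/* per-bucket cap on reassembly queues */

static int bucket_count[NHASH];		/* queues currently in each bucket */

/* Deliberately weak hash, standing in for a flawed distribution. */
static unsigned
bucket_hash(uint32_t id)
{

	return (id % NHASH);
}

/* Admit a new reassembly queue only while its bucket is under the cap. */
static int
admit_queue(uint32_t id)
{
	unsigned b = bucket_hash(id);

	if (bucket_count[b] >= MAXFRAGBUCKETSIZE)
		return (0);		/* bucket full: drop the fragment */
	bucket_count[b]++;
	return (1);
}

int
main(void)
{
	int i, admitted = 0;

	/* 100 fragment IDs crafted to collide into bucket 0. */
	for (i = 0; i < 100; i++)
		admitted += admit_queue((uint32_t)i * NHASH);
	printf("admitted %d of 100 colliding queues\n", admitted);
	return (0);
}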

Implement such a limit, with a default of twice the maximum number of
reassembly queues divided by the number of buckets. Recalculate the
limit any time the maximum number of reassembly queues changes.
However, allow the user to override the value using a sysctl
(net.inet6.ip6.maxfragbucketsize).
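
As a usage note (again an editor's sketch, not part of the commit): once this
change is in place, the limit can be read or overridden from userland either
with "sysctl net.inet6.ip6.maxfragbucketsize" or programmatically via
sysctlbyname(3), for example:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int cur, new = 128;
	size_t len = sizeof(cur);

	/* Read the current per-bucket limit. */
	if (sysctlbyname("net.inet6.ip6.maxfragbucketsize", &cur, &len,
	    NULL, 0) == -1)
		err(1, "sysctlbyname(read)");
	printf("current per-bucket limit: %d\n", cur);

	/* Override it (requires root); this replaces the computed default. */
	if (sysctlbyname("net.inet6.ip6.maxfragbucketsize", NULL, NULL,
	    &new, sizeof(new)) == -1)
		err(1, "sysctlbyname(write)");
	return (0);
}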

Reviewed by:	jhb
Security:	FreeBSD-SA-18:10.ip
Security:	CVE-2018-6923
jtl 2018-08-14 17:27:41 +00:00
parent a7668fa529
commit e5f23fbf44
4 changed files with 88 additions and 19 deletions


@@ -79,13 +79,14 @@ static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *,
     uint32_t bucket __unused);
 static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused);
 static void frag6_insque_head(struct ip6q *, struct ip6q *,
-    uint32_t bucket __unused);
-static void frag6_remque(struct ip6q *, uint32_t bucket __unused);
+    uint32_t bucket);
+static void frag6_remque(struct ip6q *, uint32_t bucket);
 static void frag6_freef(struct ip6q *, uint32_t bucket);
 
 struct ip6qbucket {
 	struct ip6q	ip6q;
 	struct mtx	lock;
+	int		count;
 };
 
 VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
@@ -108,6 +109,15 @@ static MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header");
 /*
  * Initialise reassembly queue and fragment identifier.
  */
+void
+frag6_set_bucketsize()
+{
+	int i;
+
+	if ((i = V_ip6_maxfragpackets) > 0)
+		V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
+}
+
 static void
 frag6_change(void *tag)
 {
@@ -118,6 +128,7 @@ frag6_change(void *tag)
 	VNET_FOREACH(vnet_iter) {
 		CURVNET_SET(vnet_iter);
 		V_ip6_maxfragpackets = nmbclusters / 4;
+		frag6_set_bucketsize();
 		CURVNET_RESTORE();
 	}
 	VNET_LIST_RUNLOCK_NOSLEEP();
@@ -130,10 +141,12 @@ frag6_init(void)
 	int i;
 
 	V_ip6_maxfragpackets = nmbclusters / 4;
+	frag6_set_bucketsize();
 	for (i = 0; i < IP6REASS_NHASH; i++) {
 		q6 = IP6Q_HEAD(i);
 		q6->ip6q_next = q6->ip6q_prev = q6;
 		mtx_init(&V_ip6q[i].lock, "ip6qlock", NULL, MTX_DEF);
+		V_ip6q[i].count = 0;
 	}
 	V_ip6q_hashseed = arc4random();
 	V_ip6_maxfragsperpacket = 64;
@@ -304,7 +317,8 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
 	 */
 	if (V_ip6_maxfragpackets < 0)
 		;
-	else if (atomic_load_int(&V_frag6_nfragpackets) >=
+	else if (V_ip6q[hash].count >= V_ip6_maxfragbucketsize ||
+	    atomic_load_int(&V_frag6_nfragpackets) >=
 	    (u_int)V_ip6_maxfragpackets)
 		goto dropfrag;
 	atomic_add_int(&V_frag6_nfragpackets, 1);
@@ -763,7 +777,7 @@ frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
 }
 
 static void
-frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket __unused)
+frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket)
 {
 
 	IP6Q_LOCK_ASSERT(bucket);
@@ -775,16 +789,18 @@ frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket __unused)
 	new->ip6q_next = old->ip6q_next;
 	old->ip6q_next->ip6q_prev= new;
 	old->ip6q_next = new;
+	V_ip6q[bucket].count++;
 }
 
 static void
-frag6_remque(struct ip6q *p6, uint32_t bucket __unused)
+frag6_remque(struct ip6q *p6, uint32_t bucket)
 {
 
 	IP6Q_LOCK_ASSERT(bucket);
 
 	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
 	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
+	V_ip6q[bucket].count--;
 }
 
 /*
@@ -806,23 +822,35 @@ frag6_slowtimo(void)
 			IP6Q_LOCK(i);
 			head = IP6Q_HEAD(i);
 			q6 = head->ip6q_next;
-			if (q6)
-				while (q6 != head) {
-					--q6->ip6q_ttl;
-					q6 = q6->ip6q_next;
-					if (q6->ip6q_prev->ip6q_ttl == 0) {
-						IP6STAT_INC(ip6s_fragtimeout);
-						/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
-						frag6_freef(q6->ip6q_prev, i);
-					}
+			if (q6 == NULL) {
+				/*
+				 * XXXJTL: This should never happen. This
+				 * should turn into an assertion.
+				 */
+				IP6Q_UNLOCK(i);
+				continue;
+			}
+			while (q6 != head) {
+				--q6->ip6q_ttl;
+				q6 = q6->ip6q_next;
+				if (q6->ip6q_prev->ip6q_ttl == 0) {
+					IP6STAT_INC(ip6s_fragtimeout);
+					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
+					frag6_freef(q6->ip6q_prev, i);
 				}
+			}
 			/*
 			 * If we are over the maximum number of fragments
 			 * (due to the limit being lowered), drain off
 			 * enough to get down to the new limit.
+			 * Note that we drain all reassembly queues if
+			 * maxfragpackets is 0 (fragmentation is disabled),
+			 * and don't enforce a limit when maxfragpackets
+			 * is negative.
 			 */
-			while (atomic_load_int(&V_frag6_nfragpackets) >
-			    (u_int)V_ip6_maxfragpackets &&
+			while ((V_ip6_maxfragpackets == 0 ||
+			    (V_ip6_maxfragpackets > 0 &&
+			    V_ip6q[i].count > V_ip6_maxfragbucketsize)) &&
 			    head->ip6q_prev != head) {
 				IP6STAT_INC(ip6s_fragoverflow);
 				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
@@ -830,6 +858,24 @@ frag6_slowtimo(void)
 			}
 			IP6Q_UNLOCK(i);
 		}
+		/*
+		 * If we are still over the maximum number of fragmented
+		 * packets, drain off enough to get down to the new limit.
+		 */
+		i = 0;
+		while (V_ip6_maxfragpackets >= 0 &&
+		    atomic_load_int(&V_frag6_nfragpackets) >
+		    (u_int)V_ip6_maxfragpackets) {
+			IP6Q_LOCK(i);
+			head = IP6Q_HEAD(i);
+			if (head->ip6q_prev != head) {
+				IP6STAT_INC(ip6s_fragoverflow);
+				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
+				frag6_freef(head->ip6q_prev, i);
+			}
+			IP6Q_UNLOCK(i);
+			i = (i + 1) % IP6REASS_NHASH;
+		}
 		CURVNET_RESTORE();
 	}
 	VNET_LIST_RUNLOCK_NOSLEEP();


@@ -643,7 +643,8 @@ struct ip6_mtuinfo {
 #define IPV6CTL_INTRDQMAXLEN	52	/* max length of direct IPv6 netisr
 					 * queue */
 #define IPV6CTL_MAXFRAGSPERPACKET	53 /* Max fragments per packet */
-#define IPV6CTL_MAXID		54
+#define IPV6CTL_MAXFRAGBUCKETSIZE	54 /* Max reassembly queues per bucket */
+#define IPV6CTL_MAXID		55
 #endif /* __BSD_VISIBLE */
 
 /*


@@ -386,6 +386,7 @@ VNET_DEFINE(int, ip6_norbit_raif) = 0;
 VNET_DEFINE(int, ip6_rfc6204w3) = 0;
 VNET_DEFINE(int, ip6_maxfragpackets);	/* initialized in frag6.c:frag6_init() */
 int ip6_maxfrags;	/* initialized in frag6.c:frag6_init() */
+VNET_DEFINE(int, ip6_maxfragbucketsize);/* initialized in frag6.c:frag6_init() */
 VNET_DEFINE(int, ip6_maxfragsperpacket); /* initialized in frag6.c:frag6_init() */
 VNET_DEFINE(int, ip6_log_interval) = 5;
 VNET_DEFINE(int, ip6_hdrnestlimit) = 15;/* How many header options will we
@@ -473,6 +474,20 @@ sysctl_ip6_tempvltime(SYSCTL_HANDLER_ARGS)
 	return (0);
 }
 
+static int
+sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
+{
+	int error, val;
+
+	val = V_ip6_maxfragpackets;
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error != 0 || !req->newptr)
+		return (error);
+	V_ip6_maxfragpackets = val;
+	frag6_set_bucketsize();
+	return (0);
+}
+
 SYSCTL_INT(_net_inet6_ip6, IPV6CTL_FORWARDING, forwarding,
 	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_forwarding), 0,
 	"Enable forwarding of IPv6 packets between interfaces");
@@ -485,8 +500,9 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFHLIM, hlim,
 SYSCTL_VNET_PCPUSTAT(_net_inet6_ip6, IPV6CTL_STATS, stats, struct ip6stat,
 	ip6stat,
 	"IP6 statistics (struct ip6stat, netinet6/ip6_var.h)");
-SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
-	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragpackets), 0,
+SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
+	CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
+	sysctl_ip6_maxfragpackets, "I",
 	"Default maximum number of outstanding fragmented IPv6 packets. "
 	"A value of 0 means no fragmented packets will be accepted, while a "
 	"a value of -1 means no limit");
@@ -564,6 +580,9 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
 	"Maximum allowed number of outstanding IPv6 packet fragments. "
 	"A value of 0 means no fragmented packets will be accepted, while a "
 	"a value of -1 means no limit");
+SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
+	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
+	"Maximum number of reassembly queues per hash bucket");
 SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
 	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
 	"Maximum allowed number of fragments per packet");


@@ -303,6 +303,7 @@ VNET_DECLARE(int, ip6_maxfragpackets);	/* Maximum packets in reassembly
 					 * queue */
 extern int ip6_maxfrags;	/* Maximum fragments in reassembly
 					 * queue */
+VNET_DECLARE(int, ip6_maxfragbucketsize); /* Maximum reassembly queues per bucket */
 VNET_DECLARE(int, ip6_maxfragsperpacket); /* Maximum fragments per packet */
 VNET_DECLARE(int, ip6_accept_rtadv);	/* Acts as a host not a router */
 VNET_DECLARE(int, ip6_no_radr);		/* No defroute from RA */
@@ -318,6 +319,7 @@ VNET_DECLARE(int, ip6_dad_count);	/* DupAddrDetectionTransmits */
 #define	V_ip6_mrouter			VNET(ip6_mrouter)
 #define	V_ip6_sendredirects		VNET(ip6_sendredirects)
 #define	V_ip6_maxfragpackets		VNET(ip6_maxfragpackets)
+#define	V_ip6_maxfragbucketsize		VNET(ip6_maxfragbucketsize)
 #define	V_ip6_maxfragsperpacket		VNET(ip6_maxfragsperpacket)
 #define	V_ip6_accept_rtadv		VNET(ip6_accept_rtadv)
 #define	V_ip6_no_radr			VNET(ip6_no_radr)
@@ -405,6 +407,7 @@ int ip6_fragment(struct ifnet *, struct mbuf *, int, u_char, int,
 int	route6_input(struct mbuf **, int *, int);
 
+void	frag6_set_bucketsize(void);
 void	frag6_init(void);
 int	frag6_input(struct mbuf **, int *, int);
 void	frag6_slowtimo(void);