frag6: replace KAME hand-rolled queues with queue(9) TAILQs

Remove the KAME custom circular queues for fragments and fragmented packets
and replace them with standard TAILQs.
This makes the code a lot more understandable and maintainable, and removes
further hand-rolled code from the tree, using a standard interface instead.

Hide the still public structures under #ifdef _KERNEL as there is no
use for them in user space.
The naming is a bit confusing now, as struct ip6q and the ip6q[] bucket
array are no longer the same thing; sadly struct ip6q is also used by the
MAC framework, so we cannot rename it.
A minimal queue(9) usage sketch follows the commit metadata below.

Submitted by:	jtl (initially)
MFC after:	3 weeks
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D16847 (jtl's original)
Bjoern A. Zeeb 2019-10-23 23:01:18 +00:00
parent be2c561003
commit 21f08a074d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=353965
2 changed files with 94 additions and 170 deletions
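
For readers not familiar with queue(9), the pattern this change adopts looks
roughly as follows. This is a minimal sketch with made-up names (struct frag,
f_tq, fraghead), not code from the commit:

	#include <stdlib.h>
	#include <sys/queue.h>

	struct frag {
		TAILQ_ENTRY(frag) f_tq;	/* embedded linkage; replaces the
					 * hand-rolled up/down pointers */
		int f_off;
	};
	TAILQ_HEAD(fraghead, frag);	/* declares "struct fraghead" */

	static struct fraghead head = TAILQ_HEAD_INITIALIZER(head);

	static void
	example(void)
	{
		struct frag *f, *ftmp;

		if ((f = calloc(1, sizeof(*f))) == NULL)
			return;
		TAILQ_INSERT_HEAD(&head, f, f_tq);
		/* The _SAFE variant tolerates removal while iterating. */
		TAILQ_FOREACH_SAFE(f, &head, f_tq, ftmp) {
			TAILQ_REMOVE(&head, f, f_tq);
			free(f);
		}
	}

The kernel code below uses exactly these macros, with one list of packets per
hash bucket and one list of fragments per packet.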

View File: sys/netinet6/frag6.c

@@ -3,6 +3,7 @@
*
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
* All rights reserved.
* Copyright (c) 2019 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
@@ -72,28 +74,20 @@ __FBSDID("$FreeBSD$");
#define IP6REASS_NHASH (1 << IP6REASS_NHASH_LOG2)
#define IP6REASS_HMASK (IP6REASS_NHASH - 1)
static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *,
uint32_t bucket __unused);
static void frag6_deq(struct ip6asfrag *, uint32_t bucket __unused);
static void frag6_insque_head(struct ip6q *, struct ip6q *,
uint32_t bucket);
static void frag6_remque(struct ip6q *, uint32_t bucket);
static void frag6_freef(struct ip6q *, uint32_t bucket);
TAILQ_HEAD(ip6qhead, ip6q);
struct ip6qbucket {
struct ip6q ip6q;
struct ip6qhead packets;
struct mtx lock;
int count;
};
struct ip6asfrag {
struct ip6asfrag *ip6af_down;
struct ip6asfrag *ip6af_up;
struct ip6asfrag {
TAILQ_ENTRY(ip6asfrag) ip6af_tq;
struct mbuf *ip6af_m;
int ip6af_offset; /* offset in ip6af_m to next header */
int ip6af_frglen; /* fragmentable part length */
int ip6af_off; /* fragment offset */
u_int16_t ip6af_mff; /* more fragment bit in frag off */
bool ip6af_mff; /* more fragment bit in frag off */
};
#define IP6_REASS_MBUF(ip6af) (*(struct mbuf **)&((ip6af)->ip6af_m))
@@ -132,7 +126,7 @@ VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
#define IP6QB_TRYLOCK(_b) mtx_trylock(&V_ip6qb[(_b)].lock)
#define IP6QB_LOCK_ASSERT(_b) mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define IP6QB_UNLOCK(_b) mtx_unlock(&V_ip6qb[(_b)].lock)
#define IP6QB_HEAD(_b) (&V_ip6qb[(_b)].ip6q)
#define IP6QB_HEAD(_b) (&V_ip6qb[(_b)].packets)
/*
* By default, limit the number of IP6 fragments across all reassembly
@@ -240,17 +234,15 @@ static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
struct ip6_hdr *ip6;
struct ip6asfrag *af6, *down6;
struct ip6asfrag *af6;
struct mbuf *m;
IP6QB_LOCK_ASSERT(bucket);
for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
af6 = down6) {
while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
m = IP6_REASS_MBUF(af6);
down6 = af6->ip6af_down;
frag6_deq(af6, bucket);
TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
/*
* Return ICMP time exceeded error for the 1st fragment.
@@ -272,7 +264,9 @@ frag6_freef(struct ip6q *q6, uint32_t bucket)
free(af6, M_FRAG6);
}
frag6_remque(q6, bucket);
TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
V_ip6qb[bucket].count--;
atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
mac_ip6q_destroy(q6);
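
Note the shape of the conversion in frag6_freef() above: the old code walked
the ring until it got back to the sentinel, while the new code uses the
standard TAILQ drain idiom. Schematically, with the mbuf and ICMP handling
omitted:

	/* Before: the queue head doubled as a fake list element. */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; af6 = down6) {
		down6 = af6->ip6af_down;
		frag6_deq(af6, bucket);
		free(af6, M_FRAG6);
	}

	/* After: take the first element until the list is empty. */
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		free(af6, M_FRAG6);
	}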
@@ -288,10 +282,11 @@ frag6_freef(struct ip6q *q6, uint32_t bucket)
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
struct ip6q *q6, *q6n, *head;
struct ip6qhead *head;
struct ip6q *q6;
struct ip6asfrag *af6;
struct mbuf *m;
int i;
uint32_t bucket;
KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));
@@ -305,15 +300,13 @@ frag6_cleanup(void *arg __unused, struct ifnet *ifp)
#endif
CURVNET_SET_QUIET(ifp->if_vnet);
for (i = 0; i < IP6REASS_NHASH; i++) {
IP6QB_LOCK(i);
head = IP6QB_HEAD(i);
for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
IP6QB_LOCK(bucket);
head = IP6QB_HEAD(bucket);
/* Scan fragment list. */
for (q6 = head->ip6q_next; q6 != head; q6 = q6n) {
q6n = q6->ip6q_next;
TAILQ_FOREACH(q6, head, ip6q_tq) {
TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
af6 = af6->ip6af_down) {
m = IP6_REASS_MBUF(af6);
/* clear no longer valid rcvif pointer */
@@ -321,7 +314,7 @@ frag6_cleanup(void *arg __unused, struct ifnet *ifp)
m->m_pkthdr.rcvif = NULL;
}
}
IP6QB_UNLOCK(i);
IP6QB_UNLOCK(bucket);
}
CURVNET_RESTORE();
}
@@ -362,14 +355,14 @@ EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
struct ifnet *dstifp;
struct ifnet *srcifp;
struct in6_ifaddr *ia6;
struct mbuf *m, *t;
struct ip6_hdr *ip6;
struct ip6_frag *ip6f;
struct ip6q *head, *q6;
struct ip6asfrag *af6, *af6dwn, *ip6af;
struct mbuf *m, *t;
struct ip6qhead *head;
struct ip6q *q6;
struct ip6asfrag *af6, *ip6af, *af6tmp;
struct in6_ifaddr *ia6;
struct ifnet *dstifp, *srcifp;
uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
uint32_t bucket, *hashkeyp;
@@ -466,8 +459,8 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
*hashkeyp = ip6f->ip6f_ident;
bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
bucket &= IP6REASS_HMASK;
head = IP6QB_HEAD(bucket);
IP6QB_LOCK(bucket);
head = IP6QB_HEAD(bucket);
/*
* Enforce upper bound on number of fragments for the entire system.
@@ -479,7 +472,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
goto dropfrag;
for (q6 = head->ip6q_next; q6 != head; q6 = q6->ip6q_next)
TAILQ_FOREACH(q6, head, ip6q_tq)
if (ip6f->ip6f_ident == q6->ip6q_ident &&
IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
@@ -490,7 +483,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
break;
only_frag = false;
if (q6 == head) {
if (q6 == NULL) {
/* A first fragment to arrive creates a reassembly queue. */
only_frag = true;
@@ -522,13 +515,9 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
}
mac_ip6q_create(m, q6);
#endif
frag6_insque_head(q6, head, bucket);
/* ip6q_nxt will be filled afterwards, from 1st fragment. */
q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
q6->ip6q_nxtp = (u_char *)nxtp;
#endif
TAILQ_INIT(&q6->ip6q_frags);
q6->ip6q_ident = ip6f->ip6f_ident;
q6->ip6q_ttl = IPV6_FRAGTTL;
q6->ip6q_src = ip6->ip6_src;
@@ -537,7 +526,9 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
(ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
q6->ip6q_nfrag = 0;
/* Add the fragmented packet to the bucket. */
TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
V_ip6qb[bucket].count++;
}
/*
@@ -577,9 +568,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
* fragment already stored in the reassembly queue.
*/
if (fragoff == 0) {
for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
af6 = af6dwn) {
af6dwn = af6->ip6af_down;
TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
IPV6_MAXPACKET) {
@@ -591,7 +580,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
erroff = af6->ip6af_offset;
/* Dequeue the fragment. */
frag6_deq(af6, bucket);
TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
free(af6, M_FRAG6);
/* Set a valid receive interface pointer. */
@@ -620,15 +609,19 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
M_NOWAIT | M_ZERO);
if (ip6af == NULL)
goto dropfrag;
ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
ip6af->ip6af_off = fragoff;
ip6af->ip6af_frglen = frgpartlen;
ip6af->ip6af_offset = offset;
IP6_REASS_MBUF(ip6af) = m;
if (only_frag) {
af6 = (struct ip6asfrag *)q6;
goto insert;
/*
* Do a manual insert rather than a hard-to-understand cast
* to a different type relying on data structure order to work.
*/
TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
goto postinsert;
}
/* Do duplicate, condition, and boundary checks. */
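
The cast that the new comment above refers to only worked because the old
struct ip6q and struct ip6asfrag both began with their down/up link pointers
(visible in the header diff further below), so the queue head could
masquerade as a list element. Roughly, and simplified:

	struct ip6q {
		struct ip6asfrag *ip6q_down;	/* had to stay first */
		struct ip6asfrag *ip6q_up;	/* ... and second */
		/* ... */
	};
	struct ip6asfrag {
		struct ip6asfrag *ip6af_down;	/* same leading layout */
		struct ip6asfrag *ip6af_up;
		/* ... */
	};

	/* On an empty queue this amounted to an insert at the head: */
	af6 = (struct ip6asfrag *)q6;

An explicit TAILQ_INSERT_HEAD() says the same thing without depending on
member order.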
@@ -653,8 +646,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
}
/* Find a fragmented part which begins after this one does. */
for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
af6 = af6->ip6af_down)
TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
if (af6->ip6af_off > ip6af->ip6af_off)
break;
@@ -666,14 +658,18 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
* drop the existing fragment and leave the fragmentation queue
* unchanged, as allowed by the RFC. (RFC 8200, 4.5)
*/
if (af6->ip6af_up != (struct ip6asfrag *)q6) {
if (af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen -
if (af6 != NULL)
af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
else
af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
if (af6tmp != NULL) {
if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
ip6af->ip6af_off > 0) {
free(ip6af, M_FRAG6);
goto dropfrag;
}
}
if (af6 != (struct ip6asfrag *)q6) {
if (af6 != NULL) {
if (ip6af->ip6af_off + ip6af->ip6af_frglen -
af6->ip6af_off > 0) {
free(ip6af, M_FRAG6);
@@ -681,10 +677,8 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
}
}
insert:
#ifdef MAC
if (!only_frag)
mac_ip6q_update(m, q6);
mac_ip6q_update(m, q6);
#endif
/*
@@ -692,13 +686,16 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
* If not complete, check fragment limit. Move to front of packet
* queue, as we are the most recently active fragmented packet.
*/
frag6_enq(ip6af, af6->ip6af_up, bucket);
if (af6 != NULL)
TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
else
TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
atomic_add_int(&frag6_nfrags, 1);
q6->ip6q_nfrag++;
plen = 0;
for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
af6 = af6->ip6af_down) {
TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
if (af6->ip6af_off != plen) {
if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
@@ -709,7 +706,8 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
}
plen += af6->ip6af_frglen;
}
if (af6->ip6af_up->ip6af_mff) {
af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
if (af6->ip6af_mff) {
if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
frag6_freef(q6, bucket);
@@ -719,25 +717,22 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
}
/* Reassembly is complete; concatenate fragments. */
ip6af = q6->ip6q_down;
ip6af = TAILQ_FIRST(&q6->ip6q_frags);
t = m = IP6_REASS_MBUF(ip6af);
af6 = ip6af->ip6af_down;
frag6_deq(ip6af, bucket);
while (af6 != (struct ip6asfrag *)q6) {
TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
m->m_pkthdr.csum_flags &=
IP6_REASS_MBUF(af6)->m_pkthdr.csum_flags;
m->m_pkthdr.csum_data +=
IP6_REASS_MBUF(af6)->m_pkthdr.csum_data;
af6dwn = af6->ip6af_down;
frag6_deq(af6, bucket);
TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
while (t->m_next)
t = t->m_next;
m_adj(IP6_REASS_MBUF(af6), af6->ip6af_offset);
m_demote_pkthdr(IP6_REASS_MBUF(af6));
m_cat(t, IP6_REASS_MBUF(af6));
free(af6, M_FRAG6);
af6 = af6dwn;
}
while (m->m_pkthdr.csum_data & 0xffff0000)
@@ -753,9 +748,11 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
nxt = q6->ip6q_nxt;
TAILQ_REMOVE(head, q6, ip6q_tq);
V_ip6qb[bucket].count--;
atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
frag6_remque(q6, bucket);
atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
mac_ip6q_destroy(q6);
#endif
@@ -769,8 +766,6 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
(caddr_t)&nxt);
frag6_remque(q6, bucket);
atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
mac_ip6q_reassemble(q6, m);
mac_ip6q_destroy(q6);
@@ -833,7 +828,8 @@ void
frag6_slowtimo(void)
{
VNET_ITERATOR_DECL(vnet_iter);
struct ip6q *head, *q6;
struct ip6qhead *head;
struct ip6q *q6, *q6tmp;
uint32_t bucket;
VNET_LIST_RLOCK_NOSLEEP();
@@ -842,25 +838,13 @@ frag6_slowtimo(void)
for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
IP6QB_LOCK(bucket);
head = IP6QB_HEAD(bucket);
q6 = head->ip6q_next;
if (q6 == NULL) {
/*
* XXXJTL: This should never happen. This
* should turn into an assertion.
*/
IP6QB_UNLOCK(bucket);
continue;
}
while (q6 != head) {
--q6->ip6q_ttl;
q6 = q6->ip6q_next;
if (q6->ip6q_prev->ip6q_ttl == 0) {
TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
if (--q6->ip6q_ttl == 0) {
IP6STAT_ADD(ip6s_fragtimeout,
q6->ip6q_prev->ip6q_nfrag);
q6->ip6q_nfrag);
/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
frag6_freef(q6->ip6q_prev, bucket);
frag6_freef(q6, bucket);
}
}
/*
* If we are over the maximum number of fragments
* (due to the limit being lowered), drain off
@@ -873,11 +857,10 @@ frag6_slowtimo(void)
while ((V_ip6_maxfragpackets == 0 ||
(V_ip6_maxfragpackets > 0 &&
V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
head->ip6q_prev != head) {
IP6STAT_ADD(ip6s_fragoverflow,
q6->ip6q_prev->ip6q_nfrag);
(q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
frag6_freef(head->ip6q_prev, bucket);
frag6_freef(q6, bucket);
}
IP6QB_UNLOCK(bucket);
}
@@ -890,12 +873,11 @@ frag6_slowtimo(void)
atomic_load_int(&V_frag6_nfragpackets) >
(u_int)V_ip6_maxfragpackets) {
IP6QB_LOCK(bucket);
head = IP6QB_HEAD(bucket);
if (head->ip6q_prev != head) {
IP6STAT_ADD(ip6s_fragoverflow,
q6->ip6q_prev->ip6q_nfrag);
q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
if (q6 != NULL) {
IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
frag6_freef(head->ip6q_prev, bucket);
frag6_freef(q6, bucket);
}
IP6QB_UNLOCK(bucket);
bucket = (bucket + 1) % IP6REASS_NHASH;
@@ -930,14 +912,12 @@ frag6_change(void *tag)
void
frag6_init(void)
{
struct ip6q *q6;
uint32_t bucket;
V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
frag6_set_bucketsize();
for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
q6 = IP6QB_HEAD(bucket);
q6->ip6q_next = q6->ip6q_prev = q6;
TAILQ_INIT(IP6QB_HEAD(bucket));
mtx_init(&V_ip6qb[bucket].lock, "ip6qlock", NULL, MTX_DEF);
V_ip6qb[bucket].count = 0;
}
@@ -960,16 +940,15 @@ frag6_init(void)
static void
frag6_drain_one(void)
{
struct ip6q *head;
struct ip6q *q6;
uint32_t bucket;
for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
IP6QB_LOCK(bucket);
head = IP6QB_HEAD(bucket);
while (head->ip6q_next != head) {
while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
IP6STAT_INC(ip6s_fragdropped);
/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
frag6_freef(head->ip6q_next, bucket);
frag6_freef(q6, bucket);
}
IP6QB_UNLOCK(bucket);
}
@@ -1008,60 +987,3 @@ frag6_destroy(void)
}
}
#endif
/*
* Put an ip fragment on a reassembly chain.
* Like insque, but pointers in middle of structure.
*/
static void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6,
uint32_t bucket __unused)
{
IP6QB_LOCK_ASSERT(bucket);
af6->ip6af_up = up6;
af6->ip6af_down = up6->ip6af_down;
up6->ip6af_down->ip6af_up = af6;
up6->ip6af_down = af6;
}
/*
* To frag6_enq as remque is to insque.
*/
static void
frag6_deq(struct ip6asfrag *af6, uint32_t bucket __unused)
{
IP6QB_LOCK_ASSERT(bucket);
af6->ip6af_up->ip6af_down = af6->ip6af_down;
af6->ip6af_down->ip6af_up = af6->ip6af_up;
}
static void
frag6_insque_head(struct ip6q *new, struct ip6q *old, uint32_t bucket)
{
IP6QB_LOCK_ASSERT(bucket);
KASSERT(IP6QB_HEAD(bucket) == old,
("%s: attempt to insert at head of wrong bucket"
" (bucket=%u, old=%p)", __func__, bucket, old));
new->ip6q_prev = old;
new->ip6q_next = old->ip6q_next;
old->ip6q_next->ip6q_prev = new;
old->ip6q_next = new;
V_ip6qb[bucket].count++;
}
static void
frag6_remque(struct ip6q *p6, uint32_t bucket)
{
IP6QB_LOCK_ASSERT(bucket);
p6->ip6q_prev->ip6q_next = p6->ip6q_next;
p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
V_ip6qb[bucket].count--;
}

View File: sys/netinet6/ip6_var.h

@@ -68,25 +68,27 @@
#include <sys/epoch.h>
struct ip6asfrag;
#ifdef _KERNEL
struct ip6asfrag; /* frag6.c */
TAILQ_HEAD(ip6fraghead, ip6asfrag);
/*
* IP6 reassembly queue structure. Each fragment
* being reassembled is attached to one of these structures.
*/
struct ip6q {
struct ip6asfrag *ip6q_down;
struct ip6asfrag *ip6q_up;
struct ip6fraghead ip6q_frags;
u_int32_t ip6q_ident;
u_int8_t ip6q_nxt;
u_int8_t ip6q_ecn;
u_int8_t ip6q_ttl;
struct in6_addr ip6q_src, ip6q_dst;
struct ip6q *ip6q_next;
struct ip6q *ip6q_prev;
TAILQ_ENTRY(ip6q) ip6q_tq;
int ip6q_unfrglen; /* len of unfragmentable part */
int ip6q_nfrag; /* # of fragments */
struct label *ip6q_label;
};
#endif /* _KERNEL */
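
One queue(9) detail worth noting: TAILQ_HEAD(ip6fraghead, ip6asfrag) gives
the head structure a type name, and the backward-walking macros frag6.c now
uses take that type name, not a head pointer. Illustrative fragment, with q6
and af6 as in frag6_input():

	struct ip6asfrag *last, *prev;

	/* Last element (highest offset, as the list is kept sorted). */
	last = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	/* Predecessor of af6; NULL at the front of the list. */
	prev = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);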
/*
* IP6 reinjecting structure.