Convert all IPv4 and IPv6 multicast memberships to use a STAILQ instead of a linear array.

The multicast memberships for the inpcb structure are protected by a
non-sleepable lock, INP_WLOCK(), which must be dropped when calling
the underlying, possibly sleeping, if_ioctl() method. When a linear
array is used to keep track of multicast memberships, the computed
memory location of a multicast filter may change unexpectedly due to
concurrent insertion or removal of elements in the array. This in
turn leads to invalid memory accesses and kernel panics.
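
The failure mode, in simplified form (illustrative sketch only, not
code from the tree; imo, imf and idx stand for the usual ip_moptions,
in_mfilter and membership index):

	INP_WLOCK(inp);
	imf = &imo->imo_mfilters[idx];	/* pointer into a realloc()'d array */
	...
	INP_WUNLOCK(inp);		/* dropped around the sleeping if_ioctl() */
	/*
	 * While the lock is dropped, another thread may join or leave a
	 * group and realloc() or compact imo_mfilters, so the saved imf
	 * now points into freed or shifted memory.
	 */
	INP_WLOCK(inp);
	imf->imf_st[1] = MCAST_EXCLUDE;	/* use-after-free or wrong entry */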

To avoid this problem, put all multicast memberships on a STAILQ-based
list. The memory location of each IPv4 and IPv6 multicast filter then
stays fixed for its lifetime, and use-after-free and memory-leak issues
become easier to track, for example with: vmstat -m | grep multi
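
Concretely, each membership is now carried by its own separately
allocated in_mfilter node, which holds both the source-filter state and
the pointer to the in_multi record, while ip_moptions only keeps a list
head (abridged from the header changes below; the IPv6 variants in
in6_mfilter and ip6_moptions are analogous):

	struct in_mfilter {
		struct ip_msource_tree	 imf_sources;	/* source list for (S,G) */
		u_long			 imf_nsrc;	/* # of source entries */
		uint8_t			 imf_st[2];	/* state before/at commit */
		struct in_multi		*imf_inm;	/* associated multicast address */
		STAILQ_ENTRY(in_mfilter) imf_entry;	/* list entry */
	};

	struct ip_moptions {
		...
		struct ip_mfilter_head	 imo_head;	/* group membership list */
	};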

All list manipulation has been factored into inline functions, plus a
few macros, so that a future hash-list implementation can be dropped in
easily, if needed; a short usage sketch follows below.
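
A minimal usage sketch of the new IPv4 helpers introduced by this patch
(error handling omitted; the ip6_mfilter_*() counterparts behave the
same way):

	struct ip_mfilter_head head;
	struct in_mfilter *imf;

	ip_mfilter_init(&head);

	imf = ip_mfilter_alloc(M_WAITOK, MCAST_UNDEFINED, MCAST_EXCLUDE);
	ip_mfilter_insert(&head, imf);

	IP_MFILTER_FOREACH(imf, &head) {
		/* imf and imf->imf_inm stay at a fixed address for their lifetime */
	}

	while ((imf = ip_mfilter_first(&head)) != NULL) {
		ip_mfilter_remove(&head, imf);
		ip_mfilter_free(imf);	/* purges the source tree and frees the node */
	}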

This patch has been tested by pho@.

Differential Revision: https://reviews.freebsd.org/D20080
Reviewed by:	markj@
MFC after:	1 week
Sponsored by:	Mellanox Technologies
Author:	Hans Petter Selasky
Date:	2019-06-25 11:54:41 +00:00
Parent:	43a9329e1b
Commit:	59854ecf55
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=349369
15 changed files with 565 additions and 668 deletions


@ -1134,7 +1134,7 @@ vxlan_socket_mc_join_group(struct vxlan_socket *vso,
* If we really need to, we can of course look in the INP's
* membership list:
* sotoinpcb(vso->vxlso_sock)->inp_moptions->
* imo_membership[]->inm_ifp
* imo_head[]->imf_inm->inm_ifp
* similarly to imo_match_group().
*/
source->in4.sin_addr = local->in4.sin_addr;


@ -505,13 +505,9 @@ __END_DECLS
#define IP_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */
/*
* The imo_membership vector for each socket is now dynamically allocated at
* run-time, bounded by USHRT_MAX, and is reallocated when needed, sized
* according to a power-of-two increment.
* Limit for IPv4 multicast memberships
*/
#define IP_MIN_MEMBERSHIPS 31
#define IP_MAX_MEMBERSHIPS 4095
#define IP_MAX_SOURCE_FILTER 1024 /* XXX to be unused */
/*
* Default resource limits for IPv4 multicast source filtering.


@ -94,7 +94,9 @@ static MALLOC_DEFINE(M_IPMSOURCE, "ip_msource",
/*
* Locking:
* - Lock order is: Giant, INP_WLOCK, IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
*
* - Lock order is: Giant, IN_MULTI_LOCK, INP_WLOCK,
* IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
* - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however
* it can be taken by code in net/if.c also.
* - ip_moptions and in_mfilter are covered by the INP_WLOCK.
@ -144,12 +146,11 @@ static int imf_prune(struct in_mfilter *, const struct sockaddr_in *);
static void imf_purge(struct in_mfilter *);
static void imf_rollback(struct in_mfilter *);
static void imf_reap(struct in_mfilter *);
static int imo_grow(struct ip_moptions *);
static size_t imo_match_group(const struct ip_moptions *,
static struct in_mfilter *
imo_match_group(const struct ip_moptions *,
const struct ifnet *, const struct sockaddr *);
static struct in_msource *
imo_match_source(const struct ip_moptions *, const size_t,
const struct sockaddr *);
imo_match_source(struct in_mfilter *, const struct sockaddr *);
static void ims_merge(struct ip_msource *ims,
const struct in_msource *lims, const int rollback);
static int in_getmulti(struct ifnet *, const struct in_addr *,
@ -333,6 +334,26 @@ imf_init(struct in_mfilter *imf, const int st0, const int st1)
imf->imf_st[1] = st1;
}
struct in_mfilter *
ip_mfilter_alloc(const int mflags, const int st0, const int st1)
{
struct in_mfilter *imf;
imf = malloc(sizeof(*imf), M_INMFILTER, mflags);
if (imf != NULL)
imf_init(imf, st0, st1);
return (imf);
}
void
ip_mfilter_free(struct in_mfilter *imf)
{
imf_purge(imf);
free(imf, M_INMFILTER);
}
/*
* Function for looking up an in_multi record for an IPv4 multicast address
* on a given interface. ifp must be valid. If no record found, return NULL.
@ -378,90 +399,31 @@ inm_lookup(struct ifnet *ifp, const struct in_addr ina)
return (inm);
}
/*
* Resize the ip_moptions vector to the next power-of-two minus 1.
* May be called with locks held; do not sleep.
*/
static int
imo_grow(struct ip_moptions *imo)
{
struct in_multi **nmships;
struct in_multi **omships;
struct in_mfilter *nmfilters;
struct in_mfilter *omfilters;
size_t idx;
size_t newmax;
size_t oldmax;
nmships = NULL;
nmfilters = NULL;
omships = imo->imo_membership;
omfilters = imo->imo_mfilters;
oldmax = imo->imo_max_memberships;
newmax = ((oldmax + 1) * 2) - 1;
if (newmax <= IP_MAX_MEMBERSHIPS) {
nmships = (struct in_multi **)realloc(omships,
sizeof(struct in_multi *) * newmax, M_IPMOPTS, M_NOWAIT);
nmfilters = (struct in_mfilter *)realloc(omfilters,
sizeof(struct in_mfilter) * newmax, M_INMFILTER, M_NOWAIT);
if (nmships != NULL && nmfilters != NULL) {
/* Initialize newly allocated source filter heads. */
for (idx = oldmax; idx < newmax; idx++) {
imf_init(&nmfilters[idx], MCAST_UNDEFINED,
MCAST_EXCLUDE);
}
imo->imo_max_memberships = newmax;
imo->imo_membership = nmships;
imo->imo_mfilters = nmfilters;
}
}
if (nmships == NULL || nmfilters == NULL) {
if (nmships != NULL)
free(nmships, M_IPMOPTS);
if (nmfilters != NULL)
free(nmfilters, M_INMFILTER);
return (ETOOMANYREFS);
}
return (0);
}
/*
* Find an IPv4 multicast group entry for this ip_moptions instance
* which matches the specified group, and optionally an interface.
* Return its index into the array, or -1 if not found.
*/
static size_t
static struct in_mfilter *
imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp,
const struct sockaddr *group)
{
const struct sockaddr_in *gsin;
struct in_multi **pinm;
int idx;
int nmships;
struct in_mfilter *imf;
struct in_multi *inm;
gsin = (const struct sockaddr_in *)group;
/* The imo_membership array may be lazy allocated. */
if (imo->imo_membership == NULL || imo->imo_num_memberships == 0)
return (-1);
nmships = imo->imo_num_memberships;
pinm = &imo->imo_membership[0];
for (idx = 0; idx < nmships; idx++, pinm++) {
if (*pinm == NULL)
IP_MFILTER_FOREACH(imf, &imo->imo_head) {
inm = imf->imf_inm;
if (inm == NULL)
continue;
if ((ifp == NULL || ((*pinm)->inm_ifp == ifp)) &&
in_hosteq((*pinm)->inm_addr, gsin->sin_addr)) {
if ((ifp == NULL || (inm->inm_ifp == ifp)) &&
in_hosteq(inm->inm_addr, gsin->sin_addr)) {
break;
}
}
if (idx >= nmships)
idx = -1;
return (idx);
return (imf);
}
/*
@ -472,22 +434,13 @@ imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp,
* it exists, which may not be the desired behaviour.
*/
static struct in_msource *
imo_match_source(const struct ip_moptions *imo, const size_t gidx,
const struct sockaddr *src)
imo_match_source(struct in_mfilter *imf, const struct sockaddr *src)
{
struct ip_msource find;
struct in_mfilter *imf;
struct ip_msource *ims;
const sockunion_t *psa;
KASSERT(src->sa_family == AF_INET, ("%s: !AF_INET", __func__));
KASSERT(gidx != -1 && gidx < imo->imo_num_memberships,
("%s: invalid index %d\n", __func__, (int)gidx));
/* The imo_mfilters array may be lazy allocated. */
if (imo->imo_mfilters == NULL)
return (NULL);
imf = &imo->imo_mfilters[gidx];
/* Source trees are keyed in host byte order. */
psa = (const sockunion_t *)src;
@ -507,14 +460,14 @@ int
imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp,
const struct sockaddr *group, const struct sockaddr *src)
{
size_t gidx;
struct in_mfilter *imf;
struct in_msource *ims;
int mode;
KASSERT(ifp != NULL, ("%s: null ifp", __func__));
gidx = imo_match_group(imo, ifp, group);
if (gidx == -1)
imf = imo_match_group(imo, ifp, group);
if (imf == NULL)
return (MCAST_NOTGMEMBER);
/*
@ -526,8 +479,8 @@ imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp,
* NOTE: We are comparing group state here at IGMP t1 (now)
* with socket-layer t0 (since last downcall).
*/
mode = imo->imo_mfilters[gidx].imf_st[1];
ims = imo_match_source(imo, gidx, src);
mode = imf->imf_st[1];
ims = imo_match_source(imf, src);
if ((ims == NULL && mode == MCAST_INCLUDE) ||
(ims != NULL && ims->imsl_st[0] != mode))
@ -1452,7 +1405,6 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
struct ip_moptions *imo;
struct in_msource *ims;
struct in_multi *inm;
size_t idx;
uint16_t fmode;
int error, doblock;
@ -1531,20 +1483,18 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
return (EINVAL);
IN_MULTI_LOCK();
/*
* Check if we are actually a member of this group.
*/
imo = inp_findmoptions(inp);
idx = imo_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->imo_mfilters == NULL) {
imf = imo_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_inp_locked;
}
KASSERT(imo->imo_mfilters != NULL,
("%s: imo_mfilters not allocated", __func__));
imf = &imo->imo_mfilters[idx];
inm = imo->imo_membership[idx];
inm = imf->imf_inm;
/*
* Attempting to use the delta-based API on an
@ -1562,7 +1512,7 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
* Asked to unblock, but nothing to unblock.
* If adding a new block entry, allocate it.
*/
ims = imo_match_source(imo, idx, &ssa->sa);
ims = imo_match_source(imf, &ssa->sa);
if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
CTR3(KTR_IGMPV3, "%s: source 0x%08x %spresent", __func__,
ntohl(ssa->sin.sin_addr.s_addr), doblock ? "" : "not ");
@ -1593,14 +1543,13 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at IGMP layer.
*/
IN_MULTI_LOCK();
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
goto out_imf_rollback;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
@ -1609,9 +1558,6 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
if (error)
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
out_in_multi_locked:
IN_MULTI_UNLOCK();
out_imf_rollback:
if (error)
imf_rollback(imf);
@ -1622,6 +1568,7 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
out_inp_locked:
INP_WUNLOCK(inp);
IN_MULTI_UNLOCK();
return (error);
}
@ -1636,9 +1583,6 @@ static struct ip_moptions *
inp_findmoptions(struct inpcb *inp)
{
struct ip_moptions *imo;
struct in_multi **immp;
struct in_mfilter *imfp;
size_t idx;
INP_WLOCK(inp);
if (inp->inp_moptions != NULL)
@ -1647,29 +1591,16 @@ inp_findmoptions(struct inpcb *inp)
INP_WUNLOCK(inp);
imo = malloc(sizeof(*imo), M_IPMOPTS, M_WAITOK);
immp = malloc(sizeof(*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS,
M_WAITOK | M_ZERO);
imfp = malloc(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS,
M_INMFILTER, M_WAITOK);
imo->imo_multicast_ifp = NULL;
imo->imo_multicast_addr.s_addr = INADDR_ANY;
imo->imo_multicast_vif = -1;
imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
imo->imo_multicast_loop = in_mcast_loop;
imo->imo_num_memberships = 0;
imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
imo->imo_membership = immp;
/* Initialize per-group source filters. */
for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++)
imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
imo->imo_mfilters = imfp;
STAILQ_INIT(&imo->imo_head);
INP_WLOCK(inp);
if (inp->inp_moptions != NULL) {
free(imfp, M_INMFILTER);
free(immp, M_IPMOPTS);
free(imo, M_IPMOPTS);
return (inp->inp_moptions);
}
@ -1680,32 +1611,25 @@ inp_findmoptions(struct inpcb *inp)
static void
inp_gcmoptions(struct ip_moptions *imo)
{
struct in_mfilter *imf;
struct in_mfilter *imf;
struct in_multi *inm;
struct ifnet *ifp;
size_t idx, nmships;
nmships = imo->imo_num_memberships;
for (idx = 0; idx < nmships; ++idx) {
imf = imo->imo_mfilters ? &imo->imo_mfilters[idx] : NULL;
if (imf)
imf_leave(imf);
inm = imo->imo_membership[idx];
ifp = inm->inm_ifp;
if (ifp != NULL) {
CURVNET_SET(ifp->if_vnet);
(void)in_leavegroup(inm, imf);
CURVNET_RESTORE();
} else {
(void)in_leavegroup(inm, imf);
while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
ip_mfilter_remove(&imo->imo_head, imf);
imf_leave(imf);
if ((inm = imf->imf_inm) != NULL) {
if ((ifp = inm->inm_ifp) != NULL) {
CURVNET_SET(ifp->if_vnet);
(void)in_leavegroup(inm, imf);
CURVNET_RESTORE();
} else {
(void)in_leavegroup(inm, imf);
}
}
if (imf)
imf_purge(imf);
ip_mfilter_free(imf);
}
if (imo->imo_mfilters)
free(imo->imo_mfilters, M_INMFILTER);
free(imo->imo_membership, M_IPMOPTS);
free(imo, M_IPMOPTS);
}
@ -1741,7 +1665,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
struct sockaddr_storage *ptss;
struct sockaddr_storage *tss;
int error;
size_t idx, nsrcs, ncsrcs;
size_t nsrcs, ncsrcs;
INP_WLOCK_ASSERT(inp);
@ -1768,12 +1692,11 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
* Lookup group on the socket.
*/
gsa = (sockunion_t *)&msfr.msfr_group;
idx = imo_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->imo_mfilters == NULL) {
imf = imo_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
INP_WUNLOCK(inp);
return (EADDRNOTAVAIL);
}
imf = &imo->imo_mfilters[idx];
/*
* Ignore memberships which are in limbo.
@ -2033,14 +1956,11 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
struct ip_moptions *imo;
struct in_multi *inm;
struct in_msource *lims;
size_t idx;
int error, is_new;
ifp = NULL;
imf = NULL;
lims = NULL;
error = 0;
is_new = 0;
memset(&gsr, 0, sizeof(struct group_source_req));
gsa = (sockunion_t *)&gsr.gsr_group;
@ -2148,13 +2068,25 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0)
return (EADDRNOTAVAIL);
IN_MULTI_LOCK();
/*
* Find the membership in the membership list.
*/
imo = inp_findmoptions(inp);
idx = imo_match_group(imo, ifp, &gsa->sa);
if (idx == -1) {
imf = imo_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
is_new = 1;
inm = NULL;
if (ip_mfilter_count(&imo->imo_head) >= IP_MAX_MEMBERSHIPS) {
error = ENOMEM;
goto out_inp_locked;
}
} else {
inm = imo->imo_membership[idx];
imf = &imo->imo_mfilters[idx];
is_new = 0;
inm = imf->imf_inm;
if (ssa->ss.ss_family != AF_UNSPEC) {
/*
* MCAST_JOIN_SOURCE_GROUP on an exclusive membership
@ -2181,7 +2113,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
* full-state SSM API with the delta-based API,
* which is discouraged in the relevant RFCs.
*/
lims = imo_match_source(imo, idx, &ssa->sa);
lims = imo_match_source(imf, &ssa->sa);
if (lims != NULL /*&&
lims->imsl_st[1] == MCAST_INCLUDE*/) {
error = EADDRNOTAVAIL;
@ -2214,27 +2146,6 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
*/
INP_WLOCK_ASSERT(inp);
if (is_new) {
if (imo->imo_num_memberships == imo->imo_max_memberships) {
error = imo_grow(imo);
if (error)
goto out_inp_locked;
}
/*
* Allocate the new slot upfront so we can deal with
* grafting the new source filter in same code path
* as for join-source on existing membership.
*/
idx = imo->imo_num_memberships;
imo->imo_membership[idx] = NULL;
imo->imo_num_memberships++;
KASSERT(imo->imo_mfilters != NULL,
("%s: imf_mfilters vector was not allocated", __func__));
imf = &imo->imo_mfilters[idx];
KASSERT(RB_EMPTY(&imf->imf_sources),
("%s: imf_sources not empty", __func__));
}
/*
* Graft new source into filter list for this inpcb's
* membership of the group. The in_multi may not have
@ -2250,7 +2161,11 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
/* Membership starts in IN mode */
if (is_new) {
CTR1(KTR_IGMPV3, "%s: new join w/source", __func__);
imf_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
imf = ip_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE);
if (imf == NULL) {
error = ENOMEM;
goto out_inp_locked;
}
} else {
CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow");
}
@ -2259,34 +2174,41 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_IGMPV3, "%s: merge imf state failed",
__func__);
error = ENOMEM;
goto out_imo_free;
goto out_inp_locked;
}
} else {
/* No address specified; Membership starts in EX mode */
if (is_new) {
CTR1(KTR_IGMPV3, "%s: new join w/o source", __func__);
imf_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
imf = ip_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE);
if (imf == NULL) {
error = ENOMEM;
goto out_inp_locked;
}
}
}
/*
* Begin state merge transaction at IGMP layer.
*/
in_pcbref(inp);
INP_WUNLOCK(inp);
IN_MULTI_LOCK();
if (is_new) {
in_pcbref(inp);
INP_WUNLOCK(inp);
error = in_joingroup_locked(ifp, &gsa->sin.sin_addr, imf,
&inm);
&imf->imf_inm);
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp)) {
error = ENXIO;
goto out_inp_unlocked;
}
if (error) {
CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed",
__func__);
IN_MULTI_LIST_UNLOCK();
goto out_imo_free;
goto out_inp_locked;
}
inm_acquire(inm);
imo->imo_membership[idx] = inm;
inm_acquire(imf->imf_inm);
} else {
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
IN_MULTI_LIST_LOCK();
@ -2295,7 +2217,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
__func__);
IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
imf_rollback(imf);
imf_reap(imf);
goto out_inp_locked;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
error = igmp_change_state(inm);
@ -2303,40 +2227,30 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
if (error) {
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
__func__);
goto out_in_multi_locked;
}
}
out_in_multi_locked:
IN_MULTI_UNLOCK();
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);
if (error) {
imf_rollback(imf);
if (is_new)
imf_purge(imf);
else
imf_rollback(imf);
imf_reap(imf);
} else {
imf_commit(imf);
}
out_imo_free:
if (error && is_new) {
inm = imo->imo_membership[idx];
if (inm != NULL) {
IN_MULTI_LIST_LOCK();
inm_release_deferred(inm);
IN_MULTI_LIST_UNLOCK();
goto out_inp_locked;
}
imo->imo_membership[idx] = NULL;
--imo->imo_num_memberships;
}
if (is_new)
ip_mfilter_insert(&imo->imo_head, imf);
imf_commit(imf);
imf = NULL;
out_inp_locked:
INP_WUNLOCK(inp);
out_inp_unlocked:
IN_MULTI_UNLOCK();
if (is_new && imf) {
if (imf->imf_inm != NULL) {
IN_MULTI_LIST_LOCK();
inm_release_deferred(imf->imf_inm);
IN_MULTI_LIST_UNLOCK();
}
ip_mfilter_free(imf);
}
return (error);
}
@ -2355,12 +2269,12 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
struct ip_moptions *imo;
struct in_msource *ims;
struct in_multi *inm;
size_t idx;
int error, is_final;
int error;
bool is_final;
ifp = NULL;
error = 0;
is_final = 1;
is_final = true;
memset(&gsr, 0, sizeof(struct group_source_req));
gsa = (sockunion_t *)&gsr.gsr_group;
@ -2460,20 +2374,21 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
return (EINVAL);
IN_MULTI_LOCK();
/*
* Find the membership in the membership array.
* Find the membership in the membership list.
*/
imo = inp_findmoptions(inp);
idx = imo_match_group(imo, ifp, &gsa->sa);
if (idx == -1) {
imf = imo_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_inp_locked;
}
inm = imo->imo_membership[idx];
imf = &imo->imo_mfilters[idx];
inm = imf->imf_inm;
if (ssa->ss.ss_family != AF_UNSPEC)
is_final = 0;
is_final = false;
/*
* Begin state merge transaction at socket layer.
@ -2485,13 +2400,14 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
* MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
*/
if (is_final) {
ip_mfilter_remove(&imo->imo_head, imf);
imf_leave(imf);
} else {
if (imf->imf_st[0] == MCAST_EXCLUDE) {
error = EADDRNOTAVAIL;
goto out_inp_locked;
}
ims = imo_match_source(imo, idx, &ssa->sa);
ims = imo_match_source(imf, &ssa->sa);
if (ims == NULL) {
CTR3(KTR_IGMPV3, "%s: source 0x%08x %spresent",
__func__, ntohl(ssa->sin.sin_addr.s_addr), "not ");
@ -2510,17 +2426,7 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at IGMP layer.
*/
in_pcbref(inp);
INP_WUNLOCK(inp);
IN_MULTI_LOCK();
if (is_final) {
/*
* Give up the multicast address record to which
* the membership points.
*/
(void)in_leavegroup_locked(inm, imf);
} else {
if (!is_final) {
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
@ -2528,7 +2434,9 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
__func__);
IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
imf_rollback(imf);
imf_reap(imf);
goto out_inp_locked;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
@ -2537,38 +2445,27 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
if (error) {
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
__func__);
imf_rollback(imf);
imf_reap(imf);
goto out_inp_locked;
}
}
out_in_multi_locked:
IN_MULTI_UNLOCK();
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);
if (error)
imf_rollback(imf);
else
imf_commit(imf);
imf_commit(imf);
imf_reap(imf);
if (is_final) {
/* Remove the gap in the membership and filter array. */
KASSERT(RB_EMPTY(&imf->imf_sources),
("%s: imf_sources not empty", __func__));
for (++idx; idx < imo->imo_num_memberships; ++idx) {
imo->imo_membership[idx - 1] = imo->imo_membership[idx];
imo->imo_mfilters[idx - 1] = imo->imo_mfilters[idx];
}
imf_init(&imo->imo_mfilters[idx - 1], MCAST_UNDEFINED,
MCAST_EXCLUDE);
imo->imo_num_memberships--;
}
out_inp_locked:
INP_WUNLOCK(inp);
if (is_final && imf) {
/*
* Give up the multicast address record to which
* the membership points.
*/
(void) in_leavegroup_locked(imf->imf_inm, imf);
ip_mfilter_free(imf);
}
IN_MULTI_UNLOCK();
return (error);
}
@ -2658,7 +2555,6 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
struct in_mfilter *imf;
struct ip_moptions *imo;
struct in_multi *inm;
size_t idx;
int error;
error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
@ -2690,18 +2586,19 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
if (ifp == NULL)
return (EADDRNOTAVAIL);
IN_MULTI_LOCK();
/*
* Take the INP write lock.
* Check if this socket is a member of this group.
*/
imo = inp_findmoptions(inp);
idx = imo_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->imo_mfilters == NULL) {
imf = imo_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_inp_locked;
}
inm = imo->imo_membership[idx];
imf = &imo->imo_mfilters[idx];
inm = imf->imf_inm;
/*
* Begin state merge transaction at socket layer.
@ -2778,7 +2675,6 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
goto out_imf_rollback;
INP_WLOCK_ASSERT(inp);
IN_MULTI_LOCK();
/*
* Begin state merge transaction at IGMP layer.
@ -2789,7 +2685,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
goto out_imf_rollback;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
@ -2798,10 +2694,6 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
if (error)
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
out_in_multi_locked:
IN_MULTI_UNLOCK();
out_imf_rollback:
if (error)
imf_rollback(imf);
@ -2812,6 +2704,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
out_inp_locked:
INP_WUNLOCK(inp);
IN_MULTI_UNLOCK();
return (error);
}


@ -86,6 +86,9 @@ __FBSDID("$FreeBSD$");
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#ifdef INET
#include <netinet/in_var.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#ifdef TCPHPTS
@ -93,16 +96,13 @@ __FBSDID("$FreeBSD$");
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#endif
#ifdef INET
#include <netinet/in_var.h>
#endif
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#endif
#include <netipsec/ipsec_support.h>
@ -1779,8 +1779,9 @@ void
in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
{
struct inpcb *inp;
struct in_multi *inm;
struct in_mfilter *imf;
struct ip_moptions *imo;
int i, gap;
INP_INFO_WLOCK(pcbinfo);
CK_LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
@ -1801,17 +1802,18 @@ in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
*
* XXX This can all be deferred to an epoch_call
*/
for (i = 0, gap = 0; i < imo->imo_num_memberships;
i++) {
if (imo->imo_membership[i]->inm_ifp == ifp) {
IN_MULTI_LOCK_ASSERT();
in_leavegroup_locked(imo->imo_membership[i], NULL);
gap++;
} else if (gap != 0)
imo->imo_membership[i - gap] =
imo->imo_membership[i];
restart:
IP_MFILTER_FOREACH(imf, &imo->imo_head) {
if ((inm = imf->imf_inm) == NULL)
continue;
if (inm->inm_ifp != ifp)
continue;
ip_mfilter_remove(&imo->imo_head, imf);
IN_MULTI_LOCK_ASSERT();
in_leavegroup_locked(inm, NULL);
ip_mfilter_free(imf);
goto restart;
}
imo->imo_num_memberships -= gap;
}
INP_WUNLOCK(inp);
}


@ -232,8 +232,60 @@ struct in_mfilter {
struct ip_msource_tree imf_sources; /* source list for (S,G) */
u_long imf_nsrc; /* # of source entries */
uint8_t imf_st[2]; /* state before/at commit */
struct in_multi *imf_inm; /* associated multicast address */
STAILQ_ENTRY(in_mfilter) imf_entry; /* list entry */
};
/*
* Helper types and functions for IPv4 multicast filters.
*/
STAILQ_HEAD(ip_mfilter_head, in_mfilter);
struct in_mfilter *ip_mfilter_alloc(int mflags, int st0, int st1);
void ip_mfilter_free(struct in_mfilter *);
static inline void
ip_mfilter_init(struct ip_mfilter_head *head)
{
STAILQ_INIT(head);
}
static inline struct in_mfilter *
ip_mfilter_first(const struct ip_mfilter_head *head)
{
return (STAILQ_FIRST(head));
}
static inline void
ip_mfilter_insert(struct ip_mfilter_head *head, struct in_mfilter *imf)
{
STAILQ_INSERT_TAIL(head, imf, imf_entry);
}
static inline void
ip_mfilter_remove(struct ip_mfilter_head *head, struct in_mfilter *imf)
{
STAILQ_REMOVE(head, imf, in_mfilter, imf_entry);
}
#define IP_MFILTER_FOREACH(imf, head) \
STAILQ_FOREACH(imf, head, imf_entry)
static inline size_t
ip_mfilter_count(struct ip_mfilter_head *head)
{
struct in_mfilter *imf;
size_t num = 0;
STAILQ_FOREACH(imf, head, imf_entry)
num++;
return (num);
}
/*
* IPv4 group descriptor.
*


@ -1371,25 +1371,24 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
case AF_INET:
{
struct ip_moptions *imo = &cif->cif_imo;
struct in_mfilter *imf;
struct in_addr addr;
if (imo->imo_membership)
if (ip_mfilter_first(&imo->imo_head) != NULL)
return (0);
imo->imo_membership = (struct in_multi **)malloc(
(sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_CARP,
M_WAITOK);
imo->imo_mfilters = NULL;
imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
ip_mfilter_init(&imo->imo_head);
imo->imo_multicast_vif = -1;
addr.s_addr = htonl(INADDR_CARP_GROUP);
if ((error = in_joingroup(ifp, &addr, NULL,
&imo->imo_membership[0])) != 0) {
free(imo->imo_membership, M_CARP);
&imf->imf_inm)) != 0) {
ip_mfilter_free(imf);
break;
}
imo->imo_num_memberships++;
ip_mfilter_insert(&imo->imo_head, imf);
imo->imo_multicast_ifp = ifp;
imo->imo_multicast_ttl = CARP_DFLTTL;
imo->imo_multicast_loop = 0;
@ -1400,17 +1399,16 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
case AF_INET6:
{
struct ip6_moptions *im6o = &cif->cif_im6o;
struct in6_mfilter *im6f[2];
struct in6_addr in6;
struct in6_multi *in6m;
if (im6o->im6o_membership)
if (ip6_mfilter_first(&im6o->im6o_head))
return (0);
im6o->im6o_membership = (struct in6_multi **)malloc(
(sizeof(struct in6_multi *) * IPV6_MIN_MEMBERSHIPS), M_CARP,
M_ZERO | M_WAITOK);
im6o->im6o_mfilters = NULL;
im6o->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS;
im6f[0] = ip6_mfilter_alloc(M_WAITOK, 0, 0);
im6f[1] = ip6_mfilter_alloc(M_WAITOK, 0, 0);
ip6_mfilter_init(&im6o->im6o_head);
im6o->im6o_multicast_hlim = CARP_DFLTTL;
im6o->im6o_multicast_ifp = ifp;
@ -1419,17 +1417,15 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
in6.s6_addr16[0] = htons(0xff02);
in6.s6_addr8[15] = 0x12;
if ((error = in6_setscope(&in6, ifp, NULL)) != 0) {
free(im6o->im6o_membership, M_CARP);
ip6_mfilter_free(im6f[0]);
ip6_mfilter_free(im6f[1]);
break;
}
in6m = NULL;
if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
free(im6o->im6o_membership, M_CARP);
if ((error = in6_joingroup(ifp, &in6, NULL, &im6f[0]->im6f_in6m, 0)) != 0) {
ip6_mfilter_free(im6f[0]);
ip6_mfilter_free(im6f[1]);
break;
}
in6m_acquire(in6m);
im6o->im6o_membership[0] = in6m;
im6o->im6o_num_memberships++;
/* Join solicited multicast address. */
bzero(&in6, sizeof(in6));
@ -1438,20 +1434,21 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
in6.s6_addr32[2] = htonl(1);
in6.s6_addr32[3] = 0;
in6.s6_addr8[12] = 0xff;
if ((error = in6_setscope(&in6, ifp, NULL)) != 0) {
in6_leavegroup(im6o->im6o_membership[0], NULL);
free(im6o->im6o_membership, M_CARP);
ip6_mfilter_free(im6f[0]);
ip6_mfilter_free(im6f[1]);
break;
}
in6m = NULL;
if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
in6_leavegroup(im6o->im6o_membership[0], NULL);
free(im6o->im6o_membership, M_CARP);
if ((error = in6_joingroup(ifp, &in6, NULL, &im6f[1]->im6f_in6m, 0)) != 0) {
in6_leavegroup(im6f[0]->im6f_in6m, NULL);
ip6_mfilter_free(im6f[0]);
ip6_mfilter_free(im6f[1]);
break;
}
in6m_acquire(in6m);
im6o->im6o_membership[1] = in6m;
im6o->im6o_num_memberships++;
ip6_mfilter_insert(&im6o->im6o_head, im6f[0]);
ip6_mfilter_insert(&im6o->im6o_head, im6f[1]);
break;
}
#endif
@ -1466,35 +1463,38 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
static void
carp_multicast_cleanup(struct carp_if *cif, sa_family_t sa)
{
#ifdef INET
struct ip_moptions *imo = &cif->cif_imo;
struct in_mfilter *imf;
#endif
#ifdef INET6
struct ip6_moptions *im6o = &cif->cif_im6o;
struct in6_mfilter *im6f;
#endif
sx_assert(&carp_sx, SA_XLOCKED);
switch (sa) {
#ifdef INET
case AF_INET:
if (cif->cif_naddrs == 0) {
struct ip_moptions *imo = &cif->cif_imo;
in_leavegroup(imo->imo_membership[0], NULL);
KASSERT(imo->imo_mfilters == NULL,
("%s: imo_mfilters != NULL", __func__));
free(imo->imo_membership, M_CARP);
imo->imo_membership = NULL;
if (cif->cif_naddrs != 0)
break;
while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
ip_mfilter_remove(&imo->imo_head, imf);
in_leavegroup(imf->imf_inm, NULL);
ip_mfilter_free(imf);
}
break;
#endif
#ifdef INET6
case AF_INET6:
if (cif->cif_naddrs6 == 0) {
struct ip6_moptions *im6o = &cif->cif_im6o;
if (cif->cif_naddrs6 != 0)
break;
in6_leavegroup(im6o->im6o_membership[0], NULL);
in6_leavegroup(im6o->im6o_membership[1], NULL);
KASSERT(im6o->im6o_mfilters == NULL,
("%s: im6o_mfilters != NULL", __func__));
free(im6o->im6o_membership, M_CARP);
im6o->im6o_membership = NULL;
while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) {
ip6_mfilter_remove(&im6o->im6o_head, im6f);
in6_leavegroup(im6f->im6f_in6m, NULL);
ip6_mfilter_free(im6f);
}
break;
#endif


@ -1680,7 +1680,6 @@ static void
send_packet(struct vif *vifp, struct mbuf *m)
{
struct ip_moptions imo;
struct in_multi *imm[2];
int error __unused;
VIF_LOCK_ASSERT();
@ -1689,9 +1688,7 @@ send_packet(struct vif *vifp, struct mbuf *m)
imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1;
imo.imo_multicast_loop = 1;
imo.imo_multicast_vif = -1;
imo.imo_num_memberships = 0;
imo.imo_max_memberships = 2;
imo.imo_membership = &imm[0];
STAILQ_INIT(&imo.imo_head);
/*
* Re-entrancy should not be a problem here, because


@ -82,6 +82,7 @@ struct ipoption {
char ipopt_list[MAX_IPOPTLEN]; /* options proper */
};
#if defined(_NETINET_IN_VAR_H_) && defined(_KERNEL)
/*
* Structure attached to inpcb.ip_moptions and
* passed to ip_output when IP multicast options are in use.
@ -93,12 +94,11 @@ struct ip_moptions {
u_long imo_multicast_vif; /* vif num outgoing multicasts */
u_char imo_multicast_ttl; /* TTL for outgoing multicasts */
u_char imo_multicast_loop; /* 1 => hear sends if a member */
u_short imo_num_memberships; /* no. memberships this socket */
u_short imo_max_memberships; /* max memberships this socket */
struct in_multi **imo_membership; /* group memberships */
struct in_mfilter *imo_mfilters; /* source filters */
struct epoch_context imo_epoch_ctx;
struct ip_mfilter_head imo_head; /* group membership list */
};
#else
struct ip_moptions;
#endif
struct ipstat {
uint64_t ips_total; /* total packets received */


@ -523,11 +523,8 @@ struct route_in6 {
#define IPV6_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */
/*
* The im6o_membership vector for each socket is now dynamically allocated at
* run-time, bounded by USHRT_MAX, and is reallocated when needed, sized
* according to a power-of-two increment.
* Limit for IPv6 multicast memberships
*/
#define IPV6_MIN_MEMBERSHIPS 31
#define IPV6_MAX_MEMBERSHIPS 4095
/*


@ -774,9 +774,11 @@ _in6_ifdetach(struct ifnet *ifp, int purgeulp)
in6_purgeaddr(ifa);
}
if (purgeulp) {
IN6_MULTI_LOCK();
in6_pcbpurgeif0(&V_udbinfo, ifp);
in6_pcbpurgeif0(&V_ulitecbinfo, ifp);
in6_pcbpurgeif0(&V_ripcbinfo, ifp);
IN6_MULTI_UNLOCK();
}
/* leave from all multicast groups joined */
in6_purgemaddrs(ifp);

View File

@ -102,7 +102,8 @@ RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);
/*
* Locking:
* - Lock order is: Giant, INP_WLOCK, IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
* - Lock order is: Giant, IN6_MULTI_LOCK, INP_WLOCK,
* IN6_MULTI_LIST_LOCK, MLD_LOCK, IF_ADDR_LOCK.
* - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however
* it can be taken by code in net/if.c also.
* - ip6_moptions and in6_mfilter are covered by the INP_WLOCK.
@ -134,12 +135,11 @@ static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
static void im6f_purge(struct in6_mfilter *);
static void im6f_rollback(struct in6_mfilter *);
static void im6f_reap(struct in6_mfilter *);
static int im6o_grow(struct ip6_moptions *);
static size_t im6o_match_group(const struct ip6_moptions *,
static struct in6_mfilter *
im6o_match_group(const struct ip6_moptions *,
const struct ifnet *, const struct sockaddr *);
static struct in6_msource *
im6o_match_source(const struct ip6_moptions *, const size_t,
const struct sockaddr *);
im6o_match_source(struct in6_mfilter *, const struct sockaddr *);
static void im6s_merge(struct ip6_msource *ims,
const struct in6_msource *lims, const int rollback);
static int in6_getmulti(struct ifnet *, const struct in6_addr *,
@ -228,55 +228,25 @@ im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
imf->im6f_st[1] = st1;
}
/*
* Resize the ip6_moptions vector to the next power-of-two minus 1.
* May be called with locks held; do not sleep.
*/
static int
im6o_grow(struct ip6_moptions *imo)
struct in6_mfilter *
ip6_mfilter_alloc(const int mflags, const int st0, const int st1)
{
struct in6_multi **nmships;
struct in6_multi **omships;
struct in6_mfilter *nmfilters;
struct in6_mfilter *omfilters;
size_t idx;
size_t newmax;
size_t oldmax;
struct in6_mfilter *imf;
nmships = NULL;
nmfilters = NULL;
omships = imo->im6o_membership;
omfilters = imo->im6o_mfilters;
oldmax = imo->im6o_max_memberships;
newmax = ((oldmax + 1) * 2) - 1;
imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags);
if (newmax <= IPV6_MAX_MEMBERSHIPS) {
nmships = (struct in6_multi **)realloc(omships,
sizeof(struct in6_multi *) * newmax, M_IP6MOPTS, M_NOWAIT);
nmfilters = (struct in6_mfilter *)realloc(omfilters,
sizeof(struct in6_mfilter) * newmax, M_IN6MFILTER,
M_NOWAIT);
if (nmships != NULL && nmfilters != NULL) {
/* Initialize newly allocated source filter heads. */
for (idx = oldmax; idx < newmax; idx++) {
im6f_init(&nmfilters[idx], MCAST_UNDEFINED,
MCAST_EXCLUDE);
}
imo->im6o_max_memberships = newmax;
imo->im6o_membership = nmships;
imo->im6o_mfilters = nmfilters;
}
}
if (imf != NULL)
im6f_init(imf, st0, st1);
if (nmships == NULL || nmfilters == NULL) {
if (nmships != NULL)
free(nmships, M_IP6MOPTS);
if (nmfilters != NULL)
free(nmfilters, M_IN6MFILTER);
return (ETOOMANYREFS);
}
return (imf);
}
return (0);
void
ip6_mfilter_free(struct in6_mfilter *imf)
{
im6f_purge(imf);
free(imf, M_IN6MFILTER);
}
/*
@ -284,36 +254,27 @@ im6o_grow(struct ip6_moptions *imo)
* which matches the specified group, and optionally an interface.
* Return its index into the array, or -1 if not found.
*/
static size_t
static struct in6_mfilter *
im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
const struct sockaddr *group)
{
const struct sockaddr_in6 *gsin6;
struct in6_multi **pinm;
int idx;
int nmships;
struct in6_mfilter *imf;
struct in6_multi *inm;
gsin6 = (const struct sockaddr_in6 *)group;
gsin6 = (const struct sockaddr_in6 *)group;
/* The im6o_membership array may be lazy allocated. */
if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0)
return (-1);
nmships = imo->im6o_num_memberships;
pinm = &imo->im6o_membership[0];
for (idx = 0; idx < nmships; idx++, pinm++) {
if (*pinm == NULL)
IP6_MFILTER_FOREACH(imf, &imo->im6o_head) {
inm = imf->im6f_in6m;
if (inm == NULL)
continue;
if ((ifp == NULL || ((*pinm)->in6m_ifp == ifp)) &&
IN6_ARE_ADDR_EQUAL(&(*pinm)->in6m_addr,
if ((ifp == NULL || (inm->in6m_ifp == ifp)) &&
IN6_ARE_ADDR_EQUAL(&inm->in6m_addr,
&gsin6->sin6_addr)) {
break;
}
}
if (idx >= nmships)
idx = -1;
return (idx);
return (imf);
}
/*
@ -328,22 +289,13 @@ im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
* it exists, which may not be the desired behaviour.
*/
static struct in6_msource *
im6o_match_source(const struct ip6_moptions *imo, const size_t gidx,
const struct sockaddr *src)
im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src)
{
struct ip6_msource find;
struct in6_mfilter *imf;
struct ip6_msource *ims;
const sockunion_t *psa;
KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__));
KASSERT(gidx != -1 && gidx < imo->im6o_num_memberships,
("%s: invalid index %d\n", __func__, (int)gidx));
/* The im6o_mfilters array may be lazy allocated. */
if (imo->im6o_mfilters == NULL)
return (NULL);
imf = &imo->im6o_mfilters[gidx];
psa = (const sockunion_t *)src;
find.im6s_addr = psa->sin6.sin6_addr;
@ -363,14 +315,14 @@ int
im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
const struct sockaddr *group, const struct sockaddr *src)
{
size_t gidx;
struct in6_mfilter *imf;
struct in6_msource *ims;
int mode;
KASSERT(ifp != NULL, ("%s: null ifp", __func__));
gidx = im6o_match_group(imo, ifp, group);
if (gidx == -1)
imf = im6o_match_group(imo, ifp, group);
if (imf == NULL)
return (MCAST_NOTGMEMBER);
/*
@ -382,8 +334,8 @@ im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
* NOTE: We are comparing group state here at MLD t1 (now)
* with socket-layer t0 (since last downcall).
*/
mode = imo->im6o_mfilters[gidx].im6f_st[1];
ims = im6o_match_source(imo, gidx, src);
mode = imf->im6f_st[1];
ims = im6o_match_source(imf, src);
if ((ims == NULL && mode == MCAST_INCLUDE) ||
(ims != NULL && ims->im6sl_st[0] != mode))
@ -1447,7 +1399,6 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
struct ip6_moptions *imo;
struct in6_msource *ims;
struct in6_multi *inm;
size_t idx;
uint16_t fmode;
int error, doblock;
#ifdef KTR
@ -1504,16 +1455,12 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
* Check if we are actually a member of this group.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->im6o_mfilters == NULL) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}
KASSERT(imo->im6o_mfilters != NULL,
("%s: im6o_mfilters not allocated", __func__));
imf = &imo->im6o_mfilters[idx];
inm = imo->im6o_membership[idx];
inm = imf->im6f_in6m;
/*
* Attempting to use the delta-based API on an
@ -1531,7 +1478,7 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
* Asked to unblock, but nothing to unblock.
* If adding a new block entry, allocate it.
*/
ims = im6o_match_source(imo, idx, &ssa->sa);
ims = im6o_match_source(imf, &ssa->sa);
if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
CTR3(KTR_MLD, "%s: source %s %spresent", __func__,
ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
@ -1601,9 +1548,6 @@ static struct ip6_moptions *
in6p_findmoptions(struct inpcb *inp)
{
struct ip6_moptions *imo;
struct in6_multi **immp;
struct in6_mfilter *imfp;
size_t idx;
INP_WLOCK(inp);
if (inp->in6p_moptions != NULL)
@ -1612,27 +1556,14 @@ in6p_findmoptions(struct inpcb *inp)
INP_WUNLOCK(inp);
imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK);
immp = malloc(sizeof(*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS,
M_WAITOK | M_ZERO);
imfp = malloc(sizeof(struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS,
M_IN6MFILTER, M_WAITOK);
imo->im6o_multicast_ifp = NULL;
imo->im6o_multicast_hlim = V_ip6_defmcasthlim;
imo->im6o_multicast_loop = in6_mcast_loop;
imo->im6o_num_memberships = 0;
imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS;
imo->im6o_membership = immp;
/* Initialize per-group source filters. */
for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++)
im6f_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
imo->im6o_mfilters = imfp;
STAILQ_INIT(&imo->im6o_head);
INP_WLOCK(inp);
if (inp->in6p_moptions != NULL) {
free(imfp, M_IN6MFILTER);
free(immp, M_IP6MOPTS);
free(imo, M_IP6MOPTS);
return (inp->in6p_moptions);
}
@ -1652,33 +1583,26 @@ in6p_findmoptions(struct inpcb *inp)
static void
inp_gcmoptions(struct ip6_moptions *imo)
{
struct in6_mfilter *imf;
struct in6_mfilter *imf;
struct in6_multi *inm;
struct ifnet *ifp;
size_t idx, nmships;
nmships = imo->im6o_num_memberships;
for (idx = 0; idx < nmships; ++idx) {
imf = imo->im6o_mfilters ? &imo->im6o_mfilters[idx] : NULL;
if (imf)
im6f_leave(imf);
inm = imo->im6o_membership[idx];
ifp = inm->in6m_ifp;
if (ifp != NULL) {
CURVNET_SET(ifp->if_vnet);
(void)in6_leavegroup(inm, imf);
CURVNET_RESTORE();
} else {
(void)in6_leavegroup(inm, imf);
}
if (imf)
im6f_purge(imf);
}
while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) {
ip6_mfilter_remove(&imo->im6o_head, imf);
if (imo->im6o_mfilters)
free(imo->im6o_mfilters, M_IN6MFILTER);
free(imo->im6o_membership, M_IP6MOPTS);
free(imo, M_IP6MOPTS);
im6f_leave(imf);
if ((inm = imf->im6f_in6m) != NULL) {
if ((ifp = inm->in6m_ifp) != NULL) {
CURVNET_SET(ifp->if_vnet);
(void)in6_leavegroup(inm, imf);
CURVNET_RESTORE();
} else {
(void)in6_leavegroup(inm, imf);
}
}
ip6_mfilter_free(imf);
}
free(imo, M_IP6MOPTS);
}
void
@ -1707,7 +1631,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
struct sockaddr_storage *ptss;
struct sockaddr_storage *tss;
int error;
size_t idx, nsrcs, ncsrcs;
size_t nsrcs, ncsrcs;
INP_WLOCK_ASSERT(inp);
@ -1741,12 +1665,11 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
/*
* Lookup group on the socket.
*/
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->im6o_mfilters == NULL) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
INP_WUNLOCK(inp);
return (EADDRNOTAVAIL);
}
imf = &imo->im6o_mfilters[idx];
/*
* Ignore memberships which are in limbo.
@ -1943,15 +1866,12 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
struct ip6_moptions *imo;
struct in6_multi *inm;
struct in6_msource *lims;
size_t idx;
int error, is_new;
SLIST_INIT(&inmh);
ifp = NULL;
imf = NULL;
lims = NULL;
error = 0;
is_new = 0;
memset(&gsr, 0, sizeof(struct group_source_req));
gsa = (sockunion_t *)&gsr.gsr_group;
@ -2052,13 +1972,25 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
*/
(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
IN6_MULTI_LOCK();
/*
* Find the membership in the membership list.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
is_new = 1;
inm = NULL;
if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) {
error = ENOMEM;
goto out_in6p_locked;
}
} else {
inm = imo->im6o_membership[idx];
imf = &imo->im6o_mfilters[idx];
is_new = 0;
inm = imf->im6f_in6m;
if (ssa->ss.ss_family != AF_UNSPEC) {
/*
* MCAST_JOIN_SOURCE_GROUP on an exclusive membership
@ -2085,7 +2017,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
* full-state SSM API with the delta-based API,
* which is discouraged in the relevant RFCs.
*/
lims = im6o_match_source(imo, idx, &ssa->sa);
lims = im6o_match_source(imf, &ssa->sa);
if (lims != NULL /*&&
lims->im6sl_st[1] == MCAST_INCLUDE*/) {
error = EADDRNOTAVAIL;
@ -2113,27 +2045,6 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
*/
INP_WLOCK_ASSERT(inp);
if (is_new) {
if (imo->im6o_num_memberships == imo->im6o_max_memberships) {
error = im6o_grow(imo);
if (error)
goto out_in6p_locked;
}
/*
* Allocate the new slot upfront so we can deal with
* grafting the new source filter in same code path
* as for join-source on existing membership.
*/
idx = imo->im6o_num_memberships;
imo->im6o_membership[idx] = NULL;
imo->im6o_num_memberships++;
KASSERT(imo->im6o_mfilters != NULL,
("%s: im6f_mfilters vector was not allocated", __func__));
imf = &imo->im6o_mfilters[idx];
KASSERT(RB_EMPTY(&imf->im6f_sources),
("%s: im6f_sources not empty", __func__));
}
/*
* Graft new source into filter list for this inpcb's
* membership of the group. The in6_multi may not have
@ -2149,7 +2060,11 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
/* Membership starts in IN mode */
if (is_new) {
CTR1(KTR_MLD, "%s: new join w/source", __func__);
im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE);
if (imf == NULL) {
error = ENOMEM;
goto out_in6p_locked;
}
} else {
CTR2(KTR_MLD, "%s: %s source", __func__, "allow");
}
@ -2158,81 +2073,88 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: merge imf state failed",
__func__);
error = ENOMEM;
goto out_im6o_free;
goto out_in6p_locked;
}
} else {
/* No address specified; Membership starts in EX mode */
if (is_new) {
CTR1(KTR_MLD, "%s: new join w/o source", __func__);
im6f_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE);
if (imf == NULL) {
error = ENOMEM;
goto out_in6p_locked;
}
}
}
/*
* Begin state merge transaction at MLD layer.
*/
in_pcbref(inp);
INP_WUNLOCK(inp);
IN6_MULTI_LOCK();
if (is_new) {
in_pcbref(inp);
INP_WUNLOCK(inp);
error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf,
&inm, 0);
&imf->im6f_in6m, 0);
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp)) {
error = ENXIO;
goto out_in6p_unlocked;
}
if (error) {
IN6_MULTI_UNLOCK();
goto out_im6o_free;
goto out_in6p_locked;
}
/*
* NOTE: Refcount from in6_joingroup_locked()
* is protecting membership.
*/
imo->im6o_membership[idx] = inm;
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
if (error) {
CTR1(KTR_MLD, "%s: failed to merge inm state",
__func__);
else {
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
error = mld_change_state(inm, 0);
if (error)
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
IN6_MULTI_LIST_UNLOCK();
}
IN6_MULTI_UNLOCK();
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);
if (error) {
im6f_rollback(imf);
if (is_new)
im6f_purge(imf);
else
im6f_reap(imf);
} else {
im6f_commit(imf);
}
out_im6o_free:
if (error && is_new) {
inm = imo->im6o_membership[idx];
if (inm != NULL) {
IN6_MULTI_LIST_LOCK();
in6m_rele_locked(&inmh, inm);
IN6_MULTI_LIST_UNLOCK();
im6f_rollback(imf);
im6f_reap(imf);
goto out_in6p_locked;
}
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
error = mld_change_state(inm, 0);
IN6_MULTI_LIST_UNLOCK();
if (error) {
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
im6f_rollback(imf);
im6f_reap(imf);
goto out_in6p_locked;
}
imo->im6o_membership[idx] = NULL;
--imo->im6o_num_memberships;
}
if (is_new)
ip6_mfilter_insert(&imo->im6o_head, imf);
im6f_commit(imf);
imf = NULL;
out_in6p_locked:
INP_WUNLOCK(inp);
in6m_release_list_deferred(&inmh);
out_in6p_unlocked:
IN6_MULTI_UNLOCK();
if (is_new && imf) {
if (imf->im6f_in6m != NULL) {
struct in6_multi_head inmh;
SLIST_INIT(&inmh);
SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer);
in6m_release_list_deferred(&inmh);
}
ip6_mfilter_free(imf);
}
return (error);
}
@ -2251,8 +2173,8 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
struct in6_msource *ims;
struct in6_multi *inm;
uint32_t ifindex;
size_t idx;
int error, is_final;
int error;
bool is_final;
#ifdef KTR
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
@ -2260,7 +2182,7 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
ifp = NULL;
ifindex = 0;
error = 0;
is_final = 1;
is_final = true;
memset(&gsr, 0, sizeof(struct group_source_req));
gsa = (sockunion_t *)&gsr.gsr_group;
@ -2378,20 +2300,21 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp);
KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__));
IN6_MULTI_LOCK();
/*
* Find the membership in the membership array.
* Find the membership in the membership list.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}
inm = imo->im6o_membership[idx];
imf = &imo->im6o_mfilters[idx];
inm = imf->im6f_in6m;
if (ssa->ss.ss_family != AF_UNSPEC)
is_final = 0;
is_final = false;
/*
* Begin state merge transaction at socket layer.
@ -2403,13 +2326,14 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
* MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
*/
if (is_final) {
ip6_mfilter_remove(&imo->im6o_head, imf);
im6f_leave(imf);
} else {
if (imf->im6f_st[0] == MCAST_EXCLUDE) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}
ims = im6o_match_source(imo, idx, &ssa->sa);
ims = im6o_match_source(imf, &ssa->sa);
if (ims == NULL) {
CTR3(KTR_MLD, "%s: source %p %spresent", __func__,
ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
@ -2429,60 +2353,47 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at MLD layer.
*/
in_pcbref(inp);
INP_WUNLOCK(inp);
IN6_MULTI_LOCK();
if (!is_final) {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error) {
CTR1(KTR_MLD, "%s: failed to merge inm state",
__func__);
IN6_MULTI_LIST_UNLOCK();
im6f_rollback(imf);
im6f_reap(imf);
goto out_in6p_locked;
}
if (is_final) {
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
error = mld_change_state(inm, 0);
IN6_MULTI_LIST_UNLOCK();
if (error) {
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
im6f_rollback(imf);
im6f_reap(imf);
goto out_in6p_locked;
}
}
im6f_commit(imf);
im6f_reap(imf);
out_in6p_locked:
INP_WUNLOCK(inp);
if (is_final && imf) {
/*
* Give up the multicast address record to which
* the membership points.
*/
(void)in6_leavegroup_locked(inm, imf);
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
__func__);
else {
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
error = mld_change_state(inm, 0);
if (error)
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
IN6_MULTI_LIST_UNLOCK();
ip6_mfilter_free(imf);
}
IN6_MULTI_UNLOCK();
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);
if (error)
im6f_rollback(imf);
else
im6f_commit(imf);
im6f_reap(imf);
if (is_final) {
/* Remove the gap in the membership array. */
KASSERT(RB_EMPTY(&imf->im6f_sources),
("%s: im6f_sources not empty", __func__));
for (++idx; idx < imo->im6o_num_memberships; ++idx) {
imo->im6o_membership[idx - 1] = imo->im6o_membership[idx];
imo->im6o_mfilters[idx - 1] = imo->im6o_mfilters[idx];
}
im6f_init(&imo->im6o_mfilters[idx - 1], MCAST_UNDEFINED,
MCAST_EXCLUDE);
imo->im6o_num_memberships--;
}
out_in6p_locked:
INP_WUNLOCK(inp);
return (error);
}
@ -2540,7 +2451,6 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
struct in6_mfilter *imf;
struct ip6_moptions *imo;
struct in6_multi *inm;
size_t idx;
int error;
error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
@ -2577,13 +2487,12 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
* Check if this socket is a member of this group.
*/
imo = in6p_findmoptions(inp);
idx = im6o_match_group(imo, ifp, &gsa->sa);
if (idx == -1 || imo->im6o_mfilters == NULL) {
imf = im6o_match_group(imo, ifp, &gsa->sa);
if (imf == NULL) {
error = EADDRNOTAVAIL;
goto out_in6p_locked;
}
inm = imo->im6o_membership[idx];
imf = &imo->im6o_mfilters[idx];
inm = imf->im6f_in6m;
/*
* Begin state merge transaction at socket layer.


@ -802,8 +802,9 @@ void
in6_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
{
struct inpcb *in6p;
struct in6_multi *inm;
struct in6_mfilter *imf;
struct ip6_moptions *im6o;
int i, gap;
INP_INFO_WLOCK(pcbinfo);
CK_LIST_FOREACH(in6p, pcbinfo->ipi_listhead, inp_list) {
@ -824,18 +825,18 @@ in6_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
* Drop multicast group membership if we joined
* through the interface being detached.
*/
gap = 0;
for (i = 0; i < im6o->im6o_num_memberships; i++) {
if (im6o->im6o_membership[i]->in6m_ifp ==
ifp) {
in6_leavegroup(im6o->im6o_membership[i], NULL);
gap++;
} else if (gap != 0) {
im6o->im6o_membership[i - gap] =
im6o->im6o_membership[i];
}
restart:
IP6_MFILTER_FOREACH(imf, &im6o->im6o_head) {
if ((inm = imf->im6f_in6m) == NULL)
continue;
if (inm->in6m_ifp != ifp)
continue;
ip6_mfilter_remove(&im6o->im6o_head, imf);
IN6_MULTI_LOCK_ASSERT();
in6_leavegroup_locked(inm, NULL);
ip6_mfilter_free(imf);
goto restart;
}
im6o->im6o_num_memberships -= gap;
}
INP_WUNLOCK(in6p);
}


@ -602,8 +602,60 @@ struct in6_mfilter {
struct ip6_msource_tree im6f_sources; /* source list for (S,G) */
u_long im6f_nsrc; /* # of source entries */
uint8_t im6f_st[2]; /* state before/at commit */
struct in6_multi *im6f_in6m; /* associated multicast address */
STAILQ_ENTRY(in6_mfilter) im6f_entry; /* list entry */
};
/*
* Helper types and functions for IPv6 multicast filters.
*/
STAILQ_HEAD(ip6_mfilter_head, in6_mfilter);
struct in6_mfilter *ip6_mfilter_alloc(int mflags, int st0, int st1);
void ip6_mfilter_free(struct in6_mfilter *);
static inline void
ip6_mfilter_init(struct ip6_mfilter_head *head)
{
STAILQ_INIT(head);
}
static inline struct in6_mfilter *
ip6_mfilter_first(const struct ip6_mfilter_head *head)
{
return (STAILQ_FIRST(head));
}
static inline void
ip6_mfilter_insert(struct ip6_mfilter_head *head, struct in6_mfilter *imf)
{
STAILQ_INSERT_TAIL(head, imf, im6f_entry);
}
static inline void
ip6_mfilter_remove(struct ip6_mfilter_head *head, struct in6_mfilter *imf)
{
STAILQ_REMOVE(head, imf, in6_mfilter, im6f_entry);
}
#define IP6_MFILTER_FOREACH(imf, head) \
STAILQ_FOREACH(imf, head, im6f_entry)
static inline size_t
ip6_mfilter_count(struct ip6_mfilter_head *head)
{
struct in6_mfilter *imf;
size_t num = 0;
STAILQ_FOREACH(imf, head, im6f_entry)
num++;
return (num);
}
/*
* Legacy KAME IPv6 multicast membership descriptor.
*/


@ -110,6 +110,7 @@ struct ip6_direct_ctx {
uint32_t ip6dc_off; /* offset to next header */
};
#if defined(_NETINET6_IN6_VAR_H_) && defined(_KERNEL)
/*
* Structure attached to inpcb.in6p_moptions and
* passed to ip6_output when IPv6 multicast options are in use.
@ -119,13 +120,11 @@ struct ip6_moptions {
struct ifnet *im6o_multicast_ifp; /* ifp for outgoing multicasts */
u_char im6o_multicast_hlim; /* hoplimit for outgoing multicasts */
u_char im6o_multicast_loop; /* 1 => hear sends if a member */
u_short im6o_num_memberships; /* no. memberships this socket */
u_short im6o_max_memberships; /* max memberships this socket */
struct in6_multi **im6o_membership; /* group memberships */
struct in6_mfilter *im6o_mfilters; /* source filters */
struct epoch_context imo6_epoch_ctx;
struct ip6_mfilter_head im6o_head; /* group membership list */
};
#else
struct ip6_moptions;
#endif
/*
* Control options for outgoing packets
*/

View File

@ -264,7 +264,7 @@ static void pfsync_push(struct pfsync_bucket *);
static void pfsync_push_all(struct pfsync_softc *);
static void pfsyncintr(void *);
static int pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
void *);
struct in_mfilter *imf);
static void pfsync_multicast_cleanup(struct pfsync_softc *);
static void pfsync_pointers_init(void);
static void pfsync_pointers_uninit(void);
@ -430,8 +430,7 @@ pfsync_clone_destroy(struct ifnet *ifp)
pfsync_drop(sc);
if_free(ifp);
if (sc->sc_imo.imo_membership)
pfsync_multicast_cleanup(sc);
pfsync_multicast_cleanup(sc);
mtx_destroy(&sc->sc_mtx);
mtx_destroy(&sc->sc_bulk_mtx);
@ -1373,10 +1372,9 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCSETPFSYNC:
{
struct ip_moptions *imo = &sc->sc_imo;
struct in_mfilter *imf = NULL;
struct ifnet *sifp;
struct ip *ip;
void *mship = NULL;
if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
return (error);
@ -1396,8 +1394,7 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
pfsyncr.pfsyncr_syncpeer.s_addr == 0 ||
pfsyncr.pfsyncr_syncpeer.s_addr ==
htonl(INADDR_PFSYNC_GROUP)))
mship = malloc((sizeof(struct in_multi *) *
IP_MIN_MEMBERSHIPS), M_PFSYNC, M_WAITOK | M_ZERO);
imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
PFSYNC_LOCK(sc);
if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
@ -1419,8 +1416,7 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
if (sc->sc_sync_if)
if_rele(sc->sc_sync_if);
sc->sc_sync_if = NULL;
if (imo->imo_membership)
pfsync_multicast_cleanup(sc);
pfsync_multicast_cleanup(sc);
PFSYNC_UNLOCK(sc);
break;
}
@ -1436,14 +1432,13 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
}
if (imo->imo_membership)
pfsync_multicast_cleanup(sc);
pfsync_multicast_cleanup(sc);
if (sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
error = pfsync_multicast_setup(sc, sifp, mship);
error = pfsync_multicast_setup(sc, sifp, imf);
if (error) {
if_rele(sifp);
free(mship, M_PFSYNC);
ip_mfilter_free(imf);
PFSYNC_UNLOCK(sc);
return (error);
}
@ -2353,7 +2348,8 @@ pfsyncintr(void *arg)
}
static int
pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, void *mship)
pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
struct in_mfilter *imf)
{
struct ip_moptions *imo = &sc->sc_imo;
int error;
@ -2361,16 +2357,14 @@ pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp, void *mship)
if (!(ifp->if_flags & IFF_MULTICAST))
return (EADDRNOTAVAIL);
imo->imo_membership = (struct in_multi **)mship;
imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
imo->imo_multicast_vif = -1;
if ((error = in_joingroup(ifp, &sc->sc_sync_peer, NULL,
&imo->imo_membership[0])) != 0) {
imo->imo_membership = NULL;
&imf->imf_inm)) != 0)
return (error);
}
imo->imo_num_memberships++;
ip_mfilter_init(&imo->imo_head);
ip_mfilter_insert(&imo->imo_head, imf);
imo->imo_multicast_ifp = ifp;
imo->imo_multicast_ttl = PFSYNC_DFLTTL;
imo->imo_multicast_loop = 0;
@ -2382,10 +2376,13 @@ static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
struct ip_moptions *imo = &sc->sc_imo;
struct in_mfilter *imf;
in_leavegroup(imo->imo_membership[0], NULL);
free(imo->imo_membership, M_PFSYNC);
imo->imo_membership = NULL;
while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
ip_mfilter_remove(&imo->imo_head, imf);
in_leavegroup(imf->imf_inm, NULL);
ip_mfilter_free(imf);
}
imo->imo_multicast_ifp = NULL;
}
@ -2404,7 +2401,7 @@ pfsync_detach_ifnet(struct ifnet *ifp)
* is going away. We do need to ensure we don't try to do
* cleanup later.
*/
sc->sc_imo.imo_membership = NULL;
ip_mfilter_init(&sc->sc_imo.imo_head);
sc->sc_imo.imo_multicast_ifp = NULL;
sc->sc_sync_if = NULL;
}