Separate list manipulation locking from state change in multicast

Multicast incorrectly calls into drivers with a mutex held, forcing drivers
to go through all manner of contortions to use a non-sleepable lock.
Serialize multicast updates instead.

Submitted by:	mmacy <mmacy@mattmacy.io>
Reviewed by:	shurd, sbruno
Sponsored by:	Limelight Networks
Differential Revision:	https://reviews.freebsd.org/D14969
Stephen Hurd 2018-05-02 19:36:29 +00:00
parent adb947a67a
commit f3e1324b41
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=333175
18 changed files with 626 additions and 388 deletions
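
The heart of the change: IN_MULTI_LOCK() becomes a sleepable sx lock that
serializes an entire multicast update transaction, while a new
IN_MULTI_LIST_LOCK() mutex covers only list and refcount manipulation. A
minimal sketch of the resulting pattern (illustrative only, not part of the
diff, but using the macros and assertions introduced below):

/*
 * Sketch: how the two locks divide the work after this change.
 */
static int
join_sketch(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;
	int error;

	IN_MULTI_LOCK();		/* sx: serializes the whole update */

	IN_MULTI_LIST_LOCK();		/* mtx: lookup, refcounts, lists */
	/* ... inm lookup and reference manipulation ... */
	IN_MULTI_LIST_UNLOCK();

	/*
	 * The driver downcall is made with the list mutex dropped, so
	 * drivers may sleep; if_addmulti() now asserts
	 * IN_MULTI_LIST_UNLOCK_ASSERT().
	 */
	error = if_addmulti(ifp, sa, &ifma);

	IN_MULTI_UNLOCK();
	return (error);
}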


@ -53,6 +53,7 @@ static void gtaskqueue_thread_enqueue(void *);
static void gtaskqueue_thread_loop(void *arg);
TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);
TASKQGROUP_DEFINE(config, 1, 1);
struct gtaskqueue_busy {
struct gtask *tb_running;
@ -662,7 +663,7 @@ SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
void *uniq, int irq, char *name)
void *uniq, int irq, const char *name)
{
cpuset_t mask;
int qid, error;
@ -977,3 +978,12 @@ taskqgroup_destroy(struct taskqgroup *qgroup)
{
}
void
taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
const char *name)
{
GROUPTASK_INIT(gtask, 0, fn, ctx);
taskqgroup_attach(qgroup_config, gtask, gtask, -1, name);
}
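
The new taskqgroup_config_gtask_init() helper wraps GROUPTASK_INIT() plus
attachment to the single-thread config taskqgroup defined above; the
multicast code uses it to run deferred frees in a context that may sleep. A
sketch of a consumer (the names my_task/my_handler are invented for
illustration):

static struct grouptask my_task;

static void
my_handler(void *arg __unused)
{
	/* Runs in the config taskqgroup's thread; sleeping is allowed. */
}

static void
my_init(void)
{
	taskqgroup_config_gtask_init(NULL, &my_task, my_handler, "my task");
}
SYSINIT(my_init, SI_SUB_SMP + 1, SI_ORDER_FIRST, my_init, NULL);

/* Later, from a context that must not sleep: */
/*	GROUPTASK_ENQUEUE(&my_task); */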


@ -532,18 +532,22 @@ static struct witness_order_list_entry order_lists[] = {
* IPv4 multicast:
* protocol locks before interface locks, after UDP locks.
*/
{ "in_multi_sx", &lock_class_sx },
{ "udpinp", &lock_class_rw },
{ "in_multi_mtx", &lock_class_mtx_sleep },
{ "in_multi_list_mtx", &lock_class_mtx_sleep },
{ "igmp_mtx", &lock_class_mtx_sleep },
{ "ifnet_rw", &lock_class_rw },
{ "if_addr_lock", &lock_class_rw },
{ NULL, NULL },
/*
* IPv6 multicast:
* protocol locks before interface locks, after UDP locks.
*/
{ "in6_multi_sx", &lock_class_sx },
{ "udpinp", &lock_class_rw },
{ "in6_multi_mtx", &lock_class_mtx_sleep },
{ "in6_multi_list_mtx", &lock_class_mtx_sleep },
{ "mld_mtx", &lock_class_mtx_sleep },
{ "ifnet_rw", &lock_class_rw },
{ "if_addr_lock", &lock_class_rw },
{ NULL, NULL },
/*


@ -985,11 +985,13 @@ static void
if_purgemaddrs(struct ifnet *ifp)
{
struct ifmultiaddr *ifma;
struct ifmultiaddr *next;
IF_ADDR_WLOCK(ifp);
TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
while (!TAILQ_EMPTY(&ifp->if_multiaddrs)) {
ifma = TAILQ_FIRST(&ifp->if_multiaddrs);
TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
if_delmulti_locked(ifp, ifma, 1);
}
IF_ADDR_WUNLOCK(ifp);
}
@ -3429,6 +3431,12 @@ if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
struct sockaddr_dl sdl;
int error;
#ifdef INET
IN_MULTI_LIST_UNLOCK_ASSERT();
#endif
#ifdef INET6
IN6_MULTI_LIST_UNLOCK_ASSERT();
#endif
/*
* If the address is already present, return a new reference to it;
* otherwise, allocate storage and set up a new address.
@ -3610,6 +3618,9 @@ if_delmulti_ifma(struct ifmultiaddr *ifma)
struct ifnet *ifp;
int lastref;
#ifdef INET
IN_MULTI_LIST_UNLOCK_ASSERT();
#endif
ifp = ifma->ifma_ifp;
#ifdef DIAGNOSTIC
if (ifp == NULL) {
@ -3711,8 +3722,7 @@ if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching)
if_freemulti(ll_ifma);
}
}
if (ifp != NULL)
if (ifp != NULL && detaching == 0)
TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
if_freemulti(ifma);


@ -136,7 +136,7 @@ static int igmp_v3_enqueue_group_record(struct mbufq *,
struct in_multi *, const int, const int, const int);
static int igmp_v3_enqueue_filter_change(struct mbufq *,
struct in_multi *);
static void igmp_v3_process_group_timers(struct igmp_ifsoftc *,
static void igmp_v3_process_group_timers(struct in_multi_head *,
struct mbufq *, struct mbufq *, struct in_multi *,
const int);
static int igmp_v3_merge_state_changes(struct in_multi *,
@ -162,12 +162,12 @@ static const struct netisr_handler igmp_nh = {
* themselves are not virtualized.
*
* Locking:
* * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
* * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
* Any may be taken independently; if any are held at the same
* time, the above lock order must be followed.
* * All output is delegated to the netisr.
* Now that Giant has been eliminated, the netisr may be inlined.
* * IN_MULTI_LOCK covers in_multi.
* * IN_MULTI_LIST_LOCK covers in_multi.
* * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
* including the output queue.
* * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
@ -441,7 +441,7 @@ sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
if (error)
return (error);
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
IGMP_LOCK();
if (name[0] <= 0 || name[0] > V_if_index) {
@ -475,7 +475,7 @@ sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
out_locked:
IGMP_UNLOCK();
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
return (error);
}
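
Concretely, the permitted order documented above means a path that needs all
three locks nests them the way the fast-timeout and query-input paths below
do. A fragment, for illustration only:

	IN_MULTI_LIST_LOCK();	/* 1: multicast group lists */
	IGMP_LOCK();		/* 2: igmp_ifsoftc and output queues */
	IF_ADDR_RLOCK(ifp);	/* 3: if_multiaddrs walk */
	/* ... per-group IGMP state updates ... */
	IF_ADDR_RUNLOCK(ifp);
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();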
@ -586,7 +586,6 @@ igi_alloc_locked(/*const*/ struct ifnet *ifp)
igi->igi_qi = IGMP_QI_INIT;
igi->igi_qri = IGMP_QRI_INIT;
igi->igi_uri = IGMP_URI_INIT;
SLIST_INIT(&igi->igi_relinmhead);
mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
@ -612,11 +611,12 @@ igmp_ifdetach(struct ifnet *ifp)
{
struct igmp_ifsoftc *igi;
struct ifmultiaddr *ifma;
struct in_multi *inm, *tinm;
struct in_multi *inm;
struct in_multi_head inm_free_tmp;
CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
ifp->if_xname);
SLIST_INIT(&inm_free_tmp);
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@ -631,24 +631,15 @@ igmp_ifdetach(struct ifnet *ifp)
("%s: ifma_protospec is NULL", __func__));
#endif
inm = (struct in_multi *)ifma->ifma_protospec;
if (inm->inm_state == IGMP_LEAVING_MEMBER) {
SLIST_INSERT_HEAD(&igi->igi_relinmhead,
inm, inm_nrele);
}
if (inm->inm_state == IGMP_LEAVING_MEMBER)
inm_rele_locked(&inm_free_tmp, inm);
inm_clear_recorded(inm);
}
IF_ADDR_RUNLOCK(ifp);
/*
* Free the in_multi reference(s) for this IGMP lifecycle.
*/
SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
tinm) {
SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
inm_release_locked(inm);
}
inm_release_list_deferred(&inm_free_tmp);
}
IGMP_UNLOCK();
}
/*
@ -684,11 +675,6 @@ igi_delete_locked(const struct ifnet *ifp)
mbufq_drain(&igi->igi_gq);
LIST_REMOVE(igi, igi_link);
KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
("%s: there are dangling in_multi references",
__func__));
free(igi, M_IGMP);
return;
}
@ -722,7 +708,7 @@ igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
}
IGMPSTAT_INC(igps_rcv_gen_queries);
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@ -778,7 +764,7 @@ igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
out_locked:
IGMP_UNLOCK();
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
return (0);
}
@ -816,7 +802,7 @@ igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
IGMPSTAT_INC(igps_rcv_group_queries);
}
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@ -872,7 +858,7 @@ igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
out_locked:
IGMP_UNLOCK();
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
return (0);
}
@ -899,7 +885,7 @@ igmp_v2_update_group(struct in_multi *inm, const int timer)
CTR4(KTR_IGMPV3, "0x%08x: %s/%s timer=%d", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
switch (inm->inm_state) {
case IGMP_NOT_MEMBER:
@ -1011,7 +997,7 @@ igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
IGMPSTAT_INC(igps_rcv_gsr_queries);
}
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
IGMP_LOCK();
igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
@ -1092,7 +1078,7 @@ igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
out_locked:
IGMP_UNLOCK();
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
return (0);
}
@ -1109,7 +1095,7 @@ igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
int retval;
uint16_t nsrc;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
retval = 0;
@ -1246,7 +1232,7 @@ igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
* If we are a member of this group, and our membership should be
* reported, stop our group timer and transition to the 'lazy' state.
*/
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
inm = inm_lookup(ifp, igmp->igmp_group);
if (inm != NULL) {
struct igmp_ifsoftc *igi;
@ -1305,7 +1291,7 @@ igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
}
out_locked:
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
return (0);
}
@ -1373,7 +1359,7 @@ igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
* reported, and our group timer is pending or about to be reset,
* stop our group timer by transitioning to the 'lazy' state.
*/
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
inm = inm_lookup(ifp, igmp->igmp_group);
if (inm != NULL) {
struct igmp_ifsoftc *igi;
@ -1418,7 +1404,7 @@ igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
}
out_locked:
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
return (0);
}
@ -1647,6 +1633,7 @@ igmp_fasttimo_vnet(void)
struct igmp_ifsoftc *igi;
struct ifmultiaddr *ifma;
struct in_multi *inm;
struct in_multi_head inm_free_tmp;
int loop, uri_fasthz;
loop = 0;
@ -1662,7 +1649,8 @@ igmp_fasttimo_vnet(void)
!V_state_change_timers_running)
return;
IN_MULTI_LOCK();
SLIST_INIT(&inm_free_tmp);
IN_MULTI_LIST_LOCK();
IGMP_LOCK();
/*
@ -1720,7 +1708,7 @@ igmp_fasttimo_vnet(void)
igi->igi_version);
break;
case IGMP_VERSION_3:
igmp_v3_process_group_timers(igi, &qrq,
igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
&scq, inm, uri_fasthz);
break;
}
@ -1728,8 +1716,6 @@ igmp_fasttimo_vnet(void)
IF_ADDR_RUNLOCK(ifp);
if (igi->igi_version == IGMP_VERSION_3) {
struct in_multi *tinm;
igmp_dispatch_queue(&qrq, 0, loop);
igmp_dispatch_queue(&scq, 0, loop);
@ -1737,18 +1723,13 @@ igmp_fasttimo_vnet(void)
* Free the in_multi reference(s) for this
* IGMP lifecycle.
*/
SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
inm_nrele, tinm) {
SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
inm_nrele);
inm_release_locked(inm);
}
inm_release_list_deferred(&inm_free_tmp);
}
}
out_locked:
IGMP_UNLOCK();
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
}
/*
@ -1760,7 +1741,7 @@ igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
{
int report_timer_expired;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
if (inm->inm_timer == 0) {
@ -1802,14 +1783,14 @@ igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
* Note: Unlocked read from igi.
*/
static void
igmp_v3_process_group_timers(struct igmp_ifsoftc *igi,
igmp_v3_process_group_timers(struct in_multi_head *inmh,
struct mbufq *qrq, struct mbufq *scq,
struct in_multi *inm, const int uri_fasthz)
{
int query_response_timer_expired;
int state_change_retransmit_timer_expired;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
query_response_timer_expired = 0;
@ -1907,8 +1888,7 @@ igmp_v3_process_group_timers(struct igmp_ifsoftc *igi,
if (inm->inm_state == IGMP_LEAVING_MEMBER &&
inm->inm_scrv == 0) {
inm->inm_state = IGMP_NOT_MEMBER;
SLIST_INSERT_HEAD(&igi->igi_relinmhead,
inm, inm_nrele);
inm_rele_locked(inmh, inm);
}
}
break;
@ -1929,7 +1909,7 @@ static void
igmp_v3_suppress_group_record(struct in_multi *inm)
{
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
("%s: not IGMPv3 mode on link", __func__));
@ -2003,13 +1983,15 @@ igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
{
struct ifmultiaddr *ifma;
struct ifnet *ifp;
struct in_multi *inm, *tinm;
struct in_multi *inm;
struct in_multi_head inm_free_tmp;
CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
igi->igi_ifp, igi->igi_ifp->if_xname);
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
SLIST_INIT(&inm_free_tmp);
/*
* Stop the v3 General Query Response on this link stone dead.
@ -2050,7 +2032,7 @@ igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
* message is sent upstream to the old querier --
* transition to NOT would lose the leave and race.
*/
SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
inm_rele_locked(&inm_free_tmp, inm);
/* FALLTHROUGH */
case IGMP_G_QUERY_PENDING_MEMBER:
case IGMP_SG_QUERY_PENDING_MEMBER:
@ -2069,10 +2051,8 @@ igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
mbufq_drain(&inm->inm_scq);
}
IF_ADDR_RUNLOCK(ifp);
SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) {
SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
inm_release_locked(inm);
}
inm_release_list_deferred(&inm_free_tmp);
}
/*
@ -2199,7 +2179,7 @@ igmp_v1v2_queue_report(struct in_multi *inm, const int type)
struct ip *ip;
struct mbuf *m;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
ifp = inm->inm_ifp;
@ -2276,10 +2256,8 @@ igmp_change_state(struct in_multi *inm)
struct ifnet *ifp;
int error;
IN_MULTI_LOCK_ASSERT();
error = 0;
IN_MULTI_LOCK_ASSERT();
/*
* Try to detect if the upper layer just asked us to change state
* for an interface which has now gone away.
@ -2379,9 +2357,10 @@ igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
* group around for the final INCLUDE {} enqueue.
*/
if (igi->igi_version == IGMP_VERSION_3 &&
inm->inm_state == IGMP_LEAVING_MEMBER)
inm_release_locked(inm);
inm->inm_state == IGMP_LEAVING_MEMBER) {
MPASS(inm->inm_refcount > 1);
inm_rele_locked(NULL, inm);
}
inm->inm_state = IGMP_REPORTING_MEMBER;
switch (igi->igi_version) {
@ -2473,7 +2452,7 @@ igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
ifp = inm->inm_ifp;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
@ -2531,7 +2510,7 @@ igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
__func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
inm->inm_ifp->if_xname);
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
switch (inm->inm_state) {
@ -2658,7 +2637,7 @@ igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
in_addr_t naddr;
uint8_t mode;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
error = 0;
ifp = inm->inm_ifp;
@ -3018,7 +2997,7 @@ igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
uint8_t mode, now, then;
rectype_t crt, drt, nrt;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
if (inm->inm_nsrc == 0 ||
(inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
@ -3221,7 +3200,7 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
domerge = 0;
recslen = 0;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
/*
@ -3320,7 +3299,7 @@ igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
struct in_multi *inm;
int retval, loop;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
KASSERT(igi->igi_version == IGMP_VERSION_3,
@ -3632,7 +3611,6 @@ DB_SHOW_COMMAND(igi_list, db_show_igi_list)
db_printf(" qi %u\n", igi->igi_qi);
db_printf(" qri %u\n", igi->igi_qri);
db_printf(" uri %u\n", igi->igi_uri);
/* SLIST_HEAD(,in_multi) igi_relinmhead */
/* struct mbufq igi_gq; */
db_printf("\n");
}


@ -214,7 +214,6 @@ struct igmp_ifsoftc {
uint32_t igi_qi; /* IGMPv3 Query Interval (s) */
uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */
uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */
SLIST_HEAD(,in_multi) igi_relinmhead; /* released groups */
struct mbufq igi_gq; /* general query responses queue */
};


@ -632,12 +632,10 @@ in_difaddr_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp, struct thread *td)
struct in_ifinfo *ii;
ii = ((struct in_ifinfo *)ifp->if_afdata[AF_INET]);
IN_MULTI_LOCK();
if (ii->ii_allhosts) {
(void)in_leavegroup_locked(ii->ii_allhosts, NULL);
(void)in_leavegroup(ii->ii_allhosts, NULL);
ii->ii_allhosts = NULL;
}
IN_MULTI_UNLOCK();
}
IF_ADDR_WLOCK(ifp);
@ -994,11 +992,12 @@ in_broadcast(struct in_addr in, struct ifnet *ifp)
void
in_ifdetach(struct ifnet *ifp)
{
IN_MULTI_LOCK();
in_pcbpurgeif0(&V_ripcbinfo, ifp);
in_pcbpurgeif0(&V_udbinfo, ifp);
in_pcbpurgeif0(&V_ulitecbinfo, ifp);
in_purgemaddrs(ifp);
IN_MULTI_UNLOCK();
}
/*
@ -1011,12 +1010,12 @@ in_ifdetach(struct ifnet *ifp)
static void
in_purgemaddrs(struct ifnet *ifp)
{
LIST_HEAD(,in_multi) purgeinms;
struct in_multi *inm, *tinm;
struct in_multi_head purgeinms;
struct in_multi *inm;
struct ifmultiaddr *ifma;
LIST_INIT(&purgeinms);
IN_MULTI_LOCK();
SLIST_INIT(&purgeinms);
IN_MULTI_LIST_LOCK();
/*
* Extract list of in_multi associated with the detaching ifp
@ -1034,17 +1033,13 @@ in_purgemaddrs(struct ifnet *ifp)
("%s: ifma_protospec is NULL", __func__));
#endif
inm = (struct in_multi *)ifma->ifma_protospec;
LIST_INSERT_HEAD(&purgeinms, inm, inm_link);
inm_rele_locked(&purgeinms, inm);
}
IF_ADDR_RUNLOCK(ifp);
LIST_FOREACH_SAFE(inm, &purgeinms, inm_link, tinm) {
LIST_REMOVE(inm, inm_link);
inm_release_locked(inm);
}
inm_release_list_deferred(&purgeinms);
igmp_ifdetach(ifp);
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
}
struct in_llentry {


@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/taskqueue.h>
#include <sys/gtaskqueue.h>
#include <sys/tree.h>
#include <net/if.h>
@ -59,6 +60,8 @@ __FBSDID("$FreeBSD$");
#include <net/route.h>
#include <net/vnet.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_fib.h>
@ -91,17 +94,23 @@ static MALLOC_DEFINE(M_IPMSOURCE, "ip_msource",
/*
* Locking:
* - Lock order is: Giant, INP_WLOCK, IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
* - Lock order is: Giant, INP_WLOCK, IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
* - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however
* it can be taken by code in net/if.c also.
* - ip_moptions and in_mfilter are covered by the INP_WLOCK.
*
* struct in_multi is covered by IN_MULTI_LOCK. There isn't strictly
* struct in_multi is covered by IN_MULTI_LIST_LOCK. There isn't strictly
* any need for in_multi itself to be virtualized -- it is bound to an ifp
* anyway no matter what happens.
*/
struct mtx in_multi_mtx;
MTX_SYSINIT(in_multi_mtx, &in_multi_mtx, "in_multi_mtx", MTX_DEF);
struct mtx in_multi_list_mtx;
MTX_SYSINIT(in_multi_mtx, &in_multi_list_mtx, "in_multi_list_mtx", MTX_DEF);
struct mtx in_multi_free_mtx;
MTX_SYSINIT(in_multi_free_mtx, &in_multi_free_mtx, "in_multi_free_mtx", MTX_DEF);
struct sx in_multi_sx;
SX_SYSINIT(in_multi_sx, &in_multi_sx, "in_multi_sx");
/*
* Functions with non-static linkage defined in this file should be
@ -151,6 +160,7 @@ static int inm_is_ifp_detached(const struct in_multi *);
static int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *);
static void inm_purge(struct in_multi *);
static void inm_reap(struct in_multi *);
static void inm_release(struct in_multi *);
static struct ip_moptions *
inp_findmoptions(struct inpcb *);
static void inp_freemoptions_internal(struct ip_moptions *);
@ -216,6 +226,65 @@ inm_is_ifp_detached(const struct in_multi *inm)
}
#endif
static struct grouptask free_gtask;
static struct in_multi_head inm_free_list;
static void inm_release_task(void *arg __unused);
static void inm_init(void)
{
SLIST_INIT(&inm_free_list);
taskqgroup_config_gtask_init(NULL, &free_gtask, inm_release_task, "inm release task");
}
SYSINIT(inm_init, SI_SUB_SMP + 1, SI_ORDER_FIRST,
inm_init, NULL);
void
inm_release_list_deferred(struct in_multi_head *inmh)
{
if (SLIST_EMPTY(inmh))
return;
mtx_lock(&in_multi_free_mtx);
SLIST_CONCAT(&inm_free_list, inmh, in_multi, inm_nrele);
mtx_unlock(&in_multi_free_mtx);
GROUPTASK_ENQUEUE(&free_gtask);
}
void
inm_release_deferred(struct in_multi *inm)
{
struct in_multi_head tmp;
IN_MULTI_LIST_LOCK_ASSERT();
MPASS(inm->inm_refcount > 0);
if (--inm->inm_refcount == 0) {
SLIST_INIT(&tmp);
inm->inm_ifma->ifma_protospec = NULL;
SLIST_INSERT_HEAD(&tmp, inm, inm_nrele);
inm_release_list_deferred(&tmp);
}
}
static void
inm_release_task(void *arg __unused)
{
struct in_multi_head inm_free_tmp;
struct in_multi *inm, *tinm;
SLIST_INIT(&inm_free_tmp);
mtx_lock(&in_multi_free_mtx);
SLIST_CONCAT(&inm_free_tmp, &inm_free_list, in_multi, inm_nrele);
mtx_unlock(&in_multi_free_mtx);
IN_MULTI_LOCK();
SLIST_FOREACH_SAFE(inm, &inm_free_tmp, inm_nrele, tinm) {
SLIST_REMOVE_HEAD(&inm_free_tmp, inm_nrele);
MPASS(inm);
inm_release(inm);
}
IN_MULTI_UNLOCK();
}
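
Together these form the new deferred-free machinery: inm_rele_locked() drops
the final reference while only the list mutex is held and parks the group on
a caller-supplied list; inm_release_list_deferred() moves that list onto the
global free list under in_multi_free_mtx and kicks the grouptask; and
inm_release_task() later takes the sleepable IN_MULTI_LOCK() and performs
the actual teardown, where if_delmulti_ifma() may call into the driver. A
caller sketch (illustrative, mirroring the igmp_ifdetach() change above):

	struct in_multi_head inm_free_tmp;

	SLIST_INIT(&inm_free_tmp);
	IN_MULTI_LIST_LOCK();
	/* ... decide that inm is no longer referenced ... */
	inm_rele_locked(&inm_free_tmp, inm);	/* no free happens here */
	IN_MULTI_LIST_UNLOCK();
	inm_release_list_deferred(&inm_free_tmp); /* freed later, in task */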
/*
* Initialize an in_mfilter structure to a known state at t0, t1
* with an empty source filter list.
@ -232,7 +301,7 @@ imf_init(struct in_mfilter *imf, const int st0, const int st1)
/*
* Function for looking up an in_multi record for an IPv4 multicast address
* on a given interface. ifp must be valid. If no record found, return NULL.
* The IN_MULTI_LOCK and IF_ADDR_LOCK on ifp must be held.
* The IN_MULTI_LIST_LOCK and IF_ADDR_LOCK on ifp must be held.
*/
struct in_multi *
inm_lookup_locked(struct ifnet *ifp, const struct in_addr ina)
@ -240,7 +309,7 @@ inm_lookup_locked(struct ifnet *ifp, const struct in_addr ina)
struct ifmultiaddr *ifma;
struct in_multi *inm;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IF_ADDR_LOCK_ASSERT(ifp);
inm = NULL;
@ -264,7 +333,7 @@ inm_lookup(struct ifnet *ifp, const struct in_addr ina)
{
struct in_multi *inm;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
IF_ADDR_RLOCK(ifp);
inm = inm_lookup_locked(ifp, ina);
IF_ADDR_RUNLOCK(ifp);
@ -451,7 +520,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
IN_MULTI_LOCK_ASSERT();
ii = (struct in_ifinfo *)ifp->if_afdata[AF_INET];
IN_MULTI_LIST_LOCK();
inm = inm_lookup(ifp, *group);
if (inm != NULL) {
/*
@ -460,11 +529,13 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
*/
KASSERT(inm->inm_refcount >= 1,
("%s: bad refcount %d", __func__, inm->inm_refcount));
++inm->inm_refcount;
inm_acquire_locked(inm);
*pinm = inm;
return (0);
}
IN_MULTI_LIST_UNLOCK();
if (inm != NULL)
return (0);
memset(&gsin, 0, sizeof(gsin));
gsin.sin_family = AF_INET;
gsin.sin_len = sizeof(struct sockaddr_in);
@ -479,6 +550,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
return (error);
/* XXX ifma_protospec must be covered by IF_ADDR_LOCK */
IN_MULTI_LIST_LOCK();
IF_ADDR_WLOCK(ifp);
/*
@ -504,10 +576,9 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
__func__, ifma, inm, inet_ntoa_r(*group, addrbuf));
}
#endif
++inm->inm_refcount;
inm_acquire_locked(inm);
*pinm = inm;
IF_ADDR_WUNLOCK(ifp);
return (0);
goto out_locked;
}
IF_ADDR_WLOCK_ASSERT(ifp);
@ -522,6 +593,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
inm = malloc(sizeof(*inm), M_IPMADDR, M_NOWAIT | M_ZERO);
if (inm == NULL) {
IF_ADDR_WUNLOCK(ifp);
IN_MULTI_LIST_UNLOCK();
if_delmulti_ifma(ifma);
return (ENOMEM);
}
@ -539,8 +611,9 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
ifma->ifma_protospec = inm;
*pinm = inm;
out_locked:
IF_ADDR_WUNLOCK(ifp);
IN_MULTI_LIST_UNLOCK();
return (0);
}
@ -550,36 +623,29 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
* If the refcount drops to 0, free the in_multi record and
* delete the underlying link-layer membership.
*/
void
inm_release_locked(struct in_multi *inm)
static void
inm_release(struct in_multi *inm)
{
struct ifmultiaddr *ifma;
IN_MULTI_LOCK_ASSERT();
struct ifnet *ifp;
CTR2(KTR_IGMPV3, "%s: refcount is %d", __func__, inm->inm_refcount);
if (--inm->inm_refcount > 0) {
CTR2(KTR_IGMPV3, "%s: refcount is now %d", __func__,
inm->inm_refcount);
return;
}
MPASS(inm->inm_refcount == 0);
CTR2(KTR_IGMPV3, "%s: freeing inm %p", __func__, inm);
ifma = inm->inm_ifma;
ifp = inm->inm_ifp;
/* XXX this access is not covered by IF_ADDR_LOCK */
CTR2(KTR_IGMPV3, "%s: purging ifma %p", __func__, ifma);
KASSERT(ifma->ifma_protospec == inm,
("%s: ifma_protospec != inm", __func__));
ifma->ifma_protospec = NULL;
if (ifp)
CURVNET_SET(ifp->if_vnet);
inm_purge(inm);
free(inm, M_IPMADDR);
if_delmulti_ifma(ifma);
if (ifp)
CURVNET_RESTORE();
}
/*
@ -592,7 +658,7 @@ inm_clear_recorded(struct in_multi *inm)
{
struct ip_msource *ims;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
if (ims->ims_stp) {
@ -632,7 +698,7 @@ inm_record_source(struct in_multi *inm, const in_addr_t naddr)
struct ip_msource find;
struct ip_msource *ims, *nims;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
find.ims_haddr = ntohl(naddr);
ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find);
@ -959,6 +1025,7 @@ inm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
schanged = 0;
error = 0;
nsrc1 = nsrc0 = 0;
IN_MULTI_LIST_LOCK_ASSERT();
/*
* Update the source filters first, as this may fail.
@ -1165,6 +1232,7 @@ in_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina,
int error;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_UNLOCK_ASSERT();
CTR4(KTR_IGMPV3, "%s: join 0x%08x on %p(%s))", __func__,
ntohl(gina->s_addr), ifp, ifp->if_xname);
@ -1186,7 +1254,7 @@ in_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina,
CTR1(KTR_IGMPV3, "%s: in_getmulti() failure", __func__);
return (error);
}
IN_MULTI_LIST_LOCK();
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
error = inm_merge(inm, imf);
if (error) {
@ -1201,10 +1269,12 @@ in_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina,
goto out_inm_release;
}
out_inm_release:
out_inm_release:
IN_MULTI_LIST_UNLOCK();
if (error) {
CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
inm_release_locked(inm);
inm_release_deferred(inm);
} else {
*pinm = inm;
}
@ -1249,6 +1319,7 @@ in_leavegroup_locked(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
error = 0;
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_UNLOCK_ASSERT();
CTR5(KTR_IGMPV3, "%s: leave inm %p, 0x%08x/%s, imf %p", __func__,
inm, ntohl(inm->inm_addr.s_addr),
@ -1272,18 +1343,20 @@ in_leavegroup_locked(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
* the transaction, it MUST NOT fail.
*/
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
KASSERT(error == 0, ("%s: failed to merge inm state", __func__));
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
CURVNET_SET(inm->inm_ifp->if_vnet);
error = igmp_change_state(inm);
inm_release_deferred(inm);
IN_MULTI_LIST_UNLOCK();
CURVNET_RESTORE();
if (error)
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
inm_release_locked(inm);
return (error);
}
@ -1314,18 +1387,6 @@ in_addmulti(struct in_addr *ap, struct ifnet *ifp)
return (pinm);
}
/*
* Leave an IPv4 multicast group, assumed to be in exclusive (*,G) mode.
* This KPI is for legacy kernel consumers only.
*/
void
in_delmulti(struct in_multi *inm)
{
(void)in_leavegroup(inm, NULL);
}
/*#endif*/
/*
* Block or unblock an ASM multicast source on an inpcb.
* This implements the delta-based API described in RFC 3678.
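
For reference, this is the filter-delta interface a socket consumer drives
with the IP_BLOCK_SOURCE/IP_UNBLOCK_SOURCE options; a minimal userland
sketch (addresses are examples only):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Block one source within an existing any-source (*,G) membership. */
static int
block_source(int s)
{
	struct ip_mreq_source mreqs;

	mreqs.imr_multiaddr.s_addr = inet_addr("239.1.1.1");	/* group */
	mreqs.imr_sourceaddr.s_addr = inet_addr("192.0.2.1");	/* source */
	mreqs.imr_interface.s_addr = htonl(INADDR_ANY);
	return (setsockopt(s, IPPROTO_IP, IP_BLOCK_SOURCE,
	    &mreqs, sizeof(mreqs)));
}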
@ -1487,7 +1548,7 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
* Begin state merge transaction at IGMP layer.
*/
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
error = inm_merge(inm, imf);
if (error) {
@ -1503,7 +1564,7 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
out_in_multi_locked:
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
out_imf_rollback:
if (error)
imf_rollback(imf);
@ -1581,10 +1642,12 @@ void
inp_freemoptions(struct ip_moptions *imo)
{
if (imo == NULL)
return;
KASSERT(imo != NULL, ("%s: ip_moptions is NULL", __func__));
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
STAILQ_INSERT_TAIL(&imo_gc_list, imo, imo_link);
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
taskqueue_enqueue(taskqueue_thread, &imo_gc_task);
}
@ -1615,15 +1678,15 @@ inp_gcmoptions(void *context, int pending)
{
struct ip_moptions *imo;
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
while (!STAILQ_EMPTY(&imo_gc_list)) {
imo = STAILQ_FIRST(&imo_gc_list);
STAILQ_REMOVE_HEAD(&imo_gc_list, imo_link);
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
inp_freemoptions_internal(imo);
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
}
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
}
/*
@ -2163,6 +2226,8 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at IGMP layer.
*/
in_pcbref(inp);
INP_WUNLOCK(inp);
IN_MULTI_LOCK();
if (is_new) {
@ -2171,20 +2236,23 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
if (error) {
CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed",
__func__);
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
goto out_imo_free;
}
imo->imo_membership[idx] = inm;
} else {
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
__func__);
__func__);
IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
error = igmp_change_state(inm);
IN_MULTI_LIST_UNLOCK();
if (error) {
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
__func__);
@ -2195,8 +2263,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
out_in_multi_locked:
IN_MULTI_UNLOCK();
INP_WLOCK_ASSERT(inp);
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);
if (error) {
imf_rollback(imf);
if (is_new)
@ -2395,6 +2464,7 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
(void)in_leavegroup_locked(inm, imf);
} else {
CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
IN_MULTI_LIST_LOCK();
error = inm_merge(inm, imf);
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
@ -2404,6 +2474,7 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
error = igmp_change_state(inm);
IN_MULTI_LIST_UNLOCK();
if (error) {
CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
__func__);
@ -2639,6 +2710,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
INP_WLOCK_ASSERT(inp);
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
/*
* Begin state merge transaction at IGMP layer.
@ -2647,11 +2719,13 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
error = inm_merge(inm, imf);
if (error) {
CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
IN_MULTI_LIST_UNLOCK();
goto out_in_multi_locked;
}
CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
error = igmp_change_state(inm);
IN_MULTI_LIST_UNLOCK();
if (error)
CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
@ -2883,7 +2957,7 @@ sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS)
if (retval)
return (retval);
IN_MULTI_LOCK();
IN_MULTI_LIST_LOCK();
IF_ADDR_RLOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
@ -2916,7 +2990,7 @@ sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS)
}
IF_ADDR_RUNLOCK(ifp);
IN_MULTI_UNLOCK();
IN_MULTI_LIST_UNLOCK();
return (retval);
}
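
One subtlety in inp_join_group() above: IN_MULTI_LOCK() is now a sleepable
sx lock, so it can no longer be acquired while the inpcb mutex is held. The
code therefore pins the pcb with in_pcbref(), drops INP_WLOCK() across the
transaction, and revalidates afterwards. The shape of that pattern, as a
sketch:

	in_pcbref(inp);			/* hold a reference across unlock */
	INP_WUNLOCK(inp);		/* can't sleep holding a mutex */

	IN_MULTI_LOCK();
	/* ... join/leave transaction; may sleep in driver downcalls ... */
	IN_MULTI_UNLOCK();

	INP_WLOCK(inp);
	if (in_pcbrele_wlocked(inp))	/* pcb disappeared meanwhile */
		return (ENXIO);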


@ -1328,6 +1328,12 @@ in_pcbfree(struct inpcb *inp)
{
struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
#ifdef INET6
struct ip6_moptions *im6o = NULL;
#endif
#ifdef INET
struct ip_moptions *imo = NULL;
#endif
KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
KASSERT((inp->inp_flags2 & INP_FREED) == 0,
@ -1346,6 +1352,10 @@ in_pcbfree(struct inpcb *inp)
#endif
INP_WLOCK_ASSERT(inp);
#ifdef INET
imo = inp->inp_moptions;
inp->inp_moptions = NULL;
#endif
/* XXXRW: Do as much as possible here. */
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
if (inp->inp_sp != NULL)
@ -1358,16 +1368,12 @@ in_pcbfree(struct inpcb *inp)
#ifdef INET6
if (inp->inp_vflag & INP_IPV6PROTO) {
ip6_freepcbopts(inp->in6p_outputopts);
if (inp->in6p_moptions != NULL)
ip6_freemoptions(inp->in6p_moptions);
im6o = inp->in6p_moptions;
inp->in6p_moptions = NULL;
}
#endif
if (inp->inp_options)
(void)m_free(inp->inp_options);
#ifdef INET
if (inp->inp_moptions != NULL)
inp_freemoptions(inp->inp_moptions);
#endif
RO_INVALIDATE_CACHE(&inp->inp_route);
inp->inp_vflag = 0;
@ -1378,6 +1384,18 @@ in_pcbfree(struct inpcb *inp)
#endif
if (!in_pcbrele_wlocked(inp))
INP_WUNLOCK(inp);
#if defined(INET) && defined(INET6)
if (imo == NULL && im6o == NULL)
return;
#endif
INP_INFO_WUNLOCK(pcbinfo);
#ifdef INET6
ip6_freemoptions(im6o);
#endif
#ifdef INET
inp_freemoptions(imo);
#endif
INP_INFO_WLOCK(pcbinfo);
}
/*
@ -1533,7 +1551,8 @@ in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
for (i = 0, gap = 0; i < imo->imo_num_memberships;
i++) {
if (imo->imo_membership[i]->inm_ifp == ifp) {
in_delmulti(imo->imo_membership[i]);
IN_MULTI_LOCK_ASSERT();
in_leavegroup_locked(imo->imo_membership[i], NULL);
gap++;
} else if (gap != 0)
imo->imo_membership[i - gap] =


@ -55,6 +55,7 @@ struct in_aliasreq {
struct igmp_ifsoftc;
struct in_multi;
struct lltable;
SLIST_HEAD(in_multi_head, in_multi);
/*
* IPv4 per-interface state.
@ -329,21 +330,49 @@ SYSCTL_DECL(_net_inet_raw);
* consumers of IN_*_MULTI() macros should acquire the locks before
* calling them; users of the in_{add,del}multi() functions should not.
*/
extern struct mtx in_multi_mtx;
#define IN_MULTI_LOCK() mtx_lock(&in_multi_mtx)
#define IN_MULTI_UNLOCK() mtx_unlock(&in_multi_mtx)
#define IN_MULTI_LOCK_ASSERT() mtx_assert(&in_multi_mtx, MA_OWNED)
#define IN_MULTI_UNLOCK_ASSERT() mtx_assert(&in_multi_mtx, MA_NOTOWNED)
extern struct mtx in_multi_list_mtx;
extern struct sx in_multi_sx;
#define IN_MULTI_LIST_LOCK() mtx_lock(&in_multi_list_mtx)
#define IN_MULTI_LIST_UNLOCK() mtx_unlock(&in_multi_list_mtx)
#define IN_MULTI_LIST_LOCK_ASSERT() mtx_assert(&in_multi_list_mtx, MA_OWNED)
#define IN_MULTI_LIST_UNLOCK_ASSERT() mtx_assert(&in_multi_list_mtx, MA_NOTOWNED)
#define IN_MULTI_LOCK() sx_xlock(&in_multi_sx)
#define IN_MULTI_UNLOCK() sx_xunlock(&in_multi_sx)
#define IN_MULTI_LOCK_ASSERT() sx_assert(&in_multi_sx, SA_XLOCKED)
#define IN_MULTI_UNLOCK_ASSERT() sx_assert(&in_multi_sx, SA_XUNLOCKED)
/* Acquire an in_multi record. */
static __inline void
inm_acquire_locked(struct in_multi *inm)
{
IN_MULTI_LOCK_ASSERT();
IN_MULTI_LIST_LOCK_ASSERT();
++inm->inm_refcount;
}
static __inline void
inm_acquire(struct in_multi *inm)
{
IN_MULTI_LIST_LOCK();
inm_acquire_locked(inm);
IN_MULTI_LIST_UNLOCK();
}
static __inline void
inm_rele_locked(struct in_multi_head *inmh, struct in_multi *inm)
{
MPASS(inm->inm_refcount > 0);
IN_MULTI_LIST_LOCK_ASSERT();
if (--inm->inm_refcount == 0) {
MPASS(inmh != NULL);
inm->inm_ifma->ifma_protospec = NULL;
SLIST_INSERT_HEAD(inmh, inm, inm_nrele);
}
}
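
These inlines are the refcount entry points layered on the new lock split.
Note that inm_rele_locked() never frees in place: at refcount zero it only
queues the group on the caller's in_multi_head, to be handed to
inm_release_list_deferred() once the mutex is dropped (a no-op on an empty
list). A pairing sketch, for illustration:

	struct in_multi_head inmh;

	SLIST_INIT(&inmh);
	IN_MULTI_LIST_LOCK();
	inm_acquire_locked(inm);	/* ++refcount */
	/* ... use inm under the list mutex ... */
	inm_rele_locked(&inmh, inm);	/* --refcount; queues at zero */
	IN_MULTI_LIST_UNLOCK();
	inm_release_list_deferred(&inmh);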
/*
* Return values for imo_multi_filter().
*/
@ -364,11 +393,10 @@ void inm_commit(struct in_multi *);
void inm_clear_recorded(struct in_multi *);
void inm_print(const struct in_multi *);
int inm_record_source(struct in_multi *inm, const in_addr_t);
void inm_release(struct in_multi *);
void inm_release_locked(struct in_multi *);
void inm_release_deferred(struct in_multi *);
void inm_release_list_deferred(struct in_multi_head *);
struct in_multi *
in_addmulti(struct in_addr *, struct ifnet *);
void in_delmulti(struct in_multi *);
in_addmulti(struct in_addr *, struct ifnet *);
int in_joingroup(struct ifnet *, const struct in_addr *,
/*const*/ struct in_mfilter *, struct in_multi **);
int in_joingroup_locked(struct ifnet *, const struct in_addr *,


@ -1405,7 +1405,7 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
break;
}
in6m = NULL;
if ((error = in6_mc_join(ifp, &in6, NULL, &in6m, 0)) != 0) {
if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
free(im6o->im6o_membership, M_CARP);
break;
}
@ -1420,13 +1420,13 @@ carp_multicast_setup(struct carp_if *cif, sa_family_t sa)
in6.s6_addr32[3] = 0;
in6.s6_addr8[12] = 0xff;
if ((error = in6_setscope(&in6, ifp, NULL)) != 0) {
in6_mc_leave(im6o->im6o_membership[0], NULL);
in6_leavegroup(im6o->im6o_membership[0], NULL);
free(im6o->im6o_membership, M_CARP);
break;
}
in6m = NULL;
if ((error = in6_mc_join(ifp, &in6, NULL, &in6m, 0)) != 0) {
in6_mc_leave(im6o->im6o_membership[0], NULL);
if ((error = in6_joingroup(ifp, &in6, NULL, &in6m, 0)) != 0) {
in6_leavegroup(im6o->im6o_membership[0], NULL);
free(im6o->im6o_membership, M_CARP);
break;
}
@ -1469,8 +1469,8 @@ carp_multicast_cleanup(struct carp_if *cif, sa_family_t sa)
if (cif->cif_naddrs6 == 0) {
struct ip6_moptions *im6o = &cif->cif_im6o;
in6_mc_leave(im6o->im6o_membership[0], NULL);
in6_mc_leave(im6o->im6o_membership[1], NULL);
in6_leavegroup(im6o->im6o_membership[0], NULL);
in6_leavegroup(im6o->im6o_membership[1], NULL);
KASSERT(im6o->im6o_mfilters == NULL,
("%s: im6o_mfilters != NULL", __func__));
free(im6o->im6o_membership, M_CARP);


@ -79,6 +79,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
@ -732,6 +733,30 @@ in6_control(struct socket *so, u_long cmd, caddr_t data,
}
static struct in6_multi_mship *
in6_joingroup_legacy(struct ifnet *ifp, const struct in6_addr *mcaddr,
int *errorp, int delay)
{
struct in6_multi_mship *imm;
int error;
imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT);
if (imm == NULL) {
*errorp = ENOBUFS;
return (NULL);
}
delay = (delay * PR_FASTHZ) / hz;
error = in6_joingroup(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay);
if (error) {
*errorp = error;
free(imm, M_IP6MADDR);
return (NULL);
}
return (imm);
}
/*
* Join necessary multicast groups. Factored out from in6_update_ifa().
* This entire work should only be done once, for the default FIB.
@ -768,7 +793,7 @@ in6_update_ifa_join_mc(struct ifnet *ifp, struct in6_aliasreq *ifra,
*/
delay = arc4random() % (MAX_RTR_SOLICITATION_DELAY * hz);
}
imm = in6_joingroup(ifp, &mltaddr, &error, delay);
imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay);
if (imm == NULL) {
nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s "
"(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr),
@ -785,7 +810,7 @@ in6_update_ifa_join_mc(struct ifnet *ifp, struct in6_aliasreq *ifra,
if ((error = in6_setscope(&mltaddr, ifp, NULL)) != 0)
goto cleanup; /* XXX: should not fail */
imm = in6_joingroup(ifp, &mltaddr, &error, 0);
imm = in6_joingroup_legacy(ifp, &mltaddr, &error, 0);
if (imm == NULL) {
nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s "
"(errno=%d)\n", __func__, ip6_sprintf(ip6buf, &mltaddr),
@ -807,7 +832,7 @@ in6_update_ifa_join_mc(struct ifnet *ifp, struct in6_aliasreq *ifra,
}
if (in6_nigroup(ifp, NULL, -1, &mltaddr) == 0) {
/* XXX jinmei */
imm = in6_joingroup(ifp, &mltaddr, &error, delay);
imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay);
if (imm == NULL)
nd6log((LOG_WARNING,
"%s: in6_joingroup failed for %s on %s "
@ -819,7 +844,7 @@ in6_update_ifa_join_mc(struct ifnet *ifp, struct in6_aliasreq *ifra,
}
if (V_icmp6_nodeinfo_oldmcprefix &&
in6_nigroup_oldmcprefix(ifp, NULL, -1, &mltaddr) == 0) {
imm = in6_joingroup(ifp, &mltaddr, &error, delay);
imm = in6_joingroup_legacy(ifp, &mltaddr, &error, delay);
if (imm == NULL)
nd6log((LOG_WARNING,
"%s: in6_joingroup failed for %s on %s "
@ -838,7 +863,7 @@ in6_update_ifa_join_mc(struct ifnet *ifp, struct in6_aliasreq *ifra,
if ((error = in6_setscope(&mltaddr, ifp, NULL)) != 0)
goto cleanup; /* XXX: should not fail */
imm = in6_joingroup(ifp, &mltaddr, &error, 0);
imm = in6_joingroup_legacy(ifp, &mltaddr, &error, 0);
if (imm == NULL) {
nd6log((LOG_WARNING, "%s: in6_joingroup failed for %s on %s "
"(errno=%d)\n", __func__, ip6_sprintf(ip6buf,
@ -1273,7 +1298,9 @@ in6_purgeaddr(struct ifaddr *ifa)
/* Leave multicast groups. */
while ((imm = LIST_FIRST(&ia->ia6_memberships)) != NULL) {
LIST_REMOVE(imm, i6mm_chain);
in6_leavegroup(imm);
if (imm->i6mm_maddr != NULL)
in6_leavegroup(imm->i6mm_maddr, NULL);
free(imm, M_IP6MADDR);
}
plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); /* XXX */
if ((ia->ia_flags & IFA_ROUTE) && plen == 128) {


@ -846,13 +846,13 @@ in6_tmpaddrtimer(void *arg)
static void
in6_purgemaddrs(struct ifnet *ifp)
{
LIST_HEAD(,in6_multi) purgeinms;
struct in6_multi *inm, *tinm;
struct in6_multi_head purgeinms;
struct in6_multi *inm;
struct ifmultiaddr *ifma;
LIST_INIT(&purgeinms);
SLIST_INIT(&purgeinms);
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
/*
* Extract list of in6_multi associated with the detaching ifp
* which the PF_INET6 layer is about to release.
@ -865,17 +865,13 @@ in6_purgemaddrs(struct ifnet *ifp)
ifma->ifma_protospec == NULL)
continue;
inm = (struct in6_multi *)ifma->ifma_protospec;
LIST_INSERT_HEAD(&purgeinms, inm, in6m_entry);
in6m_rele_locked(&purgeinms, inm);
}
IF_ADDR_RUNLOCK(ifp);
LIST_FOREACH_SAFE(inm, &purgeinms, in6m_entry, tinm) {
LIST_REMOVE(inm, in6m_entry);
in6m_release_locked(inm);
}
mld_ifdetach(ifp);
IN6_MULTI_LIST_UNLOCK();
IN6_MULTI_UNLOCK();
in6m_release_list_deferred(&purgeinms);
}
void


@ -41,13 +41,13 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/priv.h>
#include <sys/ktr.h>
@ -59,8 +59,12 @@ __FBSDID("$FreeBSD$");
#include <net/route.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/udp.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_fib.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
@ -89,7 +93,7 @@ typedef union sockunion sockunion_t;
static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
"IPv6 multicast PCB-layer source filter");
static MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
"IPv6 multicast MLD-layer source filter");
@ -107,8 +111,16 @@ RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);
* any need for in6_multi itself to be virtualized -- it is bound to an ifp
* anyway no matter what happens.
*/
struct mtx in6_multi_mtx;
MTX_SYSINIT(in6_multi_mtx, &in6_multi_mtx, "in6_multi_mtx", MTX_DEF);
struct mtx in6_multi_list_mtx;
MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF);
struct mtx in6_multi_free_mtx;
MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF);
struct sx in6_multi_sx;
SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx");
static void im6f_commit(struct in6_mfilter *);
static int im6f_get_source(struct in6_mfilter *imf,
@ -130,7 +142,7 @@ static struct in6_msource *
const struct sockaddr *);
static void im6s_merge(struct ip6_msource *ims,
const struct in6_msource *lims, const int rollback);
static int in6_mc_get(struct ifnet *, const struct in6_addr *,
static int in6_getmulti(struct ifnet *, const struct in6_addr *,
struct in6_multi **);
static int in6m_get_source(struct in6_multi *inm,
const struct in6_addr *addr, const int noalloc,
@ -389,7 +401,7 @@ im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
* Return 0 if successful, otherwise return an appropriate error code.
*/
static int
in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
in6_getmulti(struct ifnet *ifp, const struct in6_addr *group,
struct in6_multi **pinm)
{
struct sockaddr_in6 gsin6;
@ -405,8 +417,8 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
* re-acquire around the call.
*/
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK();
IF_ADDR_WLOCK(ifp);
inm = in6m_lookup_locked(ifp, group);
if (inm != NULL) {
/*
@ -415,7 +427,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
*/
KASSERT(inm->in6m_refcount >= 1,
("%s: bad refcount %d", __func__, inm->in6m_refcount));
++inm->in6m_refcount;
in6m_acquire_locked(inm);
*pinm = inm;
goto out_locked;
}
@ -429,10 +441,12 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
* Check if a link-layer group is already associated
* with this network-layer group on the given ifnet.
*/
IN6_MULTI_LIST_UNLOCK();
IF_ADDR_WUNLOCK(ifp);
error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma);
if (error != 0)
return (error);
IN6_MULTI_LIST_LOCK();
IF_ADDR_WLOCK(ifp);
/*
@ -455,7 +469,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
panic("%s: ifma %p is inconsistent with %p (%p)",
__func__, ifma, inm, group);
#endif
++inm->in6m_refcount;
in6m_acquire_locked(inm);
*pinm = inm;
goto out_locked;
}
@ -472,6 +486,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
*/
inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO);
if (inm == NULL) {
IN6_MULTI_LIST_UNLOCK();
IF_ADDR_WUNLOCK(ifp);
if_delmulti_ifma(ifma);
return (ENOMEM);
@ -491,7 +506,8 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
ifma->ifma_protospec = inm;
*pinm = inm;
out_locked:
out_locked:
IN6_MULTI_LIST_UNLOCK();
IF_ADDR_WUNLOCK(ifp);
return (error);
}
@ -502,36 +518,105 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
* If the refcount drops to 0, free the in6_multi record and
* delete the underlying link-layer membership.
*/
void
in6m_release_locked(struct in6_multi *inm)
static void
in6m_release(struct in6_multi *inm)
{
struct ifmultiaddr *ifma;
IN6_MULTI_LOCK_ASSERT();
struct ifnet *ifp;
CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount);
if (--inm->in6m_refcount > 0) {
CTR2(KTR_MLD, "%s: refcount is now %d", __func__,
inm->in6m_refcount);
return;
}
MPASS(inm->in6m_refcount == 0);
CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm);
ifma = inm->in6m_ifma;
ifp = inm->in6m_ifp;
/* XXX this access is not covered by IF_ADDR_LOCK */
CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma);
KASSERT(ifma->ifma_protospec == inm,
("%s: ifma_protospec != inm", __func__));
ifma->ifma_protospec = NULL;
KASSERT(ifma->ifma_protospec == NULL,
("%s: ifma_protospec != NULL", __func__));
if (ifp)
CURVNET_SET(ifp->if_vnet);
in6m_purge(inm);
free(inm, M_IP6MADDR);
if_delmulti_ifma(ifma);
if (ifp)
CURVNET_RESTORE();
}
static struct grouptask free_gtask;
static struct in6_multi_head in6m_free_list;
static void in6m_release_task(void *arg __unused);
static void in6m_init(void)
{
SLIST_INIT(&in6m_free_list);
taskqgroup_config_gtask_init(NULL, &free_gtask, in6m_release_task, "in6m release task");
}
SYSINIT(in6m_init, SI_SUB_SMP + 1, SI_ORDER_FIRST,
in6m_init, NULL);
void
in6m_release_list_deferred(struct in6_multi_head *inmh)
{
if (SLIST_EMPTY(inmh))
return;
mtx_lock(&in6_multi_free_mtx);
SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele);
mtx_unlock(&in6_multi_free_mtx);
GROUPTASK_ENQUEUE(&free_gtask);
}
void
in6m_release_deferred(struct in6_multi *inm)
{
struct in6_multi_head tmp;
struct ifnet *ifp;
struct ifaddr *ifa;
struct in6_ifaddr *ifa6;
struct in6_multi_mship *imm;
IN6_MULTI_LIST_LOCK_ASSERT();
KASSERT(inm->in6m_refcount > 0, ("refcount == %d inm: %p", inm->in6m_refcount, inm));
if (--inm->in6m_refcount == 0) {
ifp = inm->in6m_ifp;
IF_ADDR_LOCK_ASSERT(ifp);
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
if (ifa->ifa_addr->sa_family != AF_INET6)
continue;
ifa6 = (void *)ifa;
LIST_FOREACH(imm, &ifa6->ia6_memberships, i6mm_chain) {
if (inm == imm->i6mm_maddr)
imm->i6mm_maddr = NULL;
}
}
SLIST_INIT(&tmp);
inm->in6m_ifma->ifma_protospec = NULL;
SLIST_INSERT_HEAD(&tmp, inm, in6m_nrele);
in6m_release_list_deferred(&tmp);
}
}
static void
in6m_release_task(void *arg __unused)
{
struct in6_multi_head in6m_free_tmp;
struct in6_multi *inm, *tinm;
SLIST_INIT(&in6m_free_tmp);
mtx_lock(&in6_multi_free_mtx);
SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele);
mtx_unlock(&in6_multi_free_mtx);
IN6_MULTI_LOCK();
SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) {
SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele);
in6m_release(inm);
}
IN6_MULTI_UNLOCK();
}
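
The IPv6 machinery mirrors the IPv4 version with one extra step: legacy
in6_multi_mship records hold bare i6mm_maddr pointers, so
in6m_release_deferred() above scrubs any membership that points at the dying
group. Consumers must therefore tolerate a cleared pointer, as in this
condensed restatement of the in6_purgeaddr() change from this commit:

	while ((imm = LIST_FIRST(&ia->ia6_memberships)) != NULL) {
		LIST_REMOVE(imm, i6mm_chain);
		/* i6mm_maddr may have been NULLed by deferred release */
		if (imm->i6mm_maddr != NULL)
			in6_leavegroup(imm->i6mm_maddr, NULL);
		free(imm, M_IP6MADDR);
	}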
/*
@ -544,7 +629,7 @@ in6m_clear_recorded(struct in6_multi *inm)
{
struct ip6_msource *ims;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
if (ims->im6s_stp) {
@ -584,7 +669,7 @@ in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
struct ip6_msource find;
struct ip6_msource *ims, *nims;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
find.im6s_addr = *addr;
ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
@ -911,6 +996,7 @@ in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
schanged = 0;
error = 0;
nsrc1 = nsrc0 = 0;
IN6_MULTI_LIST_LOCK_ASSERT();
/*
* Update the source filters first, as this may fail.
@ -1087,65 +1173,16 @@ in6m_purge(struct in6_multi *inm)
*
* SMPng: Assume no mc locks held by caller.
*/
struct in6_multi_mship *
in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr,
int *errorp, int delay)
{
struct in6_multi_mship *imm;
int error;
imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT);
if (imm == NULL) {
*errorp = ENOBUFS;
return (NULL);
}
delay = (delay * PR_FASTHZ) / hz;
error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay);
if (error) {
*errorp = error;
free(imm, M_IP6MADDR);
return (NULL);
}
return (imm);
}
/*
* Leave a multicast address w/o sources.
* KAME compatibility entry point.
*
* SMPng: Assume no mc locks held by caller.
*/
int
in6_leavegroup(struct in6_multi_mship *imm)
{
if (imm->i6mm_maddr != NULL)
in6_mc_leave(imm->i6mm_maddr, NULL);
free(imm, M_IP6MADDR);
return 0;
}
/*
* Join a multicast group; unlocked entry point.
*
* SMPng: XXX: in6_mc_join() is called from in6_control() when upper
* locks are not held. Fortunately, ifp is unlikely to have been detached
* at this point, so we assume it's OK to recurse.
*/
int
in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr,
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
const int delay)
{
int error;
IN6_MULTI_LOCK();
error = in6_mc_join_locked(ifp, mcaddr, imf, pinm, delay);
error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay);
IN6_MULTI_UNLOCK();
return (error);
}
@ -1159,12 +1196,13 @@ in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
* code is returned.
*/
int
in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
const int delay)
{
struct in6_mfilter timf;
struct in6_multi *inm;
struct ifmultiaddr *ifma;
int error;
#ifdef KTR
char ip6tbuf[INET6_ADDRSTRLEN];
@ -1185,6 +1223,7 @@ in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
#endif
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_UNLOCK_ASSERT();
CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__,
ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp));
@ -1200,13 +1239,13 @@ in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
imf = &timf;
}
error = in6_mc_get(ifp, mcaddr, &inm);
error = in6_getmulti(ifp, mcaddr, &inm);
if (error) {
CTR1(KTR_MLD, "%s: in6_mc_get() failure", __func__);
CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__);
return (error);
}
IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
if (error) {
@ -1224,11 +1263,19 @@ in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
out_in6m_release:
if (error) {
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
in6m_release_locked(inm);
IF_ADDR_RLOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_protospec == inm) {
ifma->ifma_protospec = NULL;
break;
}
}
in6m_release_deferred(inm);
IF_ADDR_RUNLOCK(ifp);
} else {
*pinm = inm;
}
IN6_MULTI_LIST_UNLOCK();
return (error);
}
@ -1236,14 +1283,13 @@ in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
* Leave a multicast group; unlocked entry point.
*/
int
in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
int error;
IN6_MULTI_LOCK();
error = in6_mc_leave_locked(inm, imf);
error = in6_leavegroup_locked(inm, imf);
IN6_MULTI_UNLOCK();
return (error);
}
@ -1261,9 +1307,10 @@ in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
* makes a state change downcall into MLD.
*/
int
in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
struct in6_mfilter timf;
struct ifnet *ifp;
int error;
#ifdef KTR
char ip6tbuf[INET6_ADDRSTRLEN];
@ -1294,6 +1341,9 @@ in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
* to be allocated, and there is no opportunity to roll back
* the transaction, it MUST NOT fail.
*/
ifp = inm->in6m_ifp;
IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
KASSERT(error == 0, ("%s: failed to merge inm state", __func__));
@ -1304,11 +1354,17 @@ in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
in6m_release_locked(inm);
if (ifp)
IF_ADDR_RLOCK(ifp);
in6m_release_deferred(inm);
if (ifp)
IF_ADDR_RUNLOCK(ifp);
IN6_MULTI_LIST_UNLOCK();
return (error);
}
/*
* Block or unblock an ASM multicast source on an inpcb.
* This implements the delta-based API described in RFC 3678.
@ -1446,8 +1502,7 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at MLD layer.
*/
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
CTR1(KTR_MLD, "%s: merge inm state", __func__);
error = in6m_merge(inm, imf);
if (error)
@ -1459,7 +1514,7 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
}
IN6_MULTI_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
out_im6f_rollback:
if (error)
@ -1535,7 +1590,8 @@ ip6_freemoptions(struct ip6_moptions *imo)
struct in6_mfilter *imf;
size_t idx, nmships;
KASSERT(imo != NULL, ("%s: ip6_moptions is NULL", __func__));
if (imo == NULL)
return;
nmships = imo->im6o_num_memberships;
for (idx = 0; idx < nmships; ++idx) {
@ -1543,7 +1599,7 @@ ip6_freemoptions(struct ip6_moptions *imo)
if (imf)
im6f_leave(imf);
/* XXX this will thrash the lock(s) */
(void)in6_mc_leave(imo->im6o_membership[idx], imf);
(void)in6_leavegroup(imo->im6o_membership[idx], imf);
if (imf)
im6f_purge(imf);
}
@ -2034,10 +2090,12 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
/*
* Begin state merge transaction at MLD layer.
*/
in_pcbref(inp);
INP_WUNLOCK(inp);
IN6_MULTI_LOCK();
if (is_new) {
error = in6_mc_join_locked(ifp, &gsa->sin6.sin6_addr, imf,
error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf,
&inm, 0);
if (error) {
IN6_MULTI_UNLOCK();
@ -2046,6 +2104,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
imo->im6o_membership[idx] = inm;
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
@ -2057,10 +2116,13 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
IN6_MULTI_LIST_UNLOCK();
}
IN6_MULTI_UNLOCK();
INP_WLOCK_ASSERT(inp);
INP_WLOCK(inp);
if (in_pcbrele_wlocked(inp))
return (ENXIO);
if (error) {
im6f_rollback(imf);
if (is_new)
@ -2282,9 +2344,10 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
* Give up the multicast address record to which
* the membership points.
*/
(void)in6_mc_leave_locked(inm, imf);
(void)in6_leavegroup_locked(inm, imf);
} else {
CTR1(KTR_MLD, "%s: merge inm state", __func__);
IN6_MULTI_LIST_LOCK();
error = in6m_merge(inm, imf);
if (error)
CTR1(KTR_MLD, "%s: failed to merge inm state",
@ -2296,6 +2359,7 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall",
__func__);
}
IN6_MULTI_LIST_UNLOCK();
}
IN6_MULTI_UNLOCK();
@ -2505,7 +2569,7 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
goto out_im6f_rollback;
INP_WLOCK_ASSERT(inp);
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
/*
* Begin state merge transaction at MLD layer.
@ -2521,7 +2585,7 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
}
IN6_MULTI_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
out_im6f_rollback:
if (error)
@ -2712,7 +2776,7 @@ sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS)
return (retval);
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
IF_ADDR_RLOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_INET6 ||
@ -2744,6 +2808,7 @@ sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS)
}
IF_ADDR_RUNLOCK(ifp);
IN6_MULTI_LIST_UNLOCK();
IN6_MULTI_UNLOCK();
return (retval);


@ -805,8 +805,7 @@ in6_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
for (i = 0; i < im6o->im6o_num_memberships; i++) {
if (im6o->im6o_membership[i]->in6m_ifp ==
ifp) {
in6_mc_leave(im6o->im6o_membership[i],
NULL);
in6_leavegroup(im6o->im6o_membership[i], NULL);
gap++;
} else if (gap != 0) {
im6o->im6o_membership[i - gap] =

View File

@ -100,6 +100,7 @@ struct nd_ifinfo;
struct scope6_id;
struct lltable;
struct mld_ifsoftc;
struct in6_multi;
struct in6_ifextra {
counter_u64_t *in6_ifstat;
@ -113,6 +114,10 @@ struct in6_ifextra {
#define LLTABLE6(ifp) (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->lltable)
#ifdef _KERNEL
SLIST_HEAD(in6_multi_head, in6_multi);
MALLOC_DECLARE(M_IP6MADDR);
struct in6_ifaddr {
struct ifaddr ia_ifa; /* protocol-independent info */
#define ia_ifp ia_ifa.ifa_ifp
@ -630,7 +635,6 @@ struct in6_multi_mship {
* w/o breaking the ABI for ifmcstat.
*/
struct in6_multi {
LIST_ENTRY(in6_multi) in6m_entry; /* list glue */
struct in6_addr in6m_addr; /* IPv6 multicast address */
struct ifnet *in6m_ifp; /* back pointer to ifnet */
struct ifmultiaddr *in6m_ifma; /* back pointer to ifmultiaddr */
@ -694,11 +698,18 @@ im6s_get_mode(const struct in6_multi *inm, const struct ip6_msource *ims,
* consumers of IN_*_MULTI() macros should acquire the locks before
* calling them; users of the in_{add,del}multi() functions should not.
*/
extern struct mtx in6_multi_mtx;
#define IN6_MULTI_LOCK() mtx_lock(&in6_multi_mtx)
#define IN6_MULTI_UNLOCK() mtx_unlock(&in6_multi_mtx)
#define IN6_MULTI_LOCK_ASSERT() mtx_assert(&in6_multi_mtx, MA_OWNED)
#define IN6_MULTI_UNLOCK_ASSERT() mtx_assert(&in6_multi_mtx, MA_NOTOWNED)
extern struct mtx in6_multi_list_mtx;
extern struct sx in6_multi_sx;
#define IN6_MULTI_LIST_LOCK() mtx_lock(&in6_multi_list_mtx)
#define IN6_MULTI_LIST_UNLOCK() mtx_unlock(&in6_multi_list_mtx)
#define IN6_MULTI_LIST_LOCK_ASSERT() mtx_assert(&in6_multi_list_mtx, MA_OWNED)
#define IN6_MULTI_LIST_UNLOCK_ASSERT() mtx_assert(&in6_multi_list_mtx, MA_NOTOWNED)
#define IN6_MULTI_LOCK() sx_xlock(&in6_multi_sx)
#define IN6_MULTI_UNLOCK() sx_xunlock(&in6_multi_sx)
#define IN6_MULTI_LOCK_ASSERT() sx_assert(&in6_multi_sx, SA_XLOCKED)
#define IN6_MULTI_UNLOCK_ASSERT() sx_assert(&in6_multi_sx, SA_XUNLOCKED)
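
This split is the heart of the change: IN6_MULTI_LOCK() is now a sleepable sx lock serializing membership updates (so it may be held across driver calls), while IN6_MULTI_LIST_LOCK() is a mutex covering only list traversal and in6_multi state. A minimal, hypothetical consumer (ifp, mcaddr, and inm are placeholders, not names from the patch):

	/* Membership change: sx lock; sleeping is allowed. */
	IN6_MULTI_LOCK();
	error = in6_joingroup_locked(ifp, &mcaddr, NULL, &inm, 0);
	IN6_MULTI_UNLOCK();

	/* List/state access only: mutex; must not sleep under it. */
	IN6_MULTI_LIST_LOCK();
	in6m_acquire_locked(inm);
	IN6_MULTI_LIST_UNLOCK();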
/*
* Look up an in6_multi record for an IPv6 multicast address
@ -713,13 +724,14 @@ in6m_lookup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr)
struct ifmultiaddr *ifma;
struct in6_multi *inm;
IN6_MULTI_LOCK_ASSERT();
IF_ADDR_LOCK_ASSERT(ifp);
inm = NULL;
TAILQ_FOREACH(ifma, &((ifp)->if_multiaddrs), ifma_link) {
if (ifma->ifma_addr->sa_family == AF_INET6) {
inm = (struct in6_multi *)ifma->ifma_protospec;
if (inm == NULL)
continue;
if (IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, mcaddr))
break;
inm = NULL;
@ -738,11 +750,11 @@ in6m_lookup(struct ifnet *ifp, const struct in6_addr *mcaddr)
{
struct in6_multi *inm;
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
IF_ADDR_RLOCK(ifp);
inm = in6m_lookup_locked(ifp, mcaddr);
IF_ADDR_RUNLOCK(ifp);
IN6_MULTI_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
return (inm);
}
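
in6m_lookup() (and in6m_lookup_locked() above) now run under the list mutex rather than the old in6_multi_mtx. A caller that needs the record after dropping the locks should take a reference while still covered; a sketch (ifp and mcaddr are placeholders):

	struct in6_multi *inm;

	IN6_MULTI_LIST_LOCK();
	IF_ADDR_RLOCK(ifp);
	inm = in6m_lookup_locked(ifp, &mcaddr);
	if (inm != NULL)
		in6m_acquire_locked(inm);	/* keep it valid past the unlock */
	IF_ADDR_RUNLOCK(ifp);
	IN6_MULTI_LIST_UNLOCK();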
@ -752,36 +764,69 @@ static __inline void
in6m_acquire_locked(struct in6_multi *inm)
{
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
++inm->in6m_refcount;
}
static __inline void
in6m_acquire(struct in6_multi *inm)
{
IN6_MULTI_LIST_LOCK();
in6m_acquire_locked(inm);
IN6_MULTI_LIST_UNLOCK();
}
static __inline void
in6m_rele_locked(struct in6_multi_head *inmh, struct in6_multi *inm)
{
struct ifnet *ifp;
struct ifaddr *ifa;
struct in6_ifaddr *ifa6;
struct in6_multi_mship *imm;
KASSERT(inm->in6m_refcount > 0, ("refcount == %d inm: %p", inm->in6m_refcount, inm));
IN6_MULTI_LIST_LOCK_ASSERT();
if (--inm->in6m_refcount == 0) {
ifp = inm->in6m_ifp;
IF_ADDR_LOCK_ASSERT(ifp);
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
if (ifa->ifa_addr->sa_family != AF_INET6)
continue;
ifa6 = (void *)ifa;
LIST_FOREACH(imm, &ifa6->ia6_memberships, i6mm_chain) {
if (inm == imm->i6mm_maddr)
imm->i6mm_maddr = NULL;
}
}
inm->in6m_ifma->ifma_protospec = NULL;
SLIST_INSERT_HEAD(inmh, inm, in6m_nrele);
}
}
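
in6m_rele_locked() replaces the per-softc mli_relinmhead list that the mld6 hunks below delete: the final reference is dropped while the list mutex is held, the inm is unhooked and parked on a caller-supplied in6_multi_head, and the release that may call into a driver happens only after the mutex is dropped. The pattern, as used by mld_ifdetach() and the timer paths in this patch:

	struct in6_multi_head inmh;

	SLIST_INIT(&inmh);
	IN6_MULTI_LIST_LOCK();
	/* ... walk memberships; queue each dying inm for deferred release ... */
	in6m_rele_locked(&inmh, inm);
	IN6_MULTI_LIST_UNLOCK();

	/* No mutex held: safe to call into drivers to tear down the records. */
	in6m_release_list_deferred(&inmh);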
struct ip6_moptions;
struct sockopt;
/* Multicast KPIs. */
int im6o_mc_filter(const struct ip6_moptions *, const struct ifnet *,
const struct sockaddr *, const struct sockaddr *);
int in6_mc_join(struct ifnet *, const struct in6_addr *,
int in6_joingroup(struct ifnet *, const struct in6_addr *,
struct in6_mfilter *, struct in6_multi **, int);
int in6_mc_join_locked(struct ifnet *, const struct in6_addr *,
int in6_joingroup_locked(struct ifnet *, const struct in6_addr *,
struct in6_mfilter *, struct in6_multi **, int);
int in6_mc_leave(struct in6_multi *, struct in6_mfilter *);
int in6_mc_leave_locked(struct in6_multi *, struct in6_mfilter *);
int in6_leavegroup(struct in6_multi *, struct in6_mfilter *);
int in6_leavegroup_locked(struct in6_multi *, struct in6_mfilter *);
void in6m_clear_recorded(struct in6_multi *);
void in6m_commit(struct in6_multi *);
void in6m_print(const struct in6_multi *);
int in6m_record_source(struct in6_multi *, const struct in6_addr *);
void in6m_release_locked(struct in6_multi *);
void in6m_release_deferred(struct in6_multi *);
void in6m_release_list_deferred(struct in6_multi_head *);
void ip6_freemoptions(struct ip6_moptions *);
int ip6_getmoptions(struct inpcb *, struct sockopt *);
int ip6_setmoptions(struct inpcb *, struct sockopt *);
/* Legacy KAME multicast KPIs. */
struct in6_multi_mship *
in6_joingroup(struct ifnet *, struct in6_addr *, int *, int);
int in6_leavegroup(struct in6_multi_mship *);
/* flags to in6_update_ifa */
#define IN6_IFAUPDATE_DADDELAY 0x1 /* first time to configure an address */
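
With the renames above, in6_joingroup()/in6_leavegroup() now name the full-featured KPI, displacing the legacy KAME in6_multi_mship variants removed at the end of this header. Per the comment above the lock macros, callers of these functions must not hold the multicast locks. A hypothetical caller (ifp and sin6 are placeholders):

	struct in6_multi *inm = NULL;
	int error;

	/* Locks are taken internally; none may be held on entry. */
	error = in6_joingroup(ifp, &sin6->sin6_addr, NULL, &inm, 0);
	if (error == 0)
		(void)in6_leavegroup(inm, NULL);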

View File

@ -124,7 +124,7 @@ static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
/*const*/ struct mld_hdr *);
static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
/*const*/ struct mld_hdr *);
static void mld_v1_process_group_timer(struct mld_ifsoftc *,
static void mld_v1_process_group_timer(struct in6_multi_head *,
struct in6_multi *);
static void mld_v1_process_querier_timers(struct mld_ifsoftc *);
static int mld_v1_transmit_report(struct in6_multi *, const int);
@ -142,7 +142,7 @@ static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
struct mbuf *, const int, const int);
static int mld_v2_merge_state_changes(struct in6_multi *,
struct mbufq *);
static void mld_v2_process_group_timers(struct mld_ifsoftc *,
static void mld_v2_process_group_timers(struct in6_multi_head *,
struct mbufq *, struct mbufq *,
struct in6_multi *, const int);
static int mld_v2_process_group_query(struct in6_multi *,
@ -377,6 +377,7 @@ sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
return (error);
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
MLD_LOCK();
if (name[0] <= 0 || name[0] > V_if_index) {
@ -409,6 +410,7 @@ sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
out_locked:
MLD_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
IN6_MULTI_UNLOCK();
return (error);
}
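
The sysctl_mld_ifinfo() hunk above also establishes the nesting order for the new lock: the sx lock outermost, then the list mutex, then MLD_LOCK(). Paths that take more than one of these should follow the same order; schematically:

	IN6_MULTI_LOCK();		/* sx, outermost, may sleep */
	IN6_MULTI_LIST_LOCK();		/* mtx */
	MLD_LOCK();			/* mtx, innermost */
	/* ... */
	MLD_UNLOCK();
	IN6_MULTI_LIST_UNLOCK();
	IN6_MULTI_UNLOCK();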
@ -508,7 +510,6 @@ mli_alloc_locked(/*const*/ struct ifnet *ifp)
mli->mli_qi = MLD_QI_INIT;
mli->mli_qri = MLD_QRI_INIT;
mli->mli_uri = MLD_URI_INIT;
SLIST_INIT(&mli->mli_relinmhead);
mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
@ -536,12 +537,14 @@ mld_ifdetach(struct ifnet *ifp)
{
struct mld_ifsoftc *mli;
struct ifmultiaddr *ifma;
struct in6_multi *inm, *tinm;
struct in6_multi *inm;
struct in6_multi_head inmh;
CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
if_name(ifp));
IN6_MULTI_LOCK_ASSERT();
SLIST_INIT(&inmh);
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK();
mli = MLD_IFINFO(ifp);
@ -553,20 +556,16 @@ mld_ifdetach(struct ifnet *ifp)
continue;
inm = (struct in6_multi *)ifma->ifma_protospec;
if (inm->in6m_state == MLD_LEAVING_MEMBER) {
SLIST_INSERT_HEAD(&mli->mli_relinmhead,
inm, in6m_nrele);
in6m_rele_locked(&inmh, inm);
ifma->ifma_protospec = NULL;
}
in6m_clear_recorded(inm);
}
IF_ADDR_RUNLOCK(ifp);
SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele,
tinm) {
SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
in6m_release_locked(inm);
}
}
MLD_UNLOCK();
in6m_release_list_deferred(&inmh);
}
/*
@ -606,10 +605,6 @@ mli_delete_locked(const struct ifnet *ifp)
LIST_REMOVE(mli, mli_link);
KASSERT(SLIST_EMPTY(&mli->mli_relinmhead),
("%s: there are dangling in_multi references",
__func__));
free(mli, M_MLD);
return;
}
@ -680,7 +675,7 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
in6_setscope(&mld->mld_addr, ifp, NULL);
}
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
MLD_LOCK();
/*
@ -728,7 +723,7 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
IF_ADDR_RUNLOCK(ifp);
MLD_UNLOCK();
IN6_MULTI_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
return (0);
}
@ -759,7 +754,7 @@ mld_v1_update_group(struct in6_multi *inm, const int timer)
ip6_sprintf(ip6tbuf, &inm->in6m_addr),
if_name(inm->in6m_ifp), timer);
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
@ -882,7 +877,7 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
in6_setscope(&mld->mld_addr, ifp, NULL);
}
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
MLD_LOCK();
mli = MLD_IFINFO(ifp);
@ -965,7 +960,7 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
out_locked:
MLD_UNLOCK();
IN6_MULTI_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
return (0);
}
@ -983,7 +978,7 @@ mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
int retval;
uint16_t nsrc;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
retval = 0;
@ -1168,7 +1163,7 @@ mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
in6_setscope(&mld->mld_addr, ifp, NULL);
IN6_MULTI_LOCK();
IN6_MULTI_LIST_LOCK();
MLD_LOCK();
IF_ADDR_RLOCK(ifp);
@ -1220,7 +1215,7 @@ mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
out_locked:
IF_ADDR_RUNLOCK(ifp);
MLD_UNLOCK();
IN6_MULTI_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
/* XXX Clear embedded scope ID as userland won't expect it. */
in6_clearscope(&mld->mld_addr);
@ -1333,6 +1328,7 @@ mld_fasttimo_vnet(void)
struct mld_ifsoftc *mli;
struct ifmultiaddr *ifma;
struct in6_multi *inm, *tinm;
struct in6_multi_head inmh;
int uri_fasthz;
uri_fasthz = 0;
@ -1347,7 +1343,8 @@ mld_fasttimo_vnet(void)
!V_state_change_timers_running6)
return;
IN6_MULTI_LOCK();
SLIST_INIT(&inmh);
IN6_MULTI_LIST_LOCK();
MLD_LOCK();
/*
@ -1399,10 +1396,10 @@ mld_fasttimo_vnet(void)
inm = (struct in6_multi *)ifma->ifma_protospec;
switch (mli->mli_version) {
case MLD_VERSION_1:
mld_v1_process_group_timer(mli, inm);
mld_v1_process_group_timer(&inmh, inm);
break;
case MLD_VERSION_2:
mld_v2_process_group_timers(mli, &qrq,
mld_v2_process_group_timers(&inmh, &qrq,
&scq, inm, uri_fasthz);
break;
}
@ -1419,9 +1416,8 @@ mld_fasttimo_vnet(void)
* IF_ADDR_LOCK internally as well as
* ip6_output() to transmit a packet.
*/
SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
in6m_nrele, tinm) {
SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
SLIST_FOREACH_SAFE(inm, &inmh, in6m_nrele, tinm) {
SLIST_REMOVE_HEAD(&inmh,
in6m_nrele);
(void)mld_v1_transmit_report(inm,
MLD_LISTENER_REPORT);
@ -1435,19 +1431,14 @@ mld_fasttimo_vnet(void)
* Free the in_multi reference(s) for
* this lifecycle.
*/
SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
in6m_nrele, tinm) {
SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
in6m_nrele);
in6m_release_locked(inm);
}
in6m_release_list_deferred(&inmh);
break;
}
}
out_locked:
MLD_UNLOCK();
IN6_MULTI_UNLOCK();
IN6_MULTI_LIST_UNLOCK();
}
/*
@ -1455,11 +1446,11 @@ mld_fasttimo_vnet(void)
* Will update the global pending timer flags.
*/
static void
mld_v1_process_group_timer(struct mld_ifsoftc *mli, struct in6_multi *inm)
mld_v1_process_group_timer(struct in6_multi_head *inmh, struct in6_multi *inm)
{
int report_timer_expired;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
if (inm->in6m_timer == 0) {
@ -1482,8 +1473,7 @@ mld_v1_process_group_timer(struct mld_ifsoftc *mli, struct in6_multi *inm)
case MLD_REPORTING_MEMBER:
if (report_timer_expired) {
inm->in6m_state = MLD_IDLE_MEMBER;
SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
in6m_nrele);
in6m_rele_locked(inmh, inm);
}
break;
case MLD_G_QUERY_PENDING_MEMBER:
@ -1499,7 +1489,7 @@ mld_v1_process_group_timer(struct mld_ifsoftc *mli, struct in6_multi *inm)
* Note: Unlocked read from mli.
*/
static void
mld_v2_process_group_timers(struct mld_ifsoftc *mli,
mld_v2_process_group_timers(struct in6_multi_head *inmh,
struct mbufq *qrq, struct mbufq *scq,
struct in6_multi *inm, const int uri_fasthz)
{
@ -1509,7 +1499,7 @@ mld_v2_process_group_timers(struct mld_ifsoftc *mli,
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
query_response_timer_expired = 0;
@ -1607,8 +1597,7 @@ mld_v2_process_group_timers(struct mld_ifsoftc *mli,
if (inm->in6m_state == MLD_LEAVING_MEMBER &&
inm->in6m_scrv == 0) {
inm->in6m_state = MLD_NOT_MEMBER;
SLIST_INSERT_HEAD(&mli->mli_relinmhead,
inm, in6m_nrele);
in6m_rele_locked(inmh, inm);
}
}
break;
@ -1654,12 +1643,14 @@ mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
{
struct ifmultiaddr *ifma;
struct ifnet *ifp;
struct in6_multi *inm, *tinm;
struct in6_multi *inm;
struct in6_multi_head inmh;
CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
mli->mli_ifp, if_name(mli->mli_ifp));
IN6_MULTI_LOCK_ASSERT();
SLIST_INIT(&inmh);
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
/*
@ -1694,8 +1685,8 @@ mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
* version, we need to release the final
* reference held for issuing the INCLUDE {}.
*/
SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
in6m_nrele);
in6m_rele_locked(&inmh, inm);
ifma->ifma_protospec = NULL;
/* FALLTHROUGH */
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_SG_QUERY_PENDING_MEMBER:
@ -1713,10 +1704,7 @@ mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
}
}
IF_ADDR_RUNLOCK(ifp);
SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele, tinm) {
SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
in6m_release_locked(inm);
}
in6m_release_list_deferred(&inmh);
}
/*
@ -1788,7 +1776,7 @@ mld_v1_transmit_report(struct in6_multi *in6m, const int type)
struct mbuf *mh, *md;
struct mld_hdr *mld;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
ifp = in6m->in6m_ifp;
@ -1879,7 +1867,7 @@ mld_change_state(struct in6_multi *inm, const int delay)
struct ifnet *ifp;
int error;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
error = 0;
@ -1963,7 +1951,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
ifp = inm->in6m_ifp;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
@ -1993,7 +1981,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
*/
if (mli->mli_version == MLD_VERSION_2 &&
inm->in6m_state == MLD_LEAVING_MEMBER)
in6m_release_locked(inm);
in6m_release_deferred(inm);
inm->in6m_state = MLD_REPORTING_MEMBER;
@ -2106,7 +2094,7 @@ mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli)
ifp = inm->in6m_ifp;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
KASSERT(mli && mli->mli_ifp == ifp,
@ -2169,7 +2157,7 @@ mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli)
__func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
inm->in6m_ifp, if_name(inm->in6m_ifp));
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
switch (inm->in6m_state) {
@ -2296,7 +2284,7 @@ mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm,
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
ifp = inm->in6m_ifp;
is_filter_list_change = 0;
@ -2679,7 +2667,7 @@ mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm)
char ip6tbuf[INET6_ADDRSTRLEN];
#endif
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
if (inm->in6m_nsrc == 0 ||
(inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
@ -2879,7 +2867,7 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq)
domerge = 0;
recslen = 0;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
/*
@ -2978,7 +2966,7 @@ mld_v2_dispatch_general_query(struct mld_ifsoftc *mli)
struct in6_multi *inm;
int retval;
IN6_MULTI_LOCK_ASSERT();
IN6_MULTI_LIST_LOCK_ASSERT();
MLD_LOCK_ASSERT();
KASSERT(mli->mli_version == MLD_VERSION_2,

View File

@ -136,7 +136,6 @@ struct mld_ifsoftc {
uint32_t mli_qi; /* MLDv2 Query Interval (s) */
uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */
uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */
SLIST_HEAD(,in6_multi) mli_relinmhead; /* released groups */
struct mbufq mli_gq; /* queue of general query responses */
};

View File

@ -54,13 +54,15 @@ void gtaskqueue_drain_all(struct gtaskqueue *queue);
int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *task);
void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *grptask,
void *uniq, int irq, char *name);
void *uniq, int irq, const char *name);
int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *grptask,
void *uniq, int cpu, int irq, char *name);
void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask);
struct taskqgroup *taskqgroup_create(char *name);
void taskqgroup_destroy(struct taskqgroup *qgroup);
int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride);
void taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
const char *name);
#define TASK_ENQUEUED 0x1
#define TASK_SKIP_WAKEUP 0x2
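
The taskqgroup_config_gtask_init() declared above gives drivers a one-call way to set up a grouptask bound, judging by the name, to a dedicated config task group: a sleepable taskqueue context to which non-sleepable code can defer work. A hypothetical driver-side use, assuming the existing GROUPTASK_ENQUEUE() macro (the mydrv names are illustrative):

static struct grouptask mydrv_cfg_task;

static void
mydrv_config(void *ctx)
{
	/* Runs from the taskqueue thread; sleeping is fine here. */
}

static void
mydrv_attach(void *sc)
{
	taskqgroup_config_gtask_init(sc, &mydrv_cfg_task, mydrv_config,
	    "mydrv config");
}

/* Later, from a context that cannot sleep (e.g. under a driver mutex): */
GROUPTASK_ENQUEUE(&mydrv_cfg_task);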