bridge: call member interface ioctl() without NET_EPOCH

We're not allowed to hold NET_EPOCH while sleeping, so when we call ioctl()
handlers for member interfaces we cannot be in NET_EPOCH.  We still need some
protection of our CK_LISTs, so hold BRIDGE_LOCK instead.

That requires changing BRIDGE_LOCK into a sleepable lock and splitting out a
separate BRIDGE_RT_LOCK to protect the bridge_rtnode lists. BRIDGE_RT_LOCK is
taken in the data path (while in NET_EPOCH), so it cannot be sleepable.

While here, document the locking strategy.

MFC after:	1 month
Differential Revision:	https://reviews.freebsd.org/D26418
Author: Kristof Provost
Date:   2020-10-06 19:19:56 +00:00
Commit: 4af1bd8157 (parent: f34702b76e)


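Before the diff: the change amounts to the split sketched below, with a sleepable sx(9) lock for the configuration path and a plain mutex(9) for the table that the NET_EPOCH data path writes. This is only an illustration of the pattern; bridge_sketch, bridge_cfg_path() and bridge_data_path() are made-up names, not code from if_bridge.c.

/*
 * Illustrative sketch only: bridge_sketch, bridge_cfg_path() and
 * bridge_data_path() are hypothetical names, not code from this commit.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/epoch.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

struct bridge_sketch {
        struct sx       cfg_sx;         /* sleepable; held across member ioctl() */
        struct mtx      rt_mtx;         /* non-sleepable; taken in the data path */
};

static void
bridge_sketch_init(struct bridge_sketch *b)
{
        sx_init(&b->cfg_sx, "bridge sketch cfg");
        mtx_init(&b->rt_mtx, "bridge sketch rt", NULL, MTX_DEF);
}

/* Configuration path: may sleep, so it must not run inside NET_EPOCH. */
static int
bridge_cfg_path(struct bridge_sketch *b, struct ifnet *member, u_long cmd,
    caddr_t data)
{
        int error;

        sx_xlock(&b->cfg_sx);   /* single writer for the member CK_LISTs */
        error = (*member->if_ioctl)(member, cmd, data); /* may sleep */
        sx_xunlock(&b->cfg_sx);

        return (error);
}

/* Data path: runs under NET_EPOCH; only the mutex may be taken here. */
static void
bridge_data_path(struct bridge_sketch *b)
{
        struct epoch_tracker et;

        NET_EPOCH_ENTER(et);
        /* Lockless CK_LIST reads of members and rtnodes are safe here. */
        mtx_lock(&b->rt_mtx);   /* writers to the rtnode table */
        /* ... insert or expire bridge_rtnode entries ... */
        mtx_unlock(&b->rt_mtx);
        NET_EPOCH_EXIT(et);
}

bridge_ioctl() in the diff below follows the first shape: it takes BRIDGE_LOCK for the duration of the call and only drops it around re-entrant callbacks such as ether_ioctl() and if_init().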
@@ -186,17 +186,41 @@ extern void nd6_setmtu(struct ifnet *);
 /*
  * Bridge locking
+ *
+ * The bridge relies heavily on the epoch(9) system to protect its data
+ * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
+ * must ensure there is only one writer at a time.
+ *
+ * That is: for read accesses we only need to be in NET_EPOCH, but for write
+ * accesses we must hold:
+ *
+ *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
+ *  - BRIDGE_LOCK, for any other change
+ *
+ * The BRIDGE_LOCK is a sleepable lock, because it is held accross ioctl()
+ * calls to bridge member interfaces and these ioctl()s can sleep.
+ * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
+ * required while we're in NET_EPOCH and then we're not allowed to sleep.
  */
 #define BRIDGE_LOCK_INIT(_sc)           do {                    \
-        mtx_init(&(_sc)->sc_mtx, "if_bridge", NULL, MTX_DEF);  \
+        sx_init(&(_sc)->sc_sx, "if_bridge");                    \
+        mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF); \
 } while (0)
 #define BRIDGE_LOCK_DESTROY(_sc)        do {    \
-        mtx_destroy(&(_sc)->sc_mtx);            \
+        sx_destroy(&(_sc)->sc_sx);              \
+        mtx_destroy(&(_sc)->sc_rt_mtx);         \
 } while (0)
-#define BRIDGE_LOCK(_sc)                mtx_lock(&(_sc)->sc_mtx)
-#define BRIDGE_UNLOCK(_sc)              mtx_unlock(&(_sc)->sc_mtx)
-#define BRIDGE_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
-#define BRIDGE_UNLOCK_ASSERT(_sc)       mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
+#define BRIDGE_LOCK(_sc)                sx_xlock(&(_sc)->sc_sx)
+#define BRIDGE_UNLOCK(_sc)              sx_xunlock(&(_sc)->sc_sx)
+#define BRIDGE_LOCK_ASSERT(_sc)         sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
+#define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc)    \
+            MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
+#define BRIDGE_UNLOCK_ASSERT(_sc)       sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
+#define BRIDGE_RT_LOCK(_sc)             mtx_lock(&(_sc)->sc_rt_mtx)
+#define BRIDGE_RT_UNLOCK(_sc)           mtx_unlock(&(_sc)->sc_rt_mtx)
+#define BRIDGE_RT_LOCK_ASSERT(_sc)      mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
+#define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
+            MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
 /*
  * Bridge interface list entry.
@@ -235,7 +259,8 @@ struct bridge_rtnode {
 struct bridge_softc {
         struct ifnet            *sc_ifp;        /* make this an interface */
         LIST_ENTRY(bridge_softc) sc_list;
-        struct mtx              sc_mtx;
+        struct sx               sc_sx;
+        struct mtx              sc_rt_mtx;
         uint32_t                sc_brtmax;      /* max # of addresses */
         uint32_t                sc_brtcnt;      /* cur. # of addresses */
         uint32_t                sc_brttimeout;  /* rt timeout in seconds */
@@ -252,8 +277,8 @@ struct bridge_softc {
         struct epoch_context    sc_epoch_ctx;
 };
-VNET_DEFINE_STATIC(struct mtx, bridge_list_mtx);
-#define V_bridge_list_mtx       VNET(bridge_list_mtx)
+VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
+#define V_bridge_list_sx        VNET(bridge_list_sx)
 static eventhandler_tag bridge_detach_cookie;
 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
@@ -536,11 +561,11 @@ const int bridge_control_table_size = nitems(bridge_control_table);
 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list);
 #define V_bridge_list   VNET(bridge_list)
-#define BRIDGE_LIST_LOCK_INIT(x)        mtx_init(&V_bridge_list_mtx,    \
-                                            "if_bridge list", NULL, MTX_DEF)
-#define BRIDGE_LIST_LOCK_DESTROY(x)     mtx_destroy(&V_bridge_list_mtx)
-#define BRIDGE_LIST_LOCK(x)             mtx_lock(&V_bridge_list_mtx)
-#define BRIDGE_LIST_UNLOCK(x)           mtx_unlock(&V_bridge_list_mtx)
+#define BRIDGE_LIST_LOCK_INIT(x)        sx_init(&V_bridge_list_sx,      \
+                                            "if_bridge list")
+#define BRIDGE_LIST_LOCK_DESTROY(x)     sx_destroy(&V_bridge_list_sx)
+#define BRIDGE_LIST_LOCK(x)             sx_xlock(&V_bridge_list_sx)
+#define BRIDGE_LIST_UNLOCK(x)           sx_xunlock(&V_bridge_list_sx)
 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
 #define V_bridge_cloner VNET(bridge_cloner)
@@ -670,7 +695,7 @@ bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
         /* Initialize our routing table. */
         bridge_rtable_init(sc);
-        callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
+        callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
         CK_LIST_INIT(&sc->sc_iflist);
         CK_LIST_INIT(&sc->sc_spanlist);
@@ -722,7 +747,6 @@ bridge_clone_destroy(struct ifnet *ifp)
         struct bridge_iflist *bif;
         struct epoch_tracker et;
-        NET_EPOCH_ENTER(et);
         BRIDGE_LOCK(sc);
         bridge_stop(ifp, 1);
@@ -740,6 +764,8 @@ bridge_clone_destroy(struct ifnet *ifp)
         BRIDGE_UNLOCK(sc);
+        NET_EPOCH_ENTER(et);
         callout_drain(&sc->sc_brcallout);
         BRIDGE_LIST_LOCK();
@@ -778,9 +804,8 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
         struct ifdrv *ifd = (struct ifdrv *) data;
         const struct bridge_control *bc;
         int error = 0, oldmtu;
-        struct epoch_tracker et;
-        NET_EPOCH_ENTER(et);
+        BRIDGE_LOCK(sc);
         switch (cmd) {
         case SIOCADDMULTI:
@@ -826,9 +851,7 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
                 }
                 oldmtu = ifp->if_mtu;
-                BRIDGE_LOCK(sc);
                 error = (*bc->bc_func)(sc, &args);
-                BRIDGE_UNLOCK(sc);
                 if (error)
                         break;
@@ -855,16 +878,16 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
                          * If interface is marked down and it is running,
                          * then stop and disable it.
                          */
-                        BRIDGE_LOCK(sc);
                         bridge_stop(ifp, 1);
-                        BRIDGE_UNLOCK(sc);
                 } else if ((ifp->if_flags & IFF_UP) &&
                     !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                         /*
                          * If interface is marked up and it is stopped, then
                          * start it.
                          */
+                        BRIDGE_UNLOCK(sc);
                         (*ifp->if_init)(sc);
+                        BRIDGE_LOCK(sc);
                 }
                 break;
@@ -877,7 +900,6 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
                         sc->sc_ifp->if_mtu = ifr->ifr_mtu;
                         break;
                 }
-                BRIDGE_LOCK(sc);
                 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
                         if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) {
                                 log(LOG_NOTICE, "%s: invalid MTU: %u(%s)"
@@ -890,18 +912,19 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
                 }
                 if (!error)
                         sc->sc_ifp->if_mtu = ifr->ifr_mtu;
-                BRIDGE_UNLOCK(sc);
                 break;
         default:
                 /*
                  * drop the lock as ether_ioctl() will call bridge_start() and
                  * cause the lock to be recursed.
                  */
+                BRIDGE_UNLOCK(sc);
                 error = ether_ioctl(ifp, cmd, data);
+                BRIDGE_LOCK(sc);
                 break;
         }
-        NET_EPOCH_EXIT(et);
+        BRIDGE_UNLOCK(sc);
         return (error);
 }
@@ -933,9 +956,7 @@ bridge_mutecaps(struct bridge_softc *sc)
                 /* strip off mask bits and enable them again if allowed */
                 enabled &= ~BRIDGE_IFCAPS_MASK;
                 enabled |= mask;
-                BRIDGE_UNLOCK(sc);
                 bridge_set_ifcap(sc, bif, enabled);
-                BRIDGE_LOCK(sc);
         }
 }
@@ -946,8 +967,6 @@ bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
         struct ifreq ifr;
         int error, mask, stuck;
-        BRIDGE_UNLOCK_ASSERT(sc);
         bzero(&ifr, sizeof(ifr));
         ifr.ifr_reqcap = set;
@@ -977,7 +996,7 @@ bridge_lookup_member(struct bridge_softc *sc, const char *name)
         struct bridge_iflist *bif;
         struct ifnet *ifp;
-        NET_EPOCH_ASSERT();
+        BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
         CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
                 ifp = bif->bif_ifp;
@@ -998,7 +1017,7 @@ bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
 {
         struct bridge_iflist *bif;
-        NET_EPOCH_ASSERT();
+        BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
         CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
                 if (bif->bif_ifp == member_ifp)
@@ -1061,14 +1080,15 @@ bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
         bridge_linkcheck(sc);
         bridge_mutecaps(sc);    /* recalcuate now this interface is removed */
+        BRIDGE_RT_LOCK(sc);
         bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
+        BRIDGE_RT_UNLOCK(sc);
         KASSERT(bif->bif_addrcnt == 0,
             ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
         ifs->if_bridge_output = NULL;
         ifs->if_bridge_input = NULL;
         ifs->if_bridge_linkstate = NULL;
-        BRIDGE_UNLOCK(sc);
         if (!gone) {
                 switch (ifs->if_type) {
                 case IFT_ETHER:
@@ -1095,7 +1115,6 @@ bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
                 bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
         }
         bstp_destroy(&bif->bif_stp);    /* prepare to free */
-        BRIDGE_LOCK(sc);
         NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
 }
@@ -1173,9 +1192,7 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg)
          */
         CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
                 if (in6ifa_llaonifp(bif->bif_ifp)) {
-                        BRIDGE_UNLOCK(sc);
                         in6_ifdetach(bif->bif_ifp);
-                        BRIDGE_LOCK(sc);
                         if_printf(sc->sc_ifp,
                             "IPv6 addresses on %s have been removed "
                             "before adding it as a member to prevent "
@@ -1184,9 +1201,7 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg)
                 }
         }
         if (in6ifa_llaonifp(ifs)) {
-                BRIDGE_UNLOCK(sc);
                 in6_ifdetach(ifs);
-                BRIDGE_LOCK(sc);
                 if_printf(sc->sc_ifp,
                     "IPv6 addresses on %s have been removed "
                     "before adding it as a member to prevent "
@@ -1244,9 +1259,7 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg)
         switch (ifs->if_type) {
         case IFT_ETHER:
         case IFT_L2VLAN:
-                BRIDGE_UNLOCK(sc);
                 error = ifpromisc(ifs, 1);
-                BRIDGE_LOCK(sc);
                 break;
         }
@@ -1428,10 +1441,8 @@ bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
                 len -= sizeof(breq);
         }
-        BRIDGE_UNLOCK(sc);
         bifc->ifbic_len = sizeof(breq) * count;
         error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
-        BRIDGE_LOCK(sc);
         free(outbuf, M_TEMP);
         return (error);
 }
@@ -1481,10 +1492,8 @@ bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
                 len -= sizeof(bareq);
         }
 out:
-        BRIDGE_UNLOCK(sc);
         bac->ifbac_len = sizeof(bareq) * count;
         error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
-        BRIDGE_LOCK(sc);
         free(outbuf, M_TEMP);
         return (error);
 }
@@ -1494,19 +1503,20 @@ bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
 {
         struct ifbareq *req = arg;
         struct bridge_iflist *bif;
+        struct epoch_tracker et;
         int error;
-        NET_EPOCH_ASSERT();
+        NET_EPOCH_ENTER(et);
         bif = bridge_lookup_member(sc, req->ifba_ifsname);
-        if (bif == NULL)
+        if (bif == NULL) {
+                NET_EPOCH_EXIT(et);
                 return (ENOENT);
+        }
         /* bridge_rtupdate() may acquire the lock. */
-        BRIDGE_UNLOCK(sc);
         error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
             req->ifba_flags);
-        BRIDGE_LOCK(sc);
+        NET_EPOCH_EXIT(et);
         return (error);
 }
@@ -1542,7 +1552,10 @@ bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
 {
         struct ifbreq *req = arg;
+        BRIDGE_RT_LOCK(sc);
         bridge_rtflush(sc, req->ifbr_ifsflags);
+        BRIDGE_RT_UNLOCK(sc);
         return (0);
 }
@@ -1810,10 +1823,8 @@ bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
                 len -= sizeof(bpreq);
         }
-        BRIDGE_UNLOCK(sc);
         bifstp->ifbpstp_len = sizeof(bpreq) * count;
         error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
-        BRIDGE_LOCK(sc);
         free(outbuf, M_TEMP);
         return (error);
 }
@@ -1845,7 +1856,6 @@ bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
 {
         struct bridge_softc *sc = ifp->if_bridge;
         struct bridge_iflist *bif;
-        struct epoch_tracker et;
         if (ifp->if_flags & IFF_RENAMING)
                 return;
@@ -1856,7 +1866,6 @@ bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
                  */
                 return;
         }
-        NET_EPOCH_ENTER(et);
         /* Check if the interface is a bridge member */
         if (sc != NULL) {
                 BRIDGE_LOCK(sc);
@@ -1866,7 +1875,6 @@ bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
                 bridge_delete_member(sc, bif, 1);
                 BRIDGE_UNLOCK(sc);
-                NET_EPOCH_EXIT(et);
                 return;
         }
@@ -1883,7 +1891,6 @@ bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
                 BRIDGE_UNLOCK(sc);
         }
         BRIDGE_LIST_UNLOCK();
-        NET_EPOCH_EXIT(et);
 }
@@ -1920,16 +1927,18 @@ bridge_stop(struct ifnet *ifp, int disable)
 {
         struct bridge_softc *sc = ifp->if_softc;
-        NET_EPOCH_ASSERT();
         BRIDGE_LOCK_ASSERT(sc);
         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                 return;
+        BRIDGE_RT_LOCK(sc);
         callout_stop(&sc->sc_brcallout);
         bstp_stop(&sc->sc_stp);
         bridge_rtflush(sc, IFBF_FLUSHDYN);
+        BRIDGE_RT_UNLOCK(sc);
         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 }
@@ -2659,8 +2668,7 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
         struct bridge_rtnode *brt;
         int error;
-        NET_EPOCH_ASSERT();
-        BRIDGE_UNLOCK_ASSERT(sc);
+        BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
         /* Check the source address is valid and not multicast. */
         if (ETHER_IS_MULTICAST(dst) ||
@@ -2677,24 +2685,24 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
          * update it, otherwise create a new one.
          */
         if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
-                BRIDGE_LOCK(sc);
+                BRIDGE_RT_LOCK(sc);
                 /* Check again, now that we have the lock. There could have
                  * been a race and we only want to insert this once. */
                 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) != NULL) {
-                        BRIDGE_UNLOCK(sc);
+                        BRIDGE_RT_UNLOCK(sc);
                         return (0);
                 }
                 if (sc->sc_brtcnt >= sc->sc_brtmax) {
                         sc->sc_brtexceeded++;
-                        BRIDGE_UNLOCK(sc);
+                        BRIDGE_RT_UNLOCK(sc);
                         return (ENOSPC);
                 }
                 /* Check per interface address limits (if enabled) */
                 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
                         bif->bif_addrexceeded++;
-                        BRIDGE_UNLOCK(sc);
+                        BRIDGE_RT_UNLOCK(sc);
                         return (ENOSPC);
                 }
@@ -2705,7 +2713,7 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
                  */
                 brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
                 if (brt == NULL) {
-                        BRIDGE_UNLOCK(sc);
+                        BRIDGE_RT_UNLOCK(sc);
                         return (ENOMEM);
                 }
                 brt->brt_vnet = curvnet;
@@ -2720,22 +2728,22 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
                 if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
                         uma_zfree(V_bridge_rtnode_zone, brt);
-                        BRIDGE_UNLOCK(sc);
+                        BRIDGE_RT_UNLOCK(sc);
                         return (error);
                 }
                 brt->brt_dst = bif;
                 bif->bif_addrcnt++;
-                BRIDGE_UNLOCK(sc);
+                BRIDGE_RT_UNLOCK(sc);
         }
         if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
             brt->brt_dst != bif) {
-                BRIDGE_LOCK(sc);
+                BRIDGE_RT_LOCK(sc);
                 brt->brt_dst->bif_addrcnt--;
                 brt->brt_dst = bif;
                 brt->brt_dst->bif_addrcnt++;
-                BRIDGE_UNLOCK(sc);
+                BRIDGE_RT_UNLOCK(sc);
         }
         if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
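The hunks above preserve the lookup/lock/re-lookup pattern that the inline comment calls out: look the address up locklessly (the caller is in NET_EPOCH or holds BRIDGE_LOCK), and only if an insert looks necessary take BRIDGE_RT_LOCK and look again, because another CPU may have inserted the same entry in the meantime. A condensed sketch of that pattern follows; rt_table, rt_lookup() and rt_insert() are hypothetical stand-ins for the bridge's rtnode hash and its bridge_rtnode_lookup()/bridge_rtnode_insert():

/* Sketch of lookup/lock/re-lookup; all names here are hypothetical. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct rt_entry;
struct rt_table {
        struct mtx      rt_mtx;         /* serializes writers; readers rely on epoch */
};

struct rt_entry *rt_lookup(struct rt_table *, const uint8_t *dst);
int              rt_insert(struct rt_table *, struct rt_entry *);

static int
rt_learn(struct rt_table *t, const uint8_t *dst, struct rt_entry *fresh)
{
        int error;

        /* 1. Optimistic lockless lookup; the caller is in NET_EPOCH. */
        if (rt_lookup(t, dst) != NULL)
                return (0);

        /*
         * 2. Take the writer lock and look again: another CPU may have
         * inserted the same address between steps 1 and 2.
         */
        mtx_lock(&t->rt_mtx);
        if (rt_lookup(t, dst) != NULL) {
                mtx_unlock(&t->rt_mtx);
                return (0);
        }
        error = rt_insert(t, fresh);
        mtx_unlock(&t->rt_mtx);

        return (error);
}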
@@ -2777,7 +2785,7 @@ bridge_rttrim(struct bridge_softc *sc)
         struct bridge_rtnode *brt, *nbrt;
         NET_EPOCH_ASSERT();
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK_ASSERT(sc);
         /* Make sure we actually need to do this. */
         if (sc->sc_brtcnt <= sc->sc_brtmax)
@@ -2806,10 +2814,8 @@ static void
 bridge_timer(void *arg)
 {
         struct bridge_softc *sc = arg;
-        struct epoch_tracker et;
-        NET_EPOCH_ENTER(et);
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK_ASSERT(sc);
         /* Destruction of rtnodes requires a proper vnet context */
         CURVNET_SET(sc->sc_ifp->if_vnet);
@@ -2819,7 +2825,6 @@ bridge_timer(void *arg)
         callout_reset(&sc->sc_brcallout,
             bridge_rtable_prune_period * hz, bridge_timer, sc);
         CURVNET_RESTORE();
-        NET_EPOCH_EXIT(et);
 }
 /*
@@ -2832,8 +2837,7 @@ bridge_rtage(struct bridge_softc *sc)
 {
         struct bridge_rtnode *brt, *nbrt;
-        NET_EPOCH_ASSERT();
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK_ASSERT(sc);
         CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
                 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
@@ -2853,8 +2857,7 @@ bridge_rtflush(struct bridge_softc *sc, int full)
 {
         struct bridge_rtnode *brt, *nbrt;
-        NET_EPOCH_ASSERT();
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK_ASSERT(sc);
         CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
                 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
@@ -2873,8 +2876,7 @@ bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
         struct bridge_rtnode *brt;
         int found = 0;
-        NET_EPOCH_ASSERT();
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK(sc);
         /*
          * If vlan is zero then we want to delete for all vlans so the lookup
@@ -2885,6 +2887,8 @@ bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
                 found = 1;
         }
+        BRIDGE_RT_UNLOCK(sc);
         return (found ? 0 : ENOENT);
 }
@@ -2898,8 +2902,7 @@ bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
 {
         struct bridge_rtnode *brt, *nbrt;
-        NET_EPOCH_ASSERT();
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK_ASSERT(sc);
         CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
                 if (brt->brt_ifp == ifp && (full ||
@@ -3003,7 +3006,7 @@ bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan
         uint32_t hash;
         int dir;
-        NET_EPOCH_ASSERT();
+        BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
         hash = bridge_rthash(sc, addr);
         CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
@@ -3030,7 +3033,7 @@ bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
         uint32_t hash;
         int dir;
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK_ASSERT(sc);
         hash = bridge_rthash(sc, brt->brt_addr);
@@ -3086,8 +3089,7 @@ bridge_rtnode_destroy_cb(struct epoch_context *ctx)
 static void
 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
 {
-        NET_EPOCH_ASSERT();
-        BRIDGE_LOCK_ASSERT(sc);
+        BRIDGE_RT_LOCK_ASSERT(sc);
         CK_LIST_REMOVE(brt, brt_hash);
@@ -3108,11 +3110,9 @@ bridge_rtable_expire(struct ifnet *ifp, int age)
 {
         struct bridge_softc *sc = ifp->if_bridge;
         struct bridge_rtnode *brt;
-        struct epoch_tracker et;
-        NET_EPOCH_ENTER(et);
         CURVNET_SET(ifp->if_vnet);
-        BRIDGE_LOCK(sc);
+        BRIDGE_RT_LOCK(sc);
         /*
          * If the age is zero then flush, otherwise set all the expiry times to
@@ -3129,9 +3129,8 @@ bridge_rtable_expire(struct ifnet *ifp, int age)
                         brt->brt_expire = time_uptime + age;
                 }
         }
-        BRIDGE_UNLOCK(sc);
+        BRIDGE_RT_UNLOCK(sc);
         CURVNET_RESTORE();
-        NET_EPOCH_EXIT(et);
 }
 /*
@@ -3659,7 +3658,7 @@ bridge_linkcheck(struct bridge_softc *sc)
         struct bridge_iflist *bif;
         int new_link, hasls;
-        NET_EPOCH_ASSERT();
+        BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
         new_link = LINK_STATE_DOWN;
         hasls = 0;