Replace rmlock with epoch in lagg

Use the new epoch-based reclamation API. The hot paths no longer block
at all, and the sx lock now protects the softc data.  This fixes the
reported lock-order reversals (LORs) where the rwlock was obtained while
the sx lock was held.

Submitted by:	mmacy
Reported by:	Harry Schmalzbauer <freebsd@omnilan.de>
Reviewed by:	sbruno
Sponsored by:	Limelight Networks
Differential Revision:	https://reviews.freebsd.org/D15355
Author:	shurd
Date:	2018-05-14 20:06:49 +00:00
Parent:	8e38b4b70f
Commit:	8b4a96b13e

2 changed files with 109 additions and 112 deletions
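For readers unfamiliar with the epoch API, here is a minimal sketch of the
read-side pattern this change adopts (illustrative only; example_reader() is
not part of the commit). A reader enters the network epoch instead of taking
a read lock, so the hot path never blocks:

#include <sys/epoch.h>

/*
 * Walk the port list inside an epoch section.  Traversal is safe
 * against concurrent removal because a removed port is only freed
 * after all current readers have exited the epoch.
 */
static void
example_reader(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_RLOCK();		/* epoch_enter(net_epoch) */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* read-only use of lp */
	}
	LAGG_RUNLOCK();		/* epoch_exit(net_epoch) */
}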

sys/net/if_lagg.c

@@ -73,6 +73,18 @@ __FBSDID("$FreeBSD$");
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>
#define LAGG_RLOCK() epoch_enter(net_epoch)
#define LAGG_RUNLOCK() epoch_exit(net_epoch)
#define LAGG_RLOCK_ASSERT() MPASS(in_epoch())
#define LAGG_UNLOCK_ASSERT() MPASS(!in_epoch())
#define LAGG_SX_INIT(_sc) sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define LAGG_SX_DESTROY(_sc) sx_destroy(&(_sc)->sc_sx)
#define LAGG_XLOCK(_sc) sx_xlock(&(_sc)->sc_sx)
#define LAGG_XUNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
#define LAGG_SXLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SA_LOCKED)
#define LAGG_XLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
/* Special flags we should propagate to the lagg ports. */
static struct {
int flag;
@@ -334,14 +346,11 @@ lagg_proto_detach(struct lagg_softc *sc)
lagg_proto pr;
LAGG_XLOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
pr = sc->sc_proto;
sc->sc_proto = LAGG_PROTO_NONE;
if (lagg_protos[pr].pr_detach != NULL)
lagg_protos[pr].pr_detach(sc);
else
LAGG_WUNLOCK(sc);
}
static int
@@ -437,10 +446,10 @@ lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
if (ifp->if_softc != arg) /* Not our event */
return;
LAGG_SLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
LAGG_RLOCK();
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
LAGG_SUNLOCK(sc);
LAGG_RUNLOCK();
}
/*
@@ -456,10 +465,10 @@ lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
if (ifp->if_softc != arg) /* Not our event */
return;
LAGG_SLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
LAGG_RLOCK();
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
LAGG_SUNLOCK(sc);
LAGG_RUNLOCK();
}
static int
@@ -475,7 +484,6 @@ lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
free(sc, M_DEVBUF);
return (ENOSPC);
}
LAGG_LOCK_INIT(sc);
LAGG_SX_INIT(sc);
LAGG_XLOCK(sc);
@@ -550,9 +558,7 @@ lagg_clone_destroy(struct ifnet *ifp)
lagg_port_destroy(lp, 1);
/* Unhook the aggregation protocol */
LAGG_WLOCK(sc);
lagg_proto_detach(sc);
LAGG_UNLOCK_ASSERT(sc);
LAGG_XUNLOCK(sc);
ifmedia_removeall(&sc->sc_media);
@@ -564,7 +570,6 @@ lagg_clone_destroy(struct ifnet *ifp)
LAGG_LIST_UNLOCK();
LAGG_SX_DESTROY(sc);
LAGG_LOCK_DESTROY(sc);
free(sc, M_DEVBUF);
}
@@ -580,7 +585,7 @@ lagg_capabilities(struct lagg_softc *sc)
/* Get common enabled capabilities for the lagg ports */
ena = ~0;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
ena &= lp->lp_ifp->if_capenable;
ena = (ena == ~0 ? 0 : ena);
@@ -590,7 +595,7 @@ lagg_capabilities(struct lagg_softc *sc)
*/
do {
pena = ena;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
lagg_setcaps(lp, ena);
ena &= lp->lp_ifp->if_capenable;
}
@@ -600,7 +605,7 @@ lagg_capabilities(struct lagg_softc *sc)
cap = ~0;
hwa = ~(uint64_t)0;
memset(&hw_tsomax, 0, sizeof(hw_tsomax));
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
cap &= lp->lp_ifp->if_capabilities;
hwa &= lp->lp_ifp->if_hwassist;
if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
@@ -689,17 +694,14 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
lp->lp_ifcapenable = ifp->if_capenable;
if (SLIST_EMPTY(&sc->sc_ports)) {
LAGG_WLOCK(sc);
bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
lagg_proto_lladdr(sc);
LAGG_WUNLOCK(sc);
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
} else {
if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
}
lagg_setflags(lp, 1);
LAGG_WLOCK(sc);
if (SLIST_EMPTY(&sc->sc_ports))
sc->sc_primary = lp;
@@ -723,13 +725,15 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
* is predictable and `ifconfig laggN create ...` command
* will lead to the same result each time.
*/
SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
LAGG_RLOCK();
CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
if (tlp->lp_ifp->if_index < ifp->if_index && (
SLIST_NEXT(tlp, lp_entries) == NULL ||
SLIST_NEXT(tlp, lp_entries)->lp_ifp->if_index >
ifp->if_index))
break;
}
LAGG_RUNLOCK();
if (tlp != NULL)
SLIST_INSERT_AFTER(tlp, lp, lp_entries);
else
@@ -738,13 +742,10 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
lagg_setmulti(lp);
LAGG_WUNLOCK(sc);
if ((error = lagg_proto_addport(sc, lp)) != 0) {
/* Remove the port, without calling pr_delport. */
LAGG_WLOCK(sc);
lagg_port_destroy(lp, 0);
LAGG_UNLOCK_ASSERT(sc);
return (error);
}
@@ -764,7 +765,7 @@ lagg_port_checkstacking(struct lagg_softc *sc)
int m = 0;
LAGG_SXLOCK_ASSERT(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (lp->lp_flags & LAGG_PORT_STACK) {
sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
m = MAX(m, lagg_port_checkstacking(sc_ptr));
@@ -775,6 +776,19 @@ lagg_port_checkstacking(struct lagg_softc *sc)
}
#endif
static void
lagg_port_destroy_cb(epoch_context_t ec)
{
struct lagg_port *lp;
struct ifnet *ifp;
lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
ifp = lp->lp_ifp;
if_rele(ifp);
free(lp, M_DEVBUF);
}
static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
@@ -786,11 +800,8 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport)
LAGG_XLOCK_ASSERT(sc);
if (rundelport) {
LAGG_WLOCK(sc);
if (rundelport)
lagg_proto_delport(sc, lp);
} else
LAGG_WLOCK_ASSERT(sc);
if (lp->lp_detaching == 0)
lagg_clrmulti(lp);
@@ -809,7 +820,7 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport)
}
/* Finally, remove the port from the lagg */
SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
sc->sc_count--;
/* Update the primary interface */
@@ -824,19 +835,16 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport)
if (sc->sc_destroying == 0) {
bcopy(lladdr, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
lagg_proto_lladdr(sc);
LAGG_WUNLOCK(sc);
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
} else
LAGG_WUNLOCK(sc);
}
/*
* Update lladdr for each port (new primary needs update
* as well, to switch from old lladdr to its 'real' one)
*/
SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
if_setlladdr(lp_ptr->lp_ifp, lladdr, ETHER_ADDR_LEN);
} else
LAGG_WUNLOCK(sc);
}
if (lp->lp_ifflags)
if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
@@ -847,9 +855,11 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport)
if_setlladdr(ifp, lp->lp_lladdr, ETHER_ADDR_LEN);
}
if_rele(ifp);
free(lp, M_DEVBUF);
/*
* free port and release its ifnet reference after a grace period has
* elapsed.
*/
epoch_call(net_epoch, &lp->lp_epoch_ctx, lagg_port_destroy_cb);
/* Update lagg capabilities */
lagg_capabilities(sc);
lagg_linkstate(sc);
@@ -878,15 +888,15 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
LAGG_SLOCK(sc);
LAGG_RLOCK();
if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
error = ENOENT;
LAGG_SUNLOCK(sc);
LAGG_RUNLOCK();
break;
}
lagg_port2req(lp, rp);
LAGG_SUNLOCK(sc);
LAGG_RUNLOCK();
break;
case SIOCSIFCAP:
@@ -942,17 +952,16 @@ lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
struct lagg_softc *sc;
struct lagg_port *lp;
struct ifnet *lpifp;
struct rm_priotracker tracker;
uint64_t newval, oldval, vsum;
/* Revise this when we've got non-generic counters. */
KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));
sc = (struct lagg_softc *)ifp->if_softc;
LAGG_RLOCK(sc, &tracker);
vsum = 0;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
LAGG_RLOCK();
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
/* Saved attached value */
oldval = lp->port_counters.val[cnt];
/* current value */
@@ -961,6 +970,7 @@ lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
/* Calculate diff and save new */
vsum += newval - oldval;
}
LAGG_RUNLOCK();
/*
* Add counter data which might be added by upper
@@ -973,7 +983,6 @@ lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
*/
vsum += sc->detached_counters.val[cnt];
LAGG_RUNLOCK(sc, &tracker);
return (vsum);
}
@@ -1079,7 +1088,7 @@ lagg_init(void *xsc)
* This might be if_setlladdr() notification
* that lladdr has been changed.
*/
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
ETHER_ADDR_LEN) != 0)
if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ETHER_ADDR_LEN);
@@ -1124,7 +1133,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
switch (cmd) {
case SIOCGLAGG:
LAGG_SLOCK(sc);
LAGG_XLOCK(sc);
buflen = sc->sc_count * sizeof(struct lagg_reqport);
outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
ra->ra_proto = sc->sc_proto;
@@ -1132,7 +1141,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
count = 0;
buf = outbuf;
len = min(ra->ra_size, buflen);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (len < sizeof(rpbuf))
break;
@@ -1142,7 +1151,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
buf += sizeof(rpbuf);
len -= sizeof(rpbuf);
}
LAGG_SUNLOCK(sc);
LAGG_XUNLOCK(sc);
ra->ra_ports = count;
ra->ra_size = count * sizeof(rpbuf);
error = copyout(outbuf, ra->ra_port, ra->ra_size);
@@ -1158,14 +1167,13 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
}
LAGG_XLOCK(sc);
LAGG_WLOCK(sc);
lagg_proto_detach(sc);
LAGG_UNLOCK_ASSERT(sc);
LAGG_UNLOCK_ASSERT();
lagg_proto_attach(sc, ra->ra_proto);
LAGG_XUNLOCK(sc);
break;
case SIOCGLAGGOPTS:
LAGG_SLOCK(sc);
LAGG_XLOCK(sc);
ro->ro_opts = sc->sc_opts;
if (sc->sc_proto == LAGG_PROTO_LACP) {
struct lacp_softc *lsc;
@@ -1183,13 +1191,13 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
ro->ro_active = sc->sc_active;
} else {
ro->ro_active = 0;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
ro->ro_active += LAGG_PORTACTIVE(lp);
}
ro->ro_bkt = sc->sc_bkt;
ro->ro_flapping = sc->sc_flapping;
ro->ro_flowid_shift = sc->flowid_shift;
LAGG_SUNLOCK(sc);
LAGG_XUNLOCK(sc);
break;
case SIOCSLAGGOPTS:
if (sc->sc_proto == LAGG_PROTO_ROUNDROBIN) {
@@ -1296,14 +1304,14 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
case SIOCGLAGGFLAGS:
rf->rf_flags = 0;
LAGG_SLOCK(sc);
LAGG_XLOCK(sc);
if (sc->sc_flags & MBUF_HASHFLAG_L2)
rf->rf_flags |= LAGG_F_HASHL2;
if (sc->sc_flags & MBUF_HASHFLAG_L3)
rf->rf_flags |= LAGG_F_HASHL3;
if (sc->sc_flags & MBUF_HASHFLAG_L4)
rf->rf_flags |= LAGG_F_HASHL4;
LAGG_SUNLOCK(sc);
LAGG_XUNLOCK(sc);
break;
case SIOCSLAGGHASH:
error = priv_check(td, PRIV_NET_LAGG);
@@ -1330,17 +1338,17 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
LAGG_SLOCK(sc);
LAGG_RLOCK();
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
lp->lp_softc != sc) {
error = ENOENT;
LAGG_SUNLOCK(sc);
LAGG_RUNLOCK();
if_rele(tpif);
break;
}
lagg_port2req(lp, rp);
LAGG_SUNLOCK(sc);
LAGG_RUNLOCK();
if_rele(tpif);
break;
case SIOCSLAGGPORT:
@@ -1405,7 +1413,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCSIFFLAGS:
/* Set flags on ports too */
LAGG_XLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
lagg_setflags(lp, 1);
}
@@ -1430,12 +1438,12 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
LAGG_WLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
LAGG_XLOCK(sc);
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
lagg_clrmulti(lp);
lagg_setmulti(lp);
}
LAGG_WUNLOCK(sc);
LAGG_XUNLOCK(sc);
error = 0;
break;
case SIOCSIFMEDIA:
@@ -1445,7 +1453,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
case SIOCSIFCAP:
LAGG_XLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (lp->lp_ioctl != NULL)
(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
}
@@ -1523,7 +1531,6 @@ lagg_setmulti(struct lagg_port *lp)
struct ifmultiaddr *ifma;
int error;
LAGG_WLOCK_ASSERT(sc);
IF_ADDR_WLOCK(scifp);
TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
@@ -1554,7 +1561,7 @@ lagg_clrmulti(struct lagg_port *lp)
{
struct lagg_mc *mc;
LAGG_WLOCK_ASSERT(lp->lp_softc);
LAGG_XLOCK_ASSERT(lp->lp_softc);
while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
if (mc->mc_ifma && lp->lp_detaching == 0)
@@ -1635,15 +1642,14 @@ lagg_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
int error, len, mcast;
struct rm_priotracker tracker;
len = m->m_pkthdr.len;
mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
LAGG_RLOCK(sc, &tracker);
LAGG_RLOCK();
/* We need a Tx algorithm and at least one port */
if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
LAGG_RUNLOCK(sc, &tracker);
LAGG_RUNLOCK();
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENXIO);
@@ -1652,7 +1658,7 @@ lagg_transmit(struct ifnet *ifp, struct mbuf *m)
ETHER_BPF_MTAP(ifp, m);
error = lagg_proto_start(sc, m);
LAGG_RUNLOCK(sc, &tracker);
LAGG_RUNLOCK();
if (error != 0)
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
@@ -1674,13 +1680,12 @@ lagg_input(struct ifnet *ifp, struct mbuf *m)
struct lagg_port *lp = ifp->if_lagg;
struct lagg_softc *sc = lp->lp_softc;
struct ifnet *scifp = sc->sc_ifp;
struct rm_priotracker tracker;
LAGG_RLOCK(sc, &tracker);
LAGG_RLOCK();
if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
(lp->lp_flags & LAGG_PORT_DISABLED) ||
sc->sc_proto == LAGG_PROTO_NONE) {
LAGG_RUNLOCK(sc, &tracker);
LAGG_RUNLOCK();
m_freem(m);
return (NULL);
}
@@ -1700,7 +1705,7 @@ lagg_input(struct ifnet *ifp, struct mbuf *m)
}
}
LAGG_RUNLOCK(sc, &tracker);
LAGG_RUNLOCK();
return (m);
}
@@ -1725,12 +1730,12 @@ lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
imr->ifm_status = IFM_AVALID;
imr->ifm_active = IFM_ETHER | IFM_AUTO;
LAGG_SLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
LAGG_RLOCK();
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (LAGG_PORTACTIVE(lp))
imr->ifm_status |= IFM_ACTIVE;
}
LAGG_SUNLOCK(sc);
LAGG_RUNLOCK();
}
static void
@@ -1743,12 +1748,14 @@ lagg_linkstate(struct lagg_softc *sc)
LAGG_XLOCK_ASSERT(sc);
/* Our link is considered up if at least one of our ports is active */
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
LAGG_RLOCK();
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
new_link = LINK_STATE_UP;
break;
}
}
LAGG_RUNLOCK();
if_link_state_change(sc->sc_ifp, new_link);
/* Update if_baudrate to reflect the max possible speed */
@@ -1761,8 +1768,10 @@ lagg_linkstate(struct lagg_softc *sc)
case LAGG_PROTO_LOADBALANCE:
case LAGG_PROTO_BROADCAST:
speed = 0;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
LAGG_RLOCK();
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
speed += lp->lp_ifp->if_baudrate;
LAGG_RUNLOCK();
sc->sc_ifp->if_baudrate = speed;
break;
case LAGG_PROTO_LACP:
@@ -1809,14 +1818,16 @@ lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
goto found;
}
search:
SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
search:
LAGG_RLOCK();
CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
if (LAGG_PORTACTIVE(lp_next)) {
LAGG_RUNLOCK();
rval = lp_next;
goto found;
}
}
LAGG_RUNLOCK();
found:
return (rval);
}
@@ -1898,7 +1909,8 @@ lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
struct lagg_port *lp, *last = NULL;
struct mbuf *m0;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
LAGG_RLOCK();
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (!LAGG_PORTACTIVE(lp))
continue;
@@ -1918,6 +1930,8 @@ lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
}
last = lp;
}
LAGG_RUNLOCK();
if (last == NULL) {
m_freem(m);
return (ENOENT);
@@ -2001,11 +2015,12 @@ lagg_lb_attach(struct lagg_softc *sc)
struct lagg_port *lp;
struct lagg_lb *lb;
LAGG_XLOCK_ASSERT(sc);
lb = malloc(sizeof(struct lagg_lb), M_DEVBUF, M_WAITOK | M_ZERO);
lb->lb_key = m_ether_tcpip_hash_init();
sc->sc_psc = lb;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
lagg_lb_port_create(lp);
}
@@ -2015,7 +2030,6 @@ lagg_lb_detach(struct lagg_softc *sc)
struct lagg_lb *lb;
lb = (struct lagg_lb *)sc->sc_psc;
LAGG_WUNLOCK(sc);
if (lb != NULL)
free(lb, M_DEVBUF);
}
@@ -2028,7 +2042,8 @@ lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
int i = 0;
bzero(&lb->lb_ports, sizeof(lb->lb_ports));
SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
LAGG_RLOCK();
CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
if (lp_next == lp)
continue;
if (i >= LAGG_MAX_PORTS)
@@ -2038,6 +2053,7 @@ lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
lb->lb_ports[i++] = lp_next;
}
LAGG_RUNLOCK();
return (0);
}
@@ -2104,7 +2120,8 @@ lagg_lacp_attach(struct lagg_softc *sc)
struct lagg_port *lp;
lacp_attach(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
LAGG_XLOCK_ASSERT(sc);
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
lacp_port_create(lp);
}
@@ -2114,13 +2131,12 @@ lagg_lacp_detach(struct lagg_softc *sc)
struct lagg_port *lp;
void *psc;
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
LAGG_XLOCK_ASSERT(sc);
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
lacp_port_destroy(lp);
psc = sc->sc_psc;
sc->sc_psc = NULL;
LAGG_WUNLOCK(sc);
lacp_detach(psc);
}
@@ -2132,11 +2148,11 @@ lagg_lacp_lladdr(struct lagg_softc *sc)
LAGG_SXLOCK_ASSERT(sc);
/* purge all the lacp ports */
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
lacp_port_destroy(lp);
/* add them back in */
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
lacp_port_create(lp);
}
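To summarize the division of labor in the diff above: readers run under the
epoch, while all modifications still serialize on the sx lock. A brief sketch
of the write-side removal pattern (example_port_remove() is illustrative, not
part of the commit):

/*
 * Writers take the exclusive sx lock; epoch readers keep running.
 * CK_SLIST_REMOVE() keeps the list walkable at every instant, and the
 * removed port is handed to epoch_call() for deferred reclamation.
 */
static void
example_port_remove(struct lagg_softc *sc, struct lagg_port *lp)
{
	LAGG_XLOCK(sc);
	CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	epoch_call(net_epoch, &lp->lp_epoch_ctx, lagg_port_destroy_cb);
	LAGG_XUNLOCK(sc);
}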

sys/net/if_lagg.h

@@ -253,28 +253,9 @@ struct lagg_port {
struct lagg_counters port_counters; /* ifp counters copy */
SLIST_ENTRY(lagg_port) lp_entries;
struct epoch_context lp_epoch_ctx;
};
#define LAGG_LOCK_INIT(_sc) rm_init(&(_sc)->sc_mtx, "if_lagg rmlock")
#define LAGG_LOCK_DESTROY(_sc) rm_destroy(&(_sc)->sc_mtx)
#define LAGG_RLOCK(_sc, _p) rm_rlock(&(_sc)->sc_mtx, (_p))
#define LAGG_WLOCK(_sc) rm_wlock(&(_sc)->sc_mtx)
#define LAGG_RUNLOCK(_sc, _p) rm_runlock(&(_sc)->sc_mtx, (_p))
#define LAGG_WUNLOCK(_sc) rm_wunlock(&(_sc)->sc_mtx)
#define LAGG_RLOCK_ASSERT(_sc) rm_assert(&(_sc)->sc_mtx, RA_RLOCKED)
#define LAGG_WLOCK_ASSERT(_sc) rm_assert(&(_sc)->sc_mtx, RA_WLOCKED)
#define LAGG_UNLOCK_ASSERT(_sc) rm_assert(&(_sc)->sc_mtx, RA_UNLOCKED)
#define LAGG_SX_INIT(_sc) sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define LAGG_SX_DESTROY(_sc) sx_destroy(&(_sc)->sc_sx)
#define LAGG_SLOCK(_sc) sx_slock(&(_sc)->sc_sx)
#define LAGG_XLOCK(_sc) sx_xlock(&(_sc)->sc_sx)
#define LAGG_SUNLOCK(_sc) sx_sunlock(&(_sc)->sc_sx)
#define LAGG_XUNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
#define LAGG_SXLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SA_LOCKED)
#define LAGG_SLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SA_SLOCKED)
#define LAGG_XLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
extern struct mbuf *(*lagg_input_p)(struct ifnet *, struct mbuf *);
extern void (*lagg_linkstate_p)(struct ifnet *, int );
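The lp_epoch_ctx member added above is what enables the deferred free:
epoch_call() queues a callback that runs only after every thread currently
inside the epoch has left it. A self-contained sketch of the idiom (the
example_* names are illustrative, not part of the commit):

struct example_obj {
	int			 data;
	struct epoch_context	 ctx;	/* must stay valid until the callback runs */
};

static void
example_free_cb(epoch_context_t ec)
{
	struct example_obj *obj;

	/* Recover the enclosing object from the embedded context. */
	obj = __containerof(ec, struct example_obj, ctx);
	free(obj, M_DEVBUF);
}

static void
example_retire(struct example_obj *obj)
{
	/* Called after obj has been unlinked from reader-visible lists. */
	epoch_call(net_epoch, &obj->ctx, example_free_cb);
}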