Remove nested epochs from lagg(4).
lagg_bcast_start appeared to have a bug in that it was using the last lagg port structure after exiting the epoch that was keeping that structure alive. However, upon further inspection, the epoch was already entered by the caller (lagg_transmit), so the epoch enter/exit in lagg_bcast_start was actually unnecessary. This commit generally removes uses of the net epoch via LAGG_RLOCK to protect the list of ports when the list of ports was already protected by an existing LAGG_RLOCK in a caller, or the LAGG_XLOCK. It also adds a missing epoch enter/exit in lagg_snd_tag_alloc while accessing the lagg port structures. An ifp is still accessed via an unsafe reference after the epoch is exited, but that is true in the current code and will be fixed in a future change. Reviewed by: gallatin MFC after: 1 month Sponsored by: Netflix Differential Revision: https://reviews.freebsd.org/D19718
This commit is contained in:
parent
100b034530
commit
2f59b04af1
@ -751,7 +751,6 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
|
|||||||
* is predictable and `ifconfig laggN create ...` command
|
* is predictable and `ifconfig laggN create ...` command
|
||||||
* will lead to the same result each time.
|
* will lead to the same result each time.
|
||||||
*/
|
*/
|
||||||
LAGG_RLOCK();
|
|
||||||
CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
|
CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
|
||||||
if (tlp->lp_ifp->if_index < ifp->if_index && (
|
if (tlp->lp_ifp->if_index < ifp->if_index && (
|
||||||
CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
|
CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
|
||||||
@ -759,7 +758,6 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
|
|||||||
ifp->if_index))
|
ifp->if_index))
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
LAGG_RUNLOCK();
|
|
||||||
if (tlp != NULL)
|
if (tlp != NULL)
|
||||||
CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
|
CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
|
||||||
else
|
else
|
||||||
@ -1537,14 +1535,17 @@ lagg_snd_tag_alloc(struct ifnet *ifp,
|
|||||||
struct lagg_lb *lb;
|
struct lagg_lb *lb;
|
||||||
uint32_t p;
|
uint32_t p;
|
||||||
|
|
||||||
|
LAGG_RLOCK();
|
||||||
switch (sc->sc_proto) {
|
switch (sc->sc_proto) {
|
||||||
case LAGG_PROTO_FAILOVER:
|
case LAGG_PROTO_FAILOVER:
|
||||||
lp = lagg_link_active(sc, sc->sc_primary);
|
lp = lagg_link_active(sc, sc->sc_primary);
|
||||||
break;
|
break;
|
||||||
case LAGG_PROTO_LOADBALANCE:
|
case LAGG_PROTO_LOADBALANCE:
|
||||||
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
|
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
|
||||||
params->hdr.flowtype == M_HASHTYPE_NONE)
|
params->hdr.flowtype == M_HASHTYPE_NONE) {
|
||||||
|
LAGG_RUNLOCK();
|
||||||
return (EOPNOTSUPP);
|
return (EOPNOTSUPP);
|
||||||
|
}
|
||||||
p = params->hdr.flowid >> sc->flowid_shift;
|
p = params->hdr.flowid >> sc->flowid_shift;
|
||||||
p %= sc->sc_count;
|
p %= sc->sc_count;
|
||||||
lb = (struct lagg_lb *)sc->sc_psc;
|
lb = (struct lagg_lb *)sc->sc_psc;
|
||||||
@ -1553,16 +1554,22 @@ lagg_snd_tag_alloc(struct ifnet *ifp,
|
|||||||
break;
|
break;
|
||||||
case LAGG_PROTO_LACP:
|
case LAGG_PROTO_LACP:
|
||||||
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
|
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
|
||||||
params->hdr.flowtype == M_HASHTYPE_NONE)
|
params->hdr.flowtype == M_HASHTYPE_NONE) {
|
||||||
|
LAGG_RUNLOCK();
|
||||||
return (EOPNOTSUPP);
|
return (EOPNOTSUPP);
|
||||||
|
}
|
||||||
lp = lacp_select_tx_port_by_hash(sc, params->hdr.flowid);
|
lp = lacp_select_tx_port_by_hash(sc, params->hdr.flowid);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
LAGG_RUNLOCK();
|
||||||
return (EOPNOTSUPP);
|
return (EOPNOTSUPP);
|
||||||
}
|
}
|
||||||
if (lp == NULL)
|
if (lp == NULL) {
|
||||||
|
LAGG_RUNLOCK();
|
||||||
return (EOPNOTSUPP);
|
return (EOPNOTSUPP);
|
||||||
|
}
|
||||||
ifp = lp->lp_ifp;
|
ifp = lp->lp_ifp;
|
||||||
|
LAGG_RUNLOCK();
|
||||||
if (ifp == NULL || ifp->if_snd_tag_alloc == NULL ||
|
if (ifp == NULL || ifp->if_snd_tag_alloc == NULL ||
|
||||||
(ifp->if_capenable & IFCAP_TXRTLMT) == 0)
|
(ifp->if_capenable & IFCAP_TXRTLMT) == 0)
|
||||||
return (EOPNOTSUPP);
|
return (EOPNOTSUPP);
|
||||||
@ -1853,12 +1860,18 @@ struct lagg_port *
|
|||||||
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
|
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
|
||||||
{
|
{
|
||||||
struct lagg_port *lp_next, *rval = NULL;
|
struct lagg_port *lp_next, *rval = NULL;
|
||||||
struct epoch_tracker net_et;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Search a port which reports an active link state.
|
* Search a port which reports an active link state.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This is called with either LAGG_RLOCK() held or
|
||||||
|
* LAGG_XLOCK(sc) held.
|
||||||
|
*/
|
||||||
|
if (!in_epoch(net_epoch_preempt))
|
||||||
|
LAGG_XLOCK_ASSERT(sc);
|
||||||
|
|
||||||
if (lp == NULL)
|
if (lp == NULL)
|
||||||
goto search;
|
goto search;
|
||||||
if (LAGG_PORTACTIVE(lp)) {
|
if (LAGG_PORTACTIVE(lp)) {
|
||||||
@ -1871,15 +1884,12 @@ lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
|
|||||||
goto found;
|
goto found;
|
||||||
}
|
}
|
||||||
|
|
||||||
search:
|
search:
|
||||||
epoch_enter_preempt(net_epoch_preempt, &net_et);
|
|
||||||
CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
|
CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
|
||||||
if (LAGG_PORTACTIVE(lp_next)) {
|
if (LAGG_PORTACTIVE(lp_next)) {
|
||||||
epoch_exit_preempt(net_epoch_preempt, &net_et);
|
|
||||||
return (lp_next);
|
return (lp_next);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
epoch_exit_preempt(net_epoch_preempt, &net_et);
|
|
||||||
found:
|
found:
|
||||||
return (rval);
|
return (rval);
|
||||||
}
|
}
|
||||||
@ -1961,7 +1971,7 @@ lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
|
|||||||
struct lagg_port *lp, *last = NULL;
|
struct lagg_port *lp, *last = NULL;
|
||||||
struct mbuf *m0;
|
struct mbuf *m0;
|
||||||
|
|
||||||
LAGG_RLOCK();
|
LAGG_RLOCK_ASSERT();
|
||||||
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
||||||
if (!LAGG_PORTACTIVE(lp))
|
if (!LAGG_PORTACTIVE(lp))
|
||||||
continue;
|
continue;
|
||||||
@ -1982,7 +1992,6 @@ lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
|
|||||||
}
|
}
|
||||||
last = lp;
|
last = lp;
|
||||||
}
|
}
|
||||||
LAGG_RUNLOCK();
|
|
||||||
|
|
||||||
if (last == NULL) {
|
if (last == NULL) {
|
||||||
m_freem(m);
|
m_freem(m);
|
||||||
@ -2095,7 +2104,7 @@ lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
|
|||||||
|
|
||||||
rv = 0;
|
rv = 0;
|
||||||
bzero(&lb->lb_ports, sizeof(lb->lb_ports));
|
bzero(&lb->lb_ports, sizeof(lb->lb_ports));
|
||||||
LAGG_RLOCK();
|
LAGG_XLOCK_ASSERT(sc);
|
||||||
CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
|
CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
|
||||||
if (lp_next == lp)
|
if (lp_next == lp)
|
||||||
continue;
|
continue;
|
||||||
@ -2108,7 +2117,6 @@ lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
|
|||||||
sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
|
sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
|
||||||
lb->lb_ports[i++] = lp_next;
|
lb->lb_ports[i++] = lp_next;
|
||||||
}
|
}
|
||||||
LAGG_RUNLOCK();
|
|
||||||
|
|
||||||
return (rv);
|
return (rv);
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user