Change from a mutex to a read/write lock. This allows the tx port to be
selected simultaneously by multiple senders, and transmit/receive is not
serialised between aggregated interfaces.
This commit is contained in:
Andrew Thompson 2007-05-15 07:41:46 +00:00
parent 9942d77f89
commit 3bf517e389
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=169569
4 changed files with 133 additions and 84 deletions

View File

@ -38,6 +38,9 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <machine/stdarg.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_dl.h>
@ -111,6 +114,9 @@ static void lacp_aggregator_delref(struct lacp_softc *,
/* receive machine */
static void lacp_dequeue(void *, int);
static int lacp_pdu_input(struct lagg_port *, struct mbuf *);
static int lacp_marker_input(struct lagg_port *, struct mbuf *);
static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
static void lacp_sm_rx_timer(struct lacp_port *);
static void lacp_sm_rx_set_expired(struct lacp_port *);
@ -202,17 +208,66 @@ static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};
/*
 * lacp_input: process lacpdu
 *
 * Entry point for slow-protocol frames received on a lagg port.  LACP
 * PDUs are queued and deferred to a software-interrupt taskqueue
 * (lacp_dequeue) so the heavy state-machine work runs outside the
 * receive path; marker PDUs are handled inline.  Consumes the mbuf on
 * every path.
 *
 * NOTE(review): this is a diff rendering — the "int" line below is the
 * removed old return type and "void" is its replacement; only one of
 * the two exists in the actual source.
 */
int
void
lacp_input(struct lagg_port *lgp, struct mbuf *m)
{
struct lagg_softc *lgs = lgp->lp_lagg;
struct lacp_softc *lsc = LACP_SOFTC(lgs);
uint8_t subtype;
/* Too short to even carry a slow-protocols subtype byte: drop. */
if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
m_freem(m);
return;
}
m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
switch (subtype) {
case SLOWPROTOCOLS_SUBTYPE_LACP:
/*
 * Queue the PDU and kick the swi taskqueue; lacp_dequeue will
 * take the lagg write lock and run lacp_pdu_input on it.
 * IF_HANDOFF frees the mbuf itself if the queue is full.
 */
IF_HANDOFF(&lsc->lsc_queue, m, NULL);
taskqueue_enqueue(taskqueue_swi, &lsc->lsc_qtask);
break;
case SLOWPROTOCOLS_SUBTYPE_MARKER:
/* Markers are cheap; answer them inline (see lacp_marker_input). */
lacp_marker_input(lgp, m);
break;
default:
/* Unknown LACP packet type */
m_freem(m);
break;
}
}
/*
 * lacp_dequeue: taskqueue handler that drains the LACPDU input queue.
 *
 * Runs from taskqueue_swi (scheduled by lacp_input).  Takes the lagg
 * write lock once and feeds every queued PDU to lacp_pdu_input, so the
 * state machines never run with only the read lock held.  The receiving
 * port is recovered from the mbuf's rcvif.  "pending" is the standard
 * taskqueue argument and is unused here.
 */
static void
lacp_dequeue(void *arg, int pending)
{
struct lacp_softc *lsc = (struct lacp_softc *)arg;
struct lagg_softc *sc = lsc->lsc_lagg;
struct lagg_port *lgp;
struct mbuf *m;
LAGG_WLOCK(sc);
for (;;) {
IF_DEQUEUE(&lsc->lsc_queue, m);
if (m == NULL)
break;
/* Map the mbuf back to the lagg port it arrived on. */
lgp = m->m_pkthdr.rcvif->if_lagg;
/* lacp_pdu_input consumes the mbuf on all paths. */
lacp_pdu_input(lgp, m);
}
LAGG_WUNLOCK(sc);
}
/*
* lacp_pdu_input: process lacpdu
*/
static int
lacp_pdu_input(struct lagg_port *lgp, struct mbuf *m)
{
struct lacp_port *lp = LACP_PORT(lgp);
struct lacpdu *du;
int error = 0;
LAGG_LOCK_ASSERT(lgp->lp_lagg);
LAGG_WLOCK_ASSERT(lgp->lp_lagg);
if (__predict_false(lp->lp_flags & LACP_PORT_DETACHING)) {
goto bad;
@ -303,7 +358,7 @@ lacp_xmit_lacpdu(struct lacp_port *lp)
struct lacpdu *du;
int error;
LAGG_LOCK_ASSERT(lgp->lp_lagg);
LAGG_WLOCK_ASSERT(lgp->lp_lagg);
m = m_gethdr(M_DONTWAIT, MT_DATA);
if (m == NULL) {
@ -360,7 +415,7 @@ lacp_linkstate(struct lagg_port *lgp)
uint8_t old_state;
uint16_t old_key;
LAGG_LOCK_ASSERT(lgp->lp_lagg);
LAGG_WLOCK_ASSERT(lgp->lp_lagg);
bzero((char *)&ifmr, sizeof(ifmr));
error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
@ -397,8 +452,10 @@ static void
lacp_tick(void *arg)
{
struct lacp_softc *lsc = arg;
struct lagg_softc *sc = lsc->lsc_lagg;
struct lacp_port *lp;
LAGG_WLOCK(sc);
LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
continue;
@ -410,6 +467,7 @@ lacp_tick(void *arg)
lacp_sm_tx(lp);
lacp_sm_ptx_tx_schedule(lp);
}
LAGG_WUNLOCK(sc);
callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}
@ -427,7 +485,7 @@ lacp_port_create(struct lagg_port *lgp)
boolean_t active = TRUE; /* XXX should be configurable */
boolean_t fast = FALSE; /* XXX should be configurable */
LAGG_LOCK_ASSERT(lgs);
LAGG_WLOCK_ASSERT(lgs);
bzero((char *)&sdl, sizeof(sdl));
sdl.sdl_len = sizeof(sdl);
@ -474,7 +532,7 @@ lacp_port_destroy(struct lagg_port *lgp)
struct lacp_port *lp = LACP_PORT(lgp);
int i;
LAGG_LOCK_ASSERT(lgp->lp_lagg);
LAGG_WLOCK_ASSERT(lgp->lp_lagg);
for (i = 0; i < LACP_NTIMER; i++) {
LACP_TIMER_DISARM(lp, i);
@ -539,7 +597,7 @@ lacp_disable_distributing(struct lacp_port *lp)
char buf[LACP_LAGIDSTR_MAX+1];
#endif /* defined(LACP_DEBUG) */
LAGG_LOCK_ASSERT(lgp->lp_lagg);
LAGG_WLOCK_ASSERT(lgp->lp_lagg);
if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
return;
@ -577,7 +635,7 @@ lacp_enable_distributing(struct lacp_port *lp)
char buf[LACP_LAGIDSTR_MAX+1];
#endif /* defined(LACP_DEBUG) */
LAGG_LOCK_ASSERT(lgp->lp_lagg);
LAGG_WLOCK_ASSERT(lgp->lp_lagg);
if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
return;
@ -616,7 +674,7 @@ lacp_attach(struct lagg_softc *lgs)
{
struct lacp_softc *lsc;
LAGG_LOCK_ASSERT(lgs);
LAGG_WLOCK_ASSERT(lgs);
lsc = malloc(sizeof(struct lacp_softc),
M_DEVBUF, M_NOWAIT|M_ZERO);
@ -631,8 +689,12 @@ lacp_attach(struct lagg_softc *lgs)
TAILQ_INIT(&lsc->lsc_aggregators);
LIST_INIT(&lsc->lsc_ports);
callout_init_mtx(&lsc->lsc_transit_callout, &lgs->sc_mtx, 0);
callout_init_mtx(&lsc->lsc_callout, &lgs->sc_mtx, 0);
TASK_INIT(&lsc->lsc_qtask, 0, lacp_dequeue, lsc);
mtx_init(&lsc->lsc_queue.ifq_mtx, "lacp queue", NULL, MTX_DEF);
lsc->lsc_queue.ifq_maxlen = ifqmaxlen;
callout_init(&lsc->lsc_transit_callout, CALLOUT_MPSAFE);
callout_init(&lsc->lsc_callout, CALLOUT_MPSAFE);
/* if the lagg is already up then do the same */
if (lgs->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
@ -654,6 +716,9 @@ lacp_detach(struct lagg_softc *lgs)
lgs->sc_psc = NULL;
callout_drain(&lsc->lsc_transit_callout);
callout_drain(&lsc->lsc_callout);
taskqueue_drain(taskqueue_swi, &lsc->lsc_qtask);
IF_DRAIN(&lsc->lsc_queue);
mtx_destroy(&lsc->lsc_queue.ifq_mtx);
free(lsc, M_DEVBUF);
return (0);
@ -685,7 +750,7 @@ lacp_select_tx_port(struct lagg_softc *lgs, struct mbuf *m)
uint32_t hash;
int nports;
LAGG_LOCK_ASSERT(lgs);
LAGG_WLOCK_ASSERT(lgs);
if (__predict_false(lsc->lsc_suppress_distributing)) {
LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
@ -1538,7 +1603,7 @@ lacp_marker_input(struct lagg_port *lgp, struct mbuf *m)
struct markerdu *mdu;
int error = 0;
LAGG_LOCK_ASSERT(lgp->lp_lagg);
LAGG_RLOCK_ASSERT(lgp->lp_lagg);
if (__predict_false(lp->lp_flags & LACP_PORT_DETACHING)) {
goto bad;

View File

@ -214,6 +214,8 @@ struct lacp_softc {
struct callout lsc_callout;
LIST_HEAD(, lacp_port) lsc_ports;
u_int32_t lsc_hashkey;
struct task lsc_qtask;
struct ifqueue lsc_queue; /* pdu input queue */
};
#define LACP_TYPE_ACTORINFO 1
@ -262,8 +264,7 @@ struct markerdu {
#define LACP_PORT(_lp) ((struct lacp_port *)(_lp)->lp_psc)
#define LACP_SOFTC(_sc) ((struct lacp_softc *)(_sc)->sc_psc)
int lacp_input(struct lagg_port *, struct mbuf *);
int lacp_marker_input(struct lagg_port *, struct mbuf *);
void lacp_input(struct lagg_port *, struct mbuf *);
struct lagg_port *lacp_select_tx_port(struct lagg_softc *, struct mbuf *);
int lacp_attach(struct lagg_softc *);
int lacp_detach(struct lagg_softc *);

View File

@ -35,6 +35,8 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>
#include <net/ethernet.h>
@ -262,7 +264,7 @@ lagg_clone_destroy(struct ifnet *ifp)
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
struct lagg_port *lp;
LAGG_LOCK(sc);
LAGG_WLOCK(sc);
lagg_stop(sc);
ifp->if_flags &= ~IFF_UP;
@ -274,7 +276,7 @@ lagg_clone_destroy(struct ifnet *ifp)
if (sc->sc_detach != NULL)
(*sc->sc_detach)(sc);
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
ifmedia_removeall(&sc->sc_media);
ether_ifdetach(ifp);
@ -309,7 +311,7 @@ lagg_capabilities(struct lagg_softc *sc)
struct lagg_port *lp;
int cap = ~0, priv;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
/* Preserve private capabilities */
priv = sc->sc_capabilities & IFCAP_LAGG_MASK;
@ -334,7 +336,7 @@ lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
struct lagg_llq *llq;
int pending = 0;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
if (lp->lp_detaching ||
memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
@ -376,10 +378,10 @@ lagg_port_setlladdr(void *arg, int pending)
int error;
/* Grab a local reference of the queue and remove it from the softc */
LAGG_LOCK(sc);
LAGG_WLOCK(sc);
head = SLIST_FIRST(&sc->sc_llq_head);
SLIST_FIRST(&sc->sc_llq_head) = NULL;
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
/*
* Traverse the queue and set the lladdr on each ifp. It is safe to do
@ -406,7 +408,7 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
struct lagg_port *lp;
int error = 0;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
/* Limit the maximal number of lagg ports */
if (sc->sc_count >= LAGG_MAX_PORTS)
@ -500,7 +502,7 @@ lagg_port_checkstacking(struct lagg_softc *sc)
struct lagg_port *lp;
int m = 0;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (lp->lp_flags & LAGG_PORT_STACK) {
@ -520,7 +522,7 @@ lagg_port_destroy(struct lagg_port *lp, int runpd)
struct lagg_llq *llq;
struct ifnet *ifp = lp->lp_ifp;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
if (runpd && sc->sc_port_destroy != NULL)
(*sc->sc_port_destroy)(lp);
@ -601,7 +603,7 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
switch (cmd) {
case SIOCGLAGGPORT:
LAGG_LOCK(sc);
LAGG_RLOCK(sc);
if (rp->rp_portname[0] == '\0' ||
ifunit(rp->rp_portname) != ifp) {
error = EINVAL;
@ -614,7 +616,7 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
}
lagg_port2req(lp, rp);
LAGG_UNLOCK(sc);
LAGG_RUNLOCK(sc);
break;
default:
goto fallback;
@ -670,10 +672,10 @@ lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
sc = lp->lp_lagg;
LAGG_LOCK(sc);
LAGG_WLOCK(sc);
lp->lp_detaching = 1;
lagg_port_destroy(lp, 1);
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
}
static void
@ -717,7 +719,7 @@ lagg_init(void *xsc)
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return;
LAGG_LOCK(sc);
LAGG_WLOCK(sc);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
/* Update the port lladdrs */
@ -727,7 +729,7 @@ lagg_init(void *xsc)
if (sc->sc_init != NULL)
(*sc->sc_init)(sc);
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
}
static void
@ -735,7 +737,7 @@ lagg_stop(struct lagg_softc *sc)
{
struct ifnet *ifp = sc->sc_ifp;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
@ -758,7 +760,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
struct thread *td = curthread;
int i, error = 0, unlock = 1;
LAGG_LOCK(sc);
LAGG_WLOCK(sc);
bzero(&rpbuf, sizeof(rpbuf));
@ -881,7 +883,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
* If interface is marked up and it is stopped, then
* start it.
*/
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
unlock = 0;
(*ifp->if_init)(sc);
}
@ -892,12 +894,12 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
unlock = 0;
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
break;
default:
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
unlock = 0;
error = ether_ioctl(ifp, cmd, data);
break;
@ -905,7 +907,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
out:
if (unlock)
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
return (error);
}
@ -914,7 +916,7 @@ lagg_ether_setmulti(struct lagg_softc *sc)
{
struct lagg_port *lp;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
/* First, remove any existing filter entries. */
@ -936,7 +938,7 @@ lagg_ether_cmdmulti(struct lagg_port *lp, int set)
struct sockaddr_dl sdl;
int error;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
bzero((char *)&sdl, sizeof(sdl));
sdl.sdl_len = sizeof(sdl);
@ -981,7 +983,7 @@ lagg_setflag(struct lagg_port *lp, int flag, int status,
struct ifnet *ifp = lp->lp_ifp;
int error;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
status = status ? (trifp->if_flags & flag) : 0;
/* Now "status" contains the flag value or 0 */
@ -1031,6 +1033,7 @@ lagg_start(struct ifnet *ifp)
struct mbuf *m;
int error = 0;
LAGG_RLOCK(sc);
for (;; error = 0) {
IFQ_DEQUEUE(&ifp->if_snd, m);
if (m == NULL)
@ -1038,11 +1041,9 @@ lagg_start(struct ifnet *ifp)
BPF_MTAP(ifp, m);
if (sc->sc_proto != LAGG_PROTO_NONE) {
LAGG_LOCK(sc);
if (sc->sc_proto != LAGG_PROTO_NONE)
error = (*sc->sc_start)(sc, m);
LAGG_UNLOCK(sc);
} else
else
m_free(m);
if (error == 0)
@ -1050,6 +1051,7 @@ lagg_start(struct ifnet *ifp)
else
ifp->if_oerrors++;
}
LAGG_RUNLOCK(sc);
return;
}
@ -1068,7 +1070,7 @@ lagg_input(struct ifnet *ifp, struct mbuf *m)
return (NULL);
}
LAGG_LOCK(sc);
LAGG_RLOCK(sc);
BPF_MTAP(trifp, m);
m = (*sc->sc_input)(sc, lp, m);
@ -1080,7 +1082,7 @@ lagg_input(struct ifnet *ifp, struct mbuf *m)
trifp->if_ibytes += m->m_pkthdr.len;
}
LAGG_UNLOCK(sc);
LAGG_RUNLOCK(sc);
return (m);
}
@ -1105,12 +1107,12 @@ lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
imr->ifm_status = IFM_AVALID;
imr->ifm_active = IFM_ETHER | IFM_AUTO;
LAGG_LOCK(sc);
LAGG_RLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (LAGG_PORTACTIVE(lp))
imr->ifm_status |= IFM_ACTIVE;
}
LAGG_UNLOCK(sc);
LAGG_RUNLOCK(sc);
}
static void
@ -1124,10 +1126,10 @@ lagg_port_state(struct ifnet *ifp, int state)
if (sc == NULL)
return;
LAGG_LOCK(sc);
LAGG_WLOCK(sc);
if (sc->sc_linkstate != NULL)
(*sc->sc_linkstate)(lp);
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
}
struct lagg_port *
@ -1136,7 +1138,7 @@ lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
struct lagg_port *lp_next, *rval = NULL;
// int new_link = LINK_STATE_DOWN;
LAGG_LOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
/*
* Search a port which reports an active link state.
*/
@ -1558,9 +1560,9 @@ lagg_lacp_detach(struct lagg_softc *sc)
lacp_port_destroy(lp);
/* unlocking is safe here */
LAGG_UNLOCK(sc);
LAGG_WUNLOCK(sc);
error = lacp_detach(sc);
LAGG_LOCK(sc);
LAGG_WLOCK(sc);
return (error);
}
@ -1598,33 +1600,13 @@ lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
struct ifnet *ifp = sc->sc_ifp;
struct ether_header *eh;
u_short etype;
uint8_t subtype;
eh = mtod(m, struct ether_header *);
etype = ntohs(eh->ether_type);
/* Tap off LACP control messages */
if (etype == ETHERTYPE_SLOW) {
if (m->m_pkthdr.len < sizeof(*eh) + sizeof(subtype)) {
m_freem(m);
return (NULL);
}
m_copydata(m, sizeof(*eh), sizeof(subtype), &subtype);
switch (subtype) {
case SLOWPROTOCOLS_SUBTYPE_LACP:
lacp_input(lp, m);
break;
case SLOWPROTOCOLS_SUBTYPE_MARKER:
lacp_marker_input(lp, m);
break;
default:
/* Unknown LACP packet type */
m_freem(m);
break;
}
lacp_input(lp, m);
return (NULL);
}

View File

@ -149,7 +149,7 @@ struct lagg_llq {
struct lagg_softc {
struct ifnet *sc_ifp; /* virtual interface */
struct mtx sc_mtx;
struct rwlock sc_mtx;
int sc_proto; /* lagg protocol */
u_int sc_count; /* number of ports */
struct lagg_port *sc_primary; /* primary port */
@ -199,13 +199,14 @@ struct lagg_port {
SLIST_ENTRY(lagg_port) lp_entries;
};
#define LAGG_LOCK_INIT(_tr) mtx_init(&(_tr)->sc_mtx, "if_lagg", NULL, \
MTX_DEF)
#define LAGG_LOCK_DESTROY(_tr) mtx_destroy(&(_tr)->sc_mtx)
#define LAGG_LOCK(_tr) mtx_lock(&(_tr)->sc_mtx)
#define LAGG_UNLOCK(_tr) mtx_unlock(&(_tr)->sc_mtx)
#define LAGG_LOCKED(_tr) mtx_owned(&(_tr)->sc_mtx)
#define LAGG_LOCK_ASSERT(_tr) mtx_assert(&(_tr)->sc_mtx, MA_OWNED)
#define LAGG_LOCK_INIT(_sc) rw_init(&(_sc)->sc_mtx, "if_lagg rwlock")
#define LAGG_LOCK_DESTROY(_sc) rw_destroy(&(_sc)->sc_mtx)
#define LAGG_RLOCK(_sc) rw_rlock(&(_sc)->sc_mtx)
#define LAGG_WLOCK(_sc) rw_wlock(&(_sc)->sc_mtx)
#define LAGG_RUNLOCK(_sc) rw_runlock(&(_sc)->sc_mtx)
#define LAGG_WUNLOCK(_sc) rw_wunlock(&(_sc)->sc_mtx)
#define LAGG_RLOCK_ASSERT(_sc) rw_assert(&(_sc)->sc_mtx, RA_RLOCKED)
#define LAGG_WLOCK_ASSERT(_sc) rw_assert(&(_sc)->sc_mtx, RA_WLOCKED)
extern struct mbuf *(*lagg_input_p)(struct ifnet *, struct mbuf *);
extern void (*lagg_linkstate_p)(struct ifnet *, int );