- Avoid lock contention in the if_transmit callback by using a trylock and
  enqueueing the frames when it fails. This removes some latency from the
  transmit path.

- If IFF_DRV_OACTIVE is set (and also if IFF_DRV_RUNNING is not), just
  enqueue the frames and report a successful transmit. This avoids
  returning errors on the transmit side, which could result in
  out-of-order frames. Note that IFF_DRV_OACTIVE is set every time the
  ring threshold is hit, so this can happen quite often.

Submitted by:	Attilio.Rao@isilon.com
MFC after:	5 days
commit ab97207add
parent 75f66cde5c
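Before the diff, here is a minimal userspace model of the trylock-or-enqueue
pattern this commit introduces: a sketch assuming POSIX threads rather than
kernel mtx(9) locks. Every name in it (tx_ring, hw_send, tx_mq_start, ...) is
a hypothetical stand-in, not bxe(4) code. In particular, FreeBSD's buf_ring
(the drbr queue used by the driver) supports concurrent enqueue, while this
toy ring does not; the sketch only illustrates the shape of the control flow.

#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 64

struct tx_ring {
    pthread_mutex_t mtx;              /* models fp->tx_mtx */
    void           *slots[RING_SIZE]; /* models the drbr buf_ring */
    int             head, tail;
};

static void
hw_send(void *frame)
{
    printf("tx: %s\n", (char *)frame); /* stand-in for real DMA setup */
}

/* Queue a frame for later transmission; 0 on success, -1 if full. */
static int
ring_enqueue(struct tx_ring *r, void *frame)
{
    int next = (r->tail + 1) % RING_SIZE;

    if (next == r->head)
        return (-1);
    r->slots[r->tail] = frame;
    r->tail = next;
    return (0);
}

/*
 * Locked transmit path: drain frames queued by threads that lost the
 * trylock race first, then send the new frame, preserving ordering.
 */
static int
tx_start_locked(struct tx_ring *r, void *frame)
{
    while (r->head != r->tail) {
        hw_send(r->slots[r->head]);
        r->head = (r->head + 1) % RING_SIZE;
    }
    if (frame != NULL)
        hw_send(frame);
    return (0);
}

/*
 * if_transmit-style entry point: never block on the lock. If another
 * thread already owns it, that thread will drain the ring, so the new
 * frame is queued instead of waiting for the lock or failing with an
 * error that the stack would have to handle.
 */
static int
tx_mq_start(struct tx_ring *r, void *frame)
{
    int rc;

    if (pthread_mutex_trylock(&r->mtx) == 0) {
        rc = tx_start_locked(r, frame);
        pthread_mutex_unlock(&r->mtx);
    } else {
        rc = ring_enqueue(r, frame);
    }
    return (rc);
}

int
main(void)
{
    struct tx_ring r = { .mtx = PTHREAD_MUTEX_INITIALIZER };

    tx_mq_start(&r, "frame 0");
    tx_mq_start(&r, "frame 1");
    return (0);
}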
@@ -5999,19 +5999,26 @@ bxe_tx_mq_start_locked(struct bxe_softc *sc,
     rc = tx_count = 0;
 
+    BXE_FP_TX_LOCK_ASSERT(fp);
+
     if (!tx_br) {
         BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
         return (EINVAL);
     }
 
+    if (!sc->link_vars.link_up ||
+        (ifp->if_drv_flags &
+        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
+        rc = drbr_enqueue_drv(ifp, tx_br, m);
+        goto bxe_tx_mq_start_locked_exit;
+    }
+
     /* fetch the depth of the driver queue */
     depth = drbr_inuse_drv(ifp, tx_br);
     if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
         fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
     }
 
-    BXE_FP_TX_LOCK_ASSERT(fp);
-
     if (m == NULL) {
         /* no new work, check for pending frames */
         next = drbr_dequeue_drv(ifp, tx_br);
@@ -6103,26 +6110,11 @@ bxe_tx_mq_start(struct ifnet *ifp,
 
     fp = &sc->fp[fp_index];
 
-    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
-        BLOGW(sc, "Interface not running, ignoring transmit request\n");
-        return (ENETDOWN);
-    }
-
-    if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
-        BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n");
-        return (EBUSY);
-    }
-
-    if (!sc->link_vars.link_up) {
-        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
-        return (ENETDOWN);
-    }
-
-    /* XXX change to TRYLOCK here and if failed then schedule taskqueue */
-
-    BXE_FP_TX_LOCK(fp);
-    rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
-    BXE_FP_TX_UNLOCK(fp);
+    if (BXE_FP_TX_TRYLOCK(fp)) {
+        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
+        BXE_FP_TX_UNLOCK(fp);
+    } else
+        rc = drbr_enqueue_drv(ifp, fp->tx_br, m);
 
     return (rc);
 }
@@ -582,6 +582,7 @@ struct bxe_fastpath {
 #define BXE_FP_TX_LOCK(fp)        mtx_lock(&fp->tx_mtx)
 #define BXE_FP_TX_UNLOCK(fp)      mtx_unlock(&fp->tx_mtx)
 #define BXE_FP_TX_LOCK_ASSERT(fp) mtx_assert(&fp->tx_mtx, MA_OWNED)
+#define BXE_FP_TX_TRYLOCK(fp)     mtx_trylock(&fp->tx_mtx)
 
 #define BXE_FP_RX_LOCK(fp)        mtx_lock(&fp->rx_mtx)
 #define BXE_FP_RX_UNLOCK(fp)      mtx_unlock(&fp->rx_mtx)