MFp4 122896

- reduce cpu usage by as much as 25% (40% -> 30%) by doing txq reclaim more efficiently
   - use mtx_trylock when trying to grab the lock to avoid spinning during long encap loop
   - add per-txq reclaim task
   - if mbufs were successfully reclaimed, try another pass
- track txq overruns with sysctl

Approved by: re (blanket)
Author: Kip Macy
Date:   2007-07-10 06:01:45 +00:00
Parent: c108b80c8c
Commit: b8fe6051bf
3 changed files with 78 additions and 42 deletions
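
For readers skimming the diff, the first two bullets in the log amount to a standard taskqueue(9) pattern: each transmit queue gets its own reclaim task, and the task handler takes the queue lock with mtx_trylock(9) so it backs off instead of spinning while the long encap loop holds the lock. Below is a minimal sketch of that pattern; the names (my_txq, my_txq_reclaim_handler, my_txq_init) are hypothetical stand-ins, not driver identifiers. The driver's actual implementation is sge_txq_reclaim_() in the third file of the diff.

    /*
     * Minimal sketch of a per-queue reclaim task with a trylock-guarded
     * handler; all names here are hypothetical, not the cxgb driver's.
     */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/taskqueue.h>

    struct my_txq {
            struct mtx      lock;           /* protects the descriptor ring */
            struct task     reclaim_task;   /* one reclaim task per queue */
            int             reclaimable;    /* completed, not-yet-freed descriptors */
    };

    static void
    my_txq_reclaim_handler(void *arg, int pending)
    {
            struct my_txq *q = arg;

            /*
             * mtx_trylock() returns 0 instead of blocking, so a handler
             * that races with the lock-holding encap loop simply gives up
             * and lets a later enqueue retry, rather than spinning.
             */
            while (q->reclaimable > 0 && mtx_trylock(&q->lock)) {
                    /* free completed descriptors/mbufs here ... */
                    q->reclaimable = 0;     /* stands in for real accounting */
                    mtx_unlock(&q->lock);
            }
    }

    static void
    my_txq_init(struct my_txq *q)
    {
            mtx_init(&q->lock, "my_txq", NULL, MTX_DEF);
            TASK_INIT(&q->reclaim_task, 0, my_txq_reclaim_handler, q);
            /*
             * The transmit path, on seeing enough reclaimable work, does:
             *      taskqueue_enqueue(taskqueue_thread, &q->reclaim_task);
             */
    }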

sys/dev/cxgb/cxgb_adapter.h

@@ -239,6 +239,8 @@ struct sge_fl {
 struct tx_desc;
 struct tx_sw_desc;
+#define TXQ_TRANSMITTING 0x1
 struct sge_txq {
+        uint64_t flags;
         uint32_t in_use;
@@ -254,7 +256,9 @@ struct sge_txq {
         struct tx_sw_desc *sdesc;
         uint32_t token;
         bus_addr_t phys_addr;
-        struct task qresume_tsk;
+        struct task qresume_task;
+        struct task qreclaim_task;
+        struct port_info *port;
         uint32_t cntxt_id;
         uint64_t stops;
         uint64_t restarts;

sys/dev/cxgb/cxgb_main.c

@@ -1613,7 +1613,11 @@ cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
         txq = &qs->txq[TXQ_ETH];
         err = 0;
+        if (txq->flags & TXQ_TRANSMITTING)
+                return (EINPROGRESS);
+
         mtx_lock(&txq->lock);
+        txq->flags |= TXQ_TRANSMITTING;
         in_use_init = txq->in_use;
         while ((txq->in_use - in_use_init < txmax) &&
             (txq->size > txq->in_use + TX_MAX_DESC)) {
@@ -1651,6 +1655,7 @@ cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
                         break;
                 BPF_MTAP(ifp, m);
         }
+        txq->flags &= ~TXQ_TRANSMITTING;
         mtx_unlock(&txq->lock);
         if (__predict_false(err)) {
@@ -1685,8 +1690,7 @@ cxgb_start_proc(void *arg, int ncount)
         do {
                 if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC)
-                        taskqueue_enqueue(pi->adapter->tq,
-                            &pi->timer_reclaim_task);
+                        taskqueue_enqueue(pi->adapter->tq, &txq->qreclaim_task);
                 error = cxgb_start_tx(ifp, TX_START_MAX_DESC);
         } while (error == 0);

sys/dev/cxgb/cxgb_sge.c

@@ -66,6 +66,7 @@ __FBSDID("$FreeBSD$");
 uint32_t collapse_free = 0;
 uint32_t mb_free_vec_free = 0;
+int txq_fills = 0;
 int collapse_mbufs = 0;
 static int recycle_enable = 1;
@@ -186,7 +187,8 @@ int cxgb_debug = 0;
 static void t3_free_qset(adapter_t *sc, struct sge_qset *q);
 static void sge_timer_cb(void *arg);
 static void sge_timer_reclaim(void *arg, int ncount);
-static int free_tx_desc(adapter_t *sc, struct sge_txq *q, int n, struct mbuf **m_vec);
+static void sge_txq_reclaim_handler(void *arg, int ncount);
+static int free_tx_desc(struct sge_txq *q, int n, struct mbuf **m_vec);
 
 /**
  * reclaim_completed_tx - reclaims completed Tx descriptors
@@ -198,14 +200,14 @@ static int free_tx_desc(adapter_t *sc, struct sge_txq *q, int n, struct mbuf **m
  * queue's lock held.
  */
 static __inline int
-reclaim_completed_tx(adapter_t *adapter, struct sge_txq *q, int nbufs, struct mbuf **mvec)
+reclaim_completed_tx(struct sge_txq *q, int nbufs, struct mbuf **mvec)
 {
         int reclaimed, reclaim = desc_reclaimable(q);
         int n = 0;
 
         mtx_assert(&q->lock, MA_OWNED);
         if (reclaim > 0) {
-                n = free_tx_desc(adapter, q, min(reclaim, nbufs), mvec);
+                n = free_tx_desc(q, min(reclaim, nbufs), mvec);
                 reclaimed = min(reclaim, nbufs);
                 q->cleaned += reclaimed;
                 q->in_use -= reclaimed;
@@ -742,6 +744,45 @@ refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
             V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
 }
 
+static __inline void
+sge_txq_reclaim_(struct sge_txq *txq)
+{
+        int reclaimable, i, n;
+        struct mbuf *m_vec[TX_CLEAN_MAX_DESC];
+        struct port_info *p;
+
+        p = txq->port;
+reclaim_more:
+        n = 0;
+        reclaimable = desc_reclaimable(txq);
+        if (reclaimable > 0 && mtx_trylock(&txq->lock)) {
+                n = reclaim_completed_tx(txq, TX_CLEAN_MAX_DESC, m_vec);
+                mtx_unlock(&txq->lock);
+        }
+        if (n == 0)
+                return;
+
+        for (i = 0; i < n; i++) {
+                m_freem_vec(m_vec[i]);
+        }
+        if (p && p->ifp->if_drv_flags & IFF_DRV_OACTIVE &&
+            txq->size - txq->in_use >= TX_START_MAX_DESC) {
+                txq_fills++;
+                p->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+                taskqueue_enqueue(p->tq, &p->start_task);
+        }
+
+        if (n)
+                goto reclaim_more;
+}
+
+static void
+sge_txq_reclaim_handler(void *arg, int ncount)
+{
+        struct sge_txq *q = arg;
+
+        sge_txq_reclaim_(q);
+}
 
 static void
 sge_timer_reclaim(void *arg, int ncount)
@@ -752,39 +793,15 @@ sge_timer_reclaim(void *arg, int ncount)
         struct sge_qset *qs;
         struct sge_txq *txq;
         struct mtx *lock;
-        struct mbuf *m_vec[TX_CLEAN_MAX_DESC];
-        int n, reclaimable;
 
         for (i = 0; i < nqsets; i++) {
                 qs = &sc->sge.qs[i];
                 txq = &qs->txq[TXQ_ETH];
-                reclaimable = desc_reclaimable(txq);
-                if (reclaimable > 0) {
-                        mtx_lock(&txq->lock);
-                        n = reclaim_completed_tx(sc, txq, TX_CLEAN_MAX_DESC, m_vec);
-                        mtx_unlock(&txq->lock);
-
-                        for (i = 0; i < n; i++)
-                                m_freem_vec(m_vec[i]);
-
-                        if (p->ifp->if_drv_flags & IFF_DRV_OACTIVE &&
-                            txq->size - txq->in_use >= TX_START_MAX_DESC) {
-                                p->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-                                taskqueue_enqueue(p->tq, &p->start_task);
-                        }
-                }
+                sge_txq_reclaim_(txq);
 
                 txq = &qs->txq[TXQ_OFLD];
-                reclaimable = desc_reclaimable(txq);
-                if (reclaimable > 0) {
-                        mtx_lock(&txq->lock);
-                        n = reclaim_completed_tx(sc, txq, TX_CLEAN_MAX_DESC, m_vec);
-                        mtx_unlock(&txq->lock);
-
-                        for (i = 0; i < n; i++)
-                                m_freem_vec(m_vec[i]);
-                }
+                sge_txq_reclaim_(txq);
 
                 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
                     &sc->sge.qs[0].rspq.lock;
@@ -1561,8 +1578,8 @@ t3_sge_stop(adapter_t *sc)
         for (i = 0; i < nqsets; ++i) {
                 struct sge_qset *qs = &sc->sge.qs[i];
 
-                taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_tsk);
-                taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_tsk);
+                taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
+                taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
         }
 }
@@ -1577,7 +1594,7 @@ t3_sge_stop(adapter_t *sc)
  * Tx buffers. Called with the Tx queue lock held.
  */
 int
-free_tx_desc(adapter_t *sc, struct sge_txq *q, int n, struct mbuf **m_vec)
+free_tx_desc(struct sge_txq *q, int n, struct mbuf **m_vec)
 {
         struct tx_sw_desc *d;
         unsigned int cidx = q->cidx;
@@ -1738,7 +1755,7 @@ ofld_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
                 return (ret);
         }
         ndesc = calc_tx_descs_ofld(m, nsegs);
-again:  cleaned = reclaim_completed_tx(adap, q, TX_CLEAN_MAX_DESC, m_vec);
+again:  cleaned = reclaim_completed_tx(q, TX_CLEAN_MAX_DESC, m_vec);
 
         ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
         if (__predict_false(ret)) {
@@ -1795,7 +1812,7 @@ restart_offloadq(void *data, int npending)
         struct tx_sw_desc *stx = &q->sdesc[q->pidx];
 
         mtx_lock(&q->lock);
-again:  cleaned = reclaim_completed_tx(adap, q, TX_CLEAN_MAX_DESC, m_vec);
+again:  cleaned = reclaim_completed_tx(q, TX_CLEAN_MAX_DESC, m_vec);
 
         while ((m = mbufq_peek(&q->sendq)) != NULL) {
                 unsigned int gen, pidx;
@@ -1934,13 +1951,13 @@ restart_tx(struct sge_qset *qs)
             should_restart_tx(&qs->txq[TXQ_OFLD]) &&
             test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
                 qs->txq[TXQ_OFLD].restarts++;
-                taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_tsk);
+                taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
         }
         if (isset(&qs->txq_stopped, TXQ_CTRL) &&
             should_restart_tx(&qs->txq[TXQ_CTRL]) &&
             test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
                 qs->txq[TXQ_CTRL].restarts++;
-                taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_tsk);
+                taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
         }
 }
@@ -2019,8 +2036,15 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
                 MTX_INIT(&q->txq[i].lock, q->txq[i].lockbuf, NULL, MTX_DEF);
         }
 
-        TASK_INIT(&q->txq[TXQ_OFLD].qresume_tsk, 0, restart_offloadq, q);
-        TASK_INIT(&q->txq[TXQ_CTRL].qresume_tsk, 0, restart_ctrlq, q);
+        q->txq[TXQ_ETH].port = pi;
+
+        TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
+        TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
+        TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_ETH]);
+        TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, &q->txq[TXQ_OFLD]);
 
         q->fl[0].gen = q->fl[1].gen = 1;
         q->fl[0].size = p->fl_size;
@@ -2636,6 +2660,10 @@ t3_add_sysctls(adapter_t *sc)
             "collapse_mbufs",
             CTLFLAG_RW, &collapse_mbufs,
             0, "collapse mbuf chains into iovecs");
+        SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+            "txq_overrun",
+            CTLFLAG_RD, &txq_fills,
+            0, "#times txq overrun");
 }
 
 /**
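
Once the driver is loaded, the new counter can be read with sysctl(8). The exact OID depends on where t3_add_sysctls() roots the adapter's sysctl tree, so the quickest check is something along the lines of "sysctl -a | grep txq_overrun".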