1. Use taskqueue_create() instead of taskqueue_create_fast() for both the
   fastpath and slowpath taskqueues.
2. Service all transmits in the taskqueue threads.
3. Add stats counters to keep track of:
	- bd availability
	- the tx buf ring not being emptied in the fastpath task queue
	  (such rings are drained via the timeout taskqueue)
	- tx attempts while the link is down
   (A hedged sketch of this taskqueue pattern follows below.)

MFC after: 5 days
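For readers unfamiliar with the taskqueue(9) pattern being adopted here, the
following is a minimal sketch assuming a simplified per-queue context. The
structure, field, and function names (my_fp, my_deferred_tx, my_kick_tx) are
illustrative placeholders, not the driver's identifiers; only the
taskqueue(9) calls themselves are real KPI.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

/* Simplified per-queue context (illustrative only). */
struct my_fp {
	struct taskqueue	*tq;		/* per-queue taskqueue */
	struct task		 tx_task;	/* immediate deferred transmit */
	struct timeout_task	 tx_timeout_task; /* delayed buf-ring drain */
};

/* Runs in the taskqueue thread; this is where transmits are serviced. */
static void
my_deferred_tx(void *arg, int pending)
{
	struct my_fp *fp = arg;

	/* Take the per-queue tx lock and drain the buf ring here. */
	(void)fp;
}

static void
my_setup_tx_taskqueue(struct my_fp *fp)
{
	/*
	 * taskqueue_create() rather than taskqueue_create_fast(): the
	 * handlers run in a dedicated thread, so the "fast" variant's
	 * spin-lock protected queue is unnecessary.
	 */
	fp->tq = taskqueue_create("my_fp_tq", M_NOWAIT,
	    taskqueue_thread_enqueue, &fp->tq);

	TASK_INIT(&fp->tx_task, 0, my_deferred_tx, fp);
	TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0, my_deferred_tx, fp);

	taskqueue_start_threads(&fp->tq, 1, PI_NET, "my_fp_tq");
}

/*
 * If the transmit path could not take the tx lock, or left the buf ring
 * non-empty, hand the work to the taskqueue thread, either immediately or
 * after a one-tick delay.
 */
static void
my_kick_tx(struct my_fp *fp, int delayed)
{
	if (delayed)
		taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
	else
		taskqueue_enqueue(fp->tq, &fp->tx_task);
}

Teardown mirrors the detach path in the diff below: taskqueue_drain() the
tasks, taskqueue_cancel_timeout()/taskqueue_drain_timeout() the timeout task,
then taskqueue_free().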
David C Somayajulu 2016-10-18 21:33:57 +00:00
parent 1a58c70409
commit c7873d969c
3 changed files with 108 additions and 97 deletions


@ -27,7 +27,7 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define BXE_DRIVER_VERSION "1.78.81"
#define BXE_DRIVER_VERSION "1.78.89"
#include "bxe.h"
#include "ecore_sp.h"
@ -489,7 +489,14 @@ static const struct {
{ STATS_OFFSET32(mbuf_alloc_tpa),
4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
{ STATS_OFFSET32(tx_queue_full_return),
4, STATS_FLAGS_FUNC, "tx_queue_full_return"}
4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
{ STATS_OFFSET32(tx_request_link_down_failures),
4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
{ STATS_OFFSET32(bd_avail_too_less_failures),
4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
{ STATS_OFFSET32(tx_mq_not_empty),
4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}
};
static const struct {
@ -602,7 +609,14 @@ static const struct {
{ Q_STATS_OFFSET32(mbuf_alloc_tpa),
4, "mbuf_alloc_tpa"},
{ Q_STATS_OFFSET32(tx_queue_full_return),
4, "tx_queue_full_return"}
4, "tx_queue_full_return"},
{ Q_STATS_OFFSET32(tx_request_link_down_failures),
4, "tx_request_link_down_failures"},
{ Q_STATS_OFFSET32(bd_avail_too_less_failures),
4, "bd_avail_too_less_failures"},
{ Q_STATS_OFFSET32(tx_mq_not_empty),
4, "tx_mq_not_empty"}
};
#define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr)
@ -5599,7 +5613,7 @@ bxe_tx_start(if_t ifp)
BXE_FP_TX_UNLOCK(fp);
}
#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 901504
static int
bxe_tx_mq_start_locked(struct bxe_softc *sc,
@ -5621,11 +5635,16 @@ bxe_tx_mq_start_locked(struct bxe_softc *sc,
return (EINVAL);
}
if (!sc->link_vars.link_up ||
(if_getdrvflags(ifp) &
(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
if (m != NULL)
rc = drbr_enqueue(ifp, tx_br, m);
if (m != NULL) {
rc = drbr_enqueue(ifp, tx_br, m);
if (rc != 0) {
fp->eth_q_stats.tx_soft_errors++;
goto bxe_tx_mq_start_locked_exit;
}
}
if (!sc->link_vars.link_up || !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
fp->eth_q_stats.tx_request_link_down_failures++;
goto bxe_tx_mq_start_locked_exit;
}
@ -5635,24 +5654,22 @@ bxe_tx_mq_start_locked(struct bxe_softc *sc,
fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
}
if (m == NULL) {
/* no new work, check for pending frames */
next = drbr_dequeue_drv(ifp, tx_br);
} else if (drbr_needs_enqueue_drv(ifp, tx_br)) {
/* have both new and pending work, maintain packet order */
rc = drbr_enqueue(ifp, tx_br, m);
if (rc != 0) {
fp->eth_q_stats.tx_soft_errors++;
goto bxe_tx_mq_start_locked_exit;
}
next = drbr_dequeue_drv(ifp, tx_br);
} else {
/* new work only and nothing pending */
next = m;
}
/* keep adding entries while there are frames to send */
while (next != NULL) {
while ((next = drbr_peek(ifp, tx_br)) != NULL) {
/* handle any completions if we're running low */
tx_bd_avail = bxe_tx_avail(sc, fp);
if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
/* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
bxe_txeof(sc, fp);
tx_bd_avail = bxe_tx_avail(sc, fp);
if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
fp->eth_q_stats.bd_avail_too_less_failures++;
m_freem(next);
drbr_advance(ifp, tx_br);
rc = ENOBUFS;
break;
}
}
/* the mbuf now belongs to us */
fp->eth_q_stats.mbuf_alloc_tx++;
@ -5668,11 +5685,11 @@ bxe_tx_mq_start_locked(struct bxe_softc *sc,
if (next != NULL) {
/* mark the TX queue as full and save the frame */
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
/* XXX this may reorder the frame */
rc = drbr_enqueue(ifp, tx_br, next);
drbr_putback(ifp, tx_br, next);
fp->eth_q_stats.mbuf_alloc_tx--;
fp->eth_q_stats.tx_frames_deferred++;
}
} else
drbr_advance(ifp, tx_br);
/* stop looking for more work */
break;
@ -5684,18 +5701,7 @@ bxe_tx_mq_start_locked(struct bxe_softc *sc,
/* send a copy of the frame to any BPF listeners */
if_etherbpfmtap(ifp, next);
tx_bd_avail = bxe_tx_avail(sc, fp);
/* handle any completions if we're running low */
if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
/* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
bxe_txeof(sc, fp);
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
break;
}
}
next = drbr_dequeue_drv(ifp, tx_br);
drbr_advance(ifp, tx_br);
}
/* all TX packets were dequeued and/or the tx ring is full */
@ -5705,10 +5711,28 @@ bxe_tx_mq_start_locked(struct bxe_softc *sc,
}
bxe_tx_mq_start_locked_exit:
/* If we didn't drain the drbr, enqueue a task in the future to do it. */
if (!drbr_empty(ifp, tx_br)) {
fp->eth_q_stats.tx_mq_not_empty++;
taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
}
return (rc);
}
static void
bxe_tx_mq_start_deferred(void *arg,
int pending)
{
struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
struct bxe_softc *sc = fp->sc;
if_t ifp = sc->ifp;
BXE_FP_TX_LOCK(fp);
bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
BXE_FP_TX_UNLOCK(fp);
}
/* Multiqueue (TSS) dispatch routine. */
static int
bxe_tx_mq_start(struct ifnet *ifp,
@ -5730,8 +5754,10 @@ bxe_tx_mq_start(struct ifnet *ifp,
if (BXE_FP_TX_TRYLOCK(fp)) {
rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
BXE_FP_TX_UNLOCK(fp);
} else
} else {
rc = drbr_enqueue(ifp, fp->tx_br, m);
taskqueue_enqueue(fp->tq, &fp->tx_task);
}
return (rc);
}
@ -5766,7 +5792,7 @@ bxe_mq_flush(struct ifnet *ifp)
if_qflush(ifp);
}
#endif /* FreeBSD_version >= 800000 */
#endif /* FreeBSD_version >= 901504 */
static uint16_t
bxe_cid_ilt_lines(struct bxe_softc *sc)
@ -6126,7 +6152,7 @@ bxe_free_fp_buffers(struct bxe_softc *sc)
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 901504
if (fp->tx_br != NULL) {
/* just in case bxe_mq_flush() wasn't called */
if (mtx_initialized(&fp->tx_mtx)) {
@ -6953,6 +6979,8 @@ bxe_link_attn(struct bxe_softc *sc)
uint32_t pause_enabled = 0;
struct host_port_stats *pstats;
int cmng_fns;
struct bxe_fastpath *fp;
int i;
/* Make sure that we are synced with the current statistics */
bxe_stats_handle(sc, STATS_EVENT_STOP);
@ -6984,6 +7012,12 @@ bxe_link_attn(struct bxe_softc *sc)
if (sc->state == BXE_STATE_OPEN) {
bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
}
/* Restart tx when the link comes back. */
FOR_EACH_ETH_QUEUE(sc, i) {
fp = &sc->fp[i];
taskqueue_enqueue(fp->tq, &fp->tx_task);
}
}
if (sc->link_vars.link_up && sc->link_vars.line_speed) {
@ -9035,6 +9069,10 @@ bxe_interrupt_detach(struct bxe_softc *sc)
fp = &sc->fp[i];
if (fp->tq) {
taskqueue_drain(fp->tq, &fp->tq_task);
taskqueue_drain(fp->tq, &fp->tx_task);
while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
NULL))
taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
taskqueue_free(fp->tq);
fp->tq = NULL;
}
@ -9067,9 +9105,9 @@ bxe_interrupt_attach(struct bxe_softc *sc)
snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
"bxe%d_sp_tq", sc->unit);
TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
taskqueue_thread_enqueue,
&sc->sp_tq);
sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
taskqueue_thread_enqueue,
&sc->sp_tq);
taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
"%s", sc->sp_tq_name);
@ -9079,9 +9117,12 @@ bxe_interrupt_attach(struct bxe_softc *sc)
snprintf(fp->tq_name, sizeof(fp->tq_name),
"bxe%d_fp%d_tq", sc->unit, i);
TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
taskqueue_thread_enqueue,
&fp->tq);
TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
taskqueue_thread_enqueue,
&fp->tq);
TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
bxe_tx_mq_start_deferred, fp);
taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
"%s", fp->tq_name);
}
@ -12114,8 +12155,6 @@ static void
bxe_periodic_callout_func(void *xsc)
{
struct bxe_softc *sc = (struct bxe_softc *)xsc;
struct bxe_fastpath *fp;
uint16_t tx_bd_avail;
int i;
if (!BXE_CORE_TRYLOCK(sc)) {
@ -12136,49 +12175,8 @@ bxe_periodic_callout_func(void *xsc)
BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
BXE_CORE_UNLOCK(sc);
return;
}
#if __FreeBSD_version >= 800000
FOR_EACH_QUEUE(sc, i) {
fp = &sc->fp[i];
if (BXE_FP_TX_TRYLOCK(fp)) {
if_t ifp = sc->ifp;
/*
* If interface was stopped due to unavailable
* bds, try to process some tx completions
*/
(void) bxe_txeof(sc, fp);
tx_bd_avail = bxe_tx_avail(sc, fp);
if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
}
BXE_FP_TX_UNLOCK(fp);
}
}
#else
fp = &sc->fp[0];
if (BXE_FP_TX_TRYLOCK(fp)) {
struct ifnet *ifp = sc->ifnet;
/*
* If interface was stopped due to unavailable
* bds, try to process some tx completions
*/
(void) bxe_txeof(sc, fp);
tx_bd_avail = bxe_tx_avail(sc, fp);
if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
bxe_tx_start_locked(sc, ifp, fp);
}
BXE_FP_TX_UNLOCK(fp);
}
#endif /* #if __FreeBSD_version >= 800000 */
/* Check for TX timeouts on any fastpath. */
FOR_EACH_QUEUE(sc, i) {
@ -12656,7 +12654,7 @@ bxe_init_ifnet(struct bxe_softc *sc)
if_setioctlfn(ifp, bxe_ioctl);
if_setstartfn(ifp, bxe_tx_start);
if_setgetcounterfn(ifp, bxe_get_counter);
#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 901504
if_settransmitfn(ifp, bxe_tx_mq_start);
if_setqflushfn(ifp, bxe_mq_flush);
#endif
@ -15699,7 +15697,7 @@ bxe_add_sysctls(struct bxe_softc *sc)
static int
bxe_alloc_buf_rings(struct bxe_softc *sc)
{
#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 901504
int i;
struct bxe_fastpath *fp;
@ -15720,7 +15718,7 @@ bxe_alloc_buf_rings(struct bxe_softc *sc)
static void
bxe_free_buf_rings(struct bxe_softc *sc)
{
#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 901504
int i;
struct bxe_fastpath *fp;


@ -644,6 +644,9 @@ struct bxe_fastpath {
struct taskqueue *tq;
char tq_name[32];
struct task tx_task;
struct timeout_task tx_timeout_task;
/* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */
uint8_t cl_id;
#define FP_CL_ID(fp) (fp->cl_id)
@ -2300,7 +2303,8 @@ void bxe_dump_mbuf_data(struct bxe_softc *sc, char *pTag,
extern int bxe_grc_dump(struct bxe_softc *sc);
#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 1000000
#if (__FreeBSD_version >= 1001513 && __FreeBSD_version < 1100000) ||\
__FreeBSD_version >= 1100048
#define BXE_SET_FLOWID(m) M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE)
#define BXE_VALID_FLOWID(m) (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
#else


@ -266,6 +266,10 @@ struct bxe_eth_stats {
/* num. of times tx queue full occurred */
uint32_t tx_queue_full_return;
/* debug stats */
uint32_t tx_request_link_down_failures;
uint32_t bd_avail_too_less_failures;
uint32_t tx_mq_not_empty;
};
@ -372,6 +376,11 @@ struct bxe_eth_q_stats {
/* num. of times tx queue full occurred */
uint32_t tx_queue_full_return;
/* debug stats */
uint32_t tx_request_link_down_failures;
uint32_t bd_avail_too_less_failures;
uint32_t tx_mq_not_empty;
};
struct bxe_eth_stats_old {