Use NET_TASK_INIT() and NET_GROUPTASK_INIT() for drivers that process
incoming packets in taskqueue context.

Reviewed by:	hselasky
Differential Revision:	https://reviews.freebsd.org/D23518
This commit is contained in:
Gleb Smirnoff 2020-02-11 18:57:07 +00:00
parent 4426b2e64b
commit 6c3e93cb5a
24 changed files with 32 additions and 41 deletions

View File

@ -2512,7 +2512,7 @@ al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
return (ENOMEM);
/* Allocate taskqueues */
TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",

View File

@ -1387,7 +1387,7 @@ alc_attach(device_t dev)
mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
NET_TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
sc->alc_ident = alc_find_ident(dev);
/* Map the device. */

View File

@ -467,7 +467,7 @@ ale_attach(device_t dev)
mtx_init(&sc->ale_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->ale_tick_ch, &sc->ale_mtx, 0);
TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
NET_TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
/* Map the device. */
pci_enable_busmaster(dev);

View File

@ -760,7 +760,7 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->sc_dev));
TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
NET_TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);

View File

@ -647,7 +647,6 @@ ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
struct mbuf *m)
{
struct epoch_tracker et;
uint64_t rstamp;
/* XXX TODO: make this an mbuf tag? */
struct ieee80211_rx_stats rxs;
@ -942,7 +941,6 @@ ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
rxs.c_nf_ext[i] = nf;
}
NET_EPOCH_ENTER(et);
if (ni != NULL) {
/*
* Only punt packets for ampdu reorder processing for
@ -988,7 +986,6 @@ ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
type = ieee80211_input_mimo_all(ic, m);
m = NULL;
}
NET_EPOCH_EXIT(et);
/*
* At this point we have passed the frame up the stack; thus

View File

@ -3306,7 +3306,7 @@ bge_attach(device_t dev)
sc->bge_dev = dev;
BGE_LOCK_INIT(sc, device_get_nameunit(dev));
TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
pci_enable_busmaster(dev);
@ -4601,7 +4601,6 @@ bge_msi_intr(void *arg)
static void
bge_intr_task(void *arg, int pending)
{
struct epoch_tracker et;
struct bge_softc *sc;
if_t ifp;
uint32_t status, status_tag;
@ -4644,9 +4643,7 @@ bge_intr_task(void *arg, int pending)
sc->bge_rx_saved_considx != rx_prod) {
/* Check RX return ring producer/consumer. */
BGE_UNLOCK(sc);
NET_EPOCH_ENTER(et);
bge_rxeof(sc, rx_prod, 0);
NET_EPOCH_EXIT(et);
BGE_LOCK(sc);
}
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {

View File

@ -621,7 +621,7 @@ bwn_attach(device_t dev)
mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP;
TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac);
TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
NET_TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac);
error = bwn_attach_core(mac);

View File

@ -9241,7 +9241,7 @@ bxe_interrupt_attach(struct bxe_softc *sc)
fp = &sc->fp[i];
snprintf(fp->tq_name, sizeof(fp->tq_name),
"bxe%d_fp%d_tq", sc->unit, i);
TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
taskqueue_thread_enqueue,

View File

@ -208,7 +208,7 @@ cas_attach(struct cas_softc *sc)
callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
/* Create local taskq. */
TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->sc_tq);
@ -1608,11 +1608,14 @@ cas_tint(struct cas_softc *sc)
static void
cas_rint_timeout(void *arg)
{
struct epoch_tracker et;
struct cas_softc *sc = arg;
CAS_LOCK_ASSERT(sc, MA_OWNED);
NET_EPOCH_ENTER(et);
cas_rint(sc);
NET_EPOCH_EXIT(et);
}
static void

View File

@ -1353,7 +1353,7 @@ ena_create_io_queues(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_queues; i++) {
queue = &adapter->que[i];
TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);

View File

@ -253,7 +253,7 @@ malo_attach(uint16_t devid, struct malo_softc *sc)
taskqueue_start_threads(&sc->malo_tq, 1, PI_NET,
"%s taskq", device_get_nameunit(sc->malo_dev));
TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc);
NET_TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc);
TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc);
ic->ic_softc = sc;

View File

@ -360,7 +360,7 @@ mwl_attach(uint16_t devid, struct mwl_softc *sc)
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
"%s taskq", device_get_nameunit(sc->sc_dev));
TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

View File

@ -695,11 +695,12 @@ ptnet_irqs_init(struct ptnet_softc *sc)
cpu_cur = CPU_FIRST();
for (i = 0; i < nvecs; i++) {
struct ptnet_queue *pq = sc->queues + i;
static void (*handler)(void *context, int pending);
handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;
if (i < sc->num_tx_rings)
TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
else
NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);
TASK_INIT(&pq->task, 0, handler, pq);
pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
taskqueue_thread_enqueue, &pq->taskq);
taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",

View File

@ -654,7 +654,7 @@ nfe_attach(device_t dev)
}
ether_ifattach(ifp, sc->eaddr);
TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->nfe_tq);
taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",

View File

@ -1543,7 +1543,7 @@ qla_create_fp_taskqueues(qla_host_t *ha)
bzero(tq_name, sizeof (tq_name));
snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
taskqueue_thread_enqueue,

View File

@ -1656,7 +1656,7 @@ re_attach(device_t dev)
ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
IFQ_SET_READY(&ifp->if_snd);
TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
#define RE_PHYAD_INTERNAL 0
@ -2576,7 +2576,6 @@ re_intr(void *arg)
static void
re_int_task(void *arg, int npending)
{
struct epoch_tracker et;
struct rl_softc *sc;
struct ifnet *ifp;
u_int16_t status;
@ -2603,11 +2602,8 @@ re_int_task(void *arg, int npending)
}
#endif
if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) {
NET_EPOCH_ENTER(et);
if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
rval = re_rxeof(sc, NULL);
NET_EPOCH_EXIT(et);
}
/*
* Some chips will ignore a second TX request issued

View File

@ -552,7 +552,7 @@ rt_attach(device_t dev)
ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
/* init task queue */
TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
NET_TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

View File

@ -395,7 +395,7 @@ smc_attach(device_t dev)
/* Set up taskqueue */
TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
NET_TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->smc_tq);

View File

@ -717,7 +717,7 @@ vtnet_init_rxq(struct vtnet_softc *sc, int id)
if (rxq->vtnrx_sg == NULL)
return (ENOMEM);
TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
taskqueue_thread_enqueue, &rxq->vtnrx_tq);

View File

@ -931,7 +931,7 @@ nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
&cq->mtx);
/* Allocate taskqueue */
TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
taskqueue_thread_enqueue, &cq->cmp_taskq);
taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
@ -1577,7 +1577,7 @@ nicvf_alloc_resources(struct nicvf *nic)
}
/* Allocate QS error taskqueue */
TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
taskqueue_thread_enqueue, &qs->qs_err_taskq);
taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",

View File

@ -676,7 +676,7 @@ vr_attach(device_t dev)
ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
IFQ_SET_READY(&ifp->if_snd);
TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
/* Configure Tx FIFO threshold. */
sc->vr_txthresh = VR_TXTHRESH_MIN;

View File

@ -637,7 +637,7 @@ wtap_attach(struct wtap_softc *sc, const uint8_t *macaddr)
sc->sc_tq = taskqueue_create("wtap_taskq", M_NOWAIT | M_ZERO,
taskqueue_thread_enqueue, &sc->sc_tq);
taskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "%s taskQ", sc->name);
TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc);
NET_TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc);
ic->ic_softc = sc;
ic->ic_name = sc->name;

View File

@ -1218,7 +1218,7 @@ xl_attach(device_t dev)
}
callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0);
TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
NET_TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
/*
* Now allocate a tag for the DMA descriptor lists and a chunk

View File

@ -3781,7 +3781,6 @@ _task_fn_tx(void *context)
static void
_task_fn_rx(void *context)
{
struct epoch_tracker et;
iflib_rxq_t rxq = context;
if_ctx_t ctx = rxq->ifr_ctx;
bool more;
@ -3805,7 +3804,6 @@ _task_fn_rx(void *context)
budget = ctx->ifc_sysctl_rx_budget;
if (budget == 0)
budget = 16; /* XXX */
NET_EPOCH_ENTER(et);
if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
if (ctx->ifc_flags & IFC_LEGACY)
IFDI_INTR_ENABLE(ctx);
@ -3813,7 +3811,6 @@ _task_fn_rx(void *context)
IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
DBG_COUNTER_INC(rx_intr_enables);
}
NET_EPOCH_EXIT(et);
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
return;
if (more)
@ -5971,7 +5968,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
tqg = qgroup_if_io_tqg;
fn = _task_fn_rx;
intr_fast = iflib_fast_intr;
GROUPTASK_INIT(gtask, 0, fn, q);
NET_GROUPTASK_INIT(gtask, 0, fn, q);
break;
case IFLIB_INTR_RXTX:
q = &ctx->ifc_rxqs[qid];
@ -5980,7 +5977,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
tqg = qgroup_if_io_tqg;
fn = _task_fn_rx;
intr_fast = iflib_fast_intr_rxtx;
GROUPTASK_INIT(gtask, 0, fn, q);
NET_GROUPTASK_INIT(gtask, 0, fn, q);
break;
case IFLIB_INTR_ADMIN:
q = ctx;