diff --git a/sys/dev/al_eth/al_eth.c b/sys/dev/al_eth/al_eth.c
index f3b7ad370df5..b2bd94bb504c 100644
--- a/sys/dev/al_eth/al_eth.c
+++ b/sys/dev/al_eth/al_eth.c
@@ -2512,7 +2512,7 @@ al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
 		return (ENOMEM);
 
 	/* Allocate taskqueues */
-	TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
+	NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
 	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
 	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
 	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index 172b8fab4d46..531c9f18fc8f 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -1387,7 +1387,7 @@ alc_attach(device_t dev)
 	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 	    MTX_DEF);
 	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
-	TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
+	NET_TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
 	sc->alc_ident = alc_find_ident(dev);
 
 	/* Map the device. */
diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c
index 27aa9321357a..4a8afd887de6 100644
--- a/sys/dev/ale/if_ale.c
+++ b/sys/dev/ale/if_ale.c
@@ -467,7 +467,7 @@ ale_attach(device_t dev)
 	mtx_init(&sc->ale_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 	    MTX_DEF);
 	callout_init_mtx(&sc->ale_tick_ch, &sc->ale_mtx, 0);
-	TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
+	NET_TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
 
 	/* Map the device. */
 	pci_enable_busmaster(dev);
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index fa3081cff296..0745d00ce3a8 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -760,7 +760,7 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
 	    device_get_nameunit(sc->sc_dev));
 
-	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
+	NET_TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
 	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
 	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
 	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
diff --git a/sys/dev/ath/if_ath_rx.c b/sys/dev/ath/if_ath_rx.c
index 13a42c5038d6..93a1d7455191 100644
--- a/sys/dev/ath/if_ath_rx.c
+++ b/sys/dev/ath/if_ath_rx.c
@@ -647,7 +647,6 @@ ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
 	uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
 	struct mbuf *m)
 {
-	struct epoch_tracker et;
 	uint64_t rstamp;
 	/* XXX TODO: make this an mbuf tag? */
 	struct ieee80211_rx_stats rxs;
@@ -942,7 +941,6 @@ ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
 		rxs.c_nf_ext[i] = nf;
 	}
 
-	NET_EPOCH_ENTER(et);
 	if (ni != NULL) {
 		/*
 		 * Only punt packets for ampdu reorder processing for
@@ -988,7 +986,6 @@ ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
 		type = ieee80211_input_mimo_all(ic, m);
 		m = NULL;
 	}
-	NET_EPOCH_EXIT(et);
 
 	/*
 	 * At this point we have passed the frame up the stack; thus
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index b91ee5de9649..551c18f8bf4b 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -3306,7 +3306,7 @@ bge_attach(device_t dev)
 	sc->bge_dev = dev;
 
 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
-	TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
+	NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
 	callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
 
 	pci_enable_busmaster(dev);
@@ -4601,7 +4601,6 @@ bge_msi_intr(void *arg)
 static void
 bge_intr_task(void *arg, int pending)
 {
-	struct epoch_tracker et;
 	struct bge_softc *sc;
 	if_t ifp;
 	uint32_t status, status_tag;
@@ -4644,9 +4643,7 @@ bge_intr_task(void *arg, int pending)
 	    sc->bge_rx_saved_considx != rx_prod) {
 		/* Check RX return ring producer/consumer. */
 		BGE_UNLOCK(sc);
-		NET_EPOCH_ENTER(et);
 		bge_rxeof(sc, rx_prod, 0);
-		NET_EPOCH_EXIT(et);
 		BGE_LOCK(sc);
 	}
 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c
index 8f22de453898..6fefaf4ccd89 100644
--- a/sys/dev/bwn/if_bwn.c
+++ b/sys/dev/bwn/if_bwn.c
@@ -621,7 +621,7 @@ bwn_attach(device_t dev)
 		mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP;
 
 	TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac);
-	TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
+	NET_TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
 	TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac);
 
 	error = bwn_attach_core(mac);
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index a1231ac40bd8..7dc0154ecda4 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -9241,7 +9241,7 @@ bxe_interrupt_attach(struct bxe_softc *sc)
         fp = &sc->fp[i];
         snprintf(fp->tq_name, sizeof(fp->tq_name),
                  "bxe%d_fp%d_tq", sc->unit, i);
-        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
+        NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
         TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
         fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
                                   taskqueue_thread_enqueue,
diff --git a/sys/dev/cas/if_cas.c b/sys/dev/cas/if_cas.c
index 1a20596f2923..653448eea405 100644
--- a/sys/dev/cas/if_cas.c
+++ b/sys/dev/cas/if_cas.c
@@ -208,7 +208,7 @@ cas_attach(struct cas_softc *sc)
 	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
 	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
 	/* Create local taskq. */
-	TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
+	NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
 	TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
 	sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
 	    taskqueue_thread_enqueue, &sc->sc_tq);
@@ -1608,11 +1608,14 @@ cas_tint(struct cas_softc *sc)
 static void
 cas_rint_timeout(void *arg)
 {
+	struct epoch_tracker et;
 	struct cas_softc *sc = arg;
 
 	CAS_LOCK_ASSERT(sc, MA_OWNED);
 
+	NET_EPOCH_ENTER(et);
 	cas_rint(sc);
+	NET_EPOCH_EXIT(et);
 }
 
 static void
diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c
index a5687d0213fd..f4a7236339cf 100644
--- a/sys/dev/ena/ena.c
+++ b/sys/dev/ena/ena.c
@@ -1353,7 +1353,7 @@ ena_create_io_queues(struct ena_adapter *adapter)
 	for (i = 0; i < adapter->num_queues; i++) {
 		queue = &adapter->que[i];
 
-		TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
+		NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
 		queue->cleanup_tq = taskqueue_create_fast("ena cleanup", M_WAITOK,
 		    taskqueue_thread_enqueue, &queue->cleanup_tq);
 
diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c
index ae21af8b4536..a7823dd6003d 100644
--- a/sys/dev/malo/if_malo.c
+++ b/sys/dev/malo/if_malo.c
@@ -253,7 +253,7 @@ malo_attach(uint16_t devid, struct malo_softc *sc)
 	taskqueue_start_threads(&sc->malo_tq, 1, PI_NET,
 	    "%s taskq", device_get_nameunit(sc->malo_dev));
 
-	TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc);
+	NET_TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc);
 	TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc);
 
 	ic->ic_softc = sc;
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 6e8e560ba663..9918366f2c75 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -360,7 +360,7 @@ mwl_attach(uint16_t devid, struct mwl_softc *sc)
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
 	    "%s taskq", device_get_nameunit(sc->sc_dev));
 
-	TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
+	NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
 	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
 	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
 	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c
index 38149ac2650c..99d21c38f8ab 100644
--- a/sys/dev/netmap/if_ptnet.c
+++ b/sys/dev/netmap/if_ptnet.c
@@ -695,11 +695,12 @@ ptnet_irqs_init(struct ptnet_softc *sc)
 	cpu_cur = CPU_FIRST();
 	for (i = 0; i < nvecs; i++) {
 		struct ptnet_queue *pq = sc->queues + i;
-		static void (*handler)(void *context, int pending);
 
-		handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;
+		if (i < sc->num_tx_rings)
+			TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
+		else
+			NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);
 
-		TASK_INIT(&pq->task, 0, handler, pq);
 		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
 		    taskqueue_thread_enqueue, &pq->taskq);
 		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c
index 940bad645037..246e257e3b2b 100644
--- a/sys/dev/nfe/if_nfe.c
+++ b/sys/dev/nfe/if_nfe.c
@@ -654,7 +654,7 @@ nfe_attach(device_t dev)
 	}
 
 	ether_ifattach(ifp, sc->eaddr);
-	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
+	NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
 	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
 	    taskqueue_thread_enqueue, &sc->nfe_tq);
 	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c
index 137936dc62eb..ad6bab9f5ce3 100644
--- a/sys/dev/qlxgbe/ql_os.c
+++ b/sys/dev/qlxgbe/ql_os.c
@@ -1543,7 +1543,7 @@ qla_create_fp_taskqueues(qla_host_t *ha)
 		bzero(tq_name, sizeof (tq_name));
 		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
 
-		TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
+		NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
 
 		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
 			taskqueue_thread_enqueue,
diff --git a/sys/dev/re/if_re.c b/sys/dev/re/if_re.c
index 76fa06f63585..66fe980dfc1d 100644
--- a/sys/dev/re/if_re.c
+++ b/sys/dev/re/if_re.c
@@ -1656,7 +1656,7 @@ re_attach(device_t dev)
 	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
 	IFQ_SET_READY(&ifp->if_snd);
 
-	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
+	NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
 
 #define	RE_PHYAD_INTERNAL	 0
 
@@ -2576,7 +2576,6 @@ re_intr(void *arg)
 static void
 re_int_task(void *arg, int npending)
 {
-	struct epoch_tracker et;
 	struct rl_softc		*sc;
 	struct ifnet		*ifp;
 	u_int16_t		status;
@@ -2603,11 +2602,8 @@ re_int_task(void *arg, int npending)
 	}
 #endif
 
-	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) {
-		NET_EPOCH_ENTER(et);
+	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
 		rval = re_rxeof(sc, NULL);
-		NET_EPOCH_EXIT(et);
-	}
 
 	/*
 	 * Some chips will ignore a second TX request issued
diff --git a/sys/dev/rt/if_rt.c b/sys/dev/rt/if_rt.c
index e3305997ea88..cd47d051f36e 100644
--- a/sys/dev/rt/if_rt.c
+++ b/sys/dev/rt/if_rt.c
@@ -552,7 +552,7 @@ rt_attach(device_t dev)
 	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
 
 	/* init task queue */
-	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
+	NET_TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
 	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
 	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
 
diff --git a/sys/dev/smc/if_smc.c b/sys/dev/smc/if_smc.c
index 6b69deb74d99..d3f911d8327c 100644
--- a/sys/dev/smc/if_smc.c
+++ b/sys/dev/smc/if_smc.c
@@ -395,7 +395,7 @@ smc_attach(device_t dev)
 
 	/* Set up taskqueue */
 	TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
-	TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
+	NET_TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
 	TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
 	sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
 	    taskqueue_thread_enqueue, &sc->smc_tq);
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 697e8de8fe32..ceb3ffaaf2b4 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -717,7 +717,7 @@ vtnet_init_rxq(struct vtnet_softc *sc, int id)
 	if (rxq->vtnrx_sg == NULL)
 		return (ENOMEM);
 
-	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
+	NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
 	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
 	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
 
diff --git a/sys/dev/vnic/nicvf_queues.c b/sys/dev/vnic/nicvf_queues.c
index d469067c1cf8..ebef89e31ab1 100644
--- a/sys/dev/vnic/nicvf_queues.c
+++ b/sys/dev/vnic/nicvf_queues.c
@@ -931,7 +931,7 @@ nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
 	    &cq->mtx);
 
 	/* Allocate taskqueue */
-	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
+	NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
 	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
 	    taskqueue_thread_enqueue, &cq->cmp_taskq);
 	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
@@ -1577,7 +1577,7 @@ nicvf_alloc_resources(struct nicvf *nic)
 	}
 
 	/* Allocate QS error taskqueue */
-	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
+	NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
 	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
 	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
 	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
diff --git a/sys/dev/vr/if_vr.c b/sys/dev/vr/if_vr.c
index 7cbc36b44803..cf942c5cd197 100644
--- a/sys/dev/vr/if_vr.c
+++ b/sys/dev/vr/if_vr.c
@@ -676,7 +676,7 @@ vr_attach(device_t dev)
 	ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
 	IFQ_SET_READY(&ifp->if_snd);
 
-	TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
+	NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
 
 	/* Configure Tx FIFO threshold. */
 	sc->vr_txthresh = VR_TXTHRESH_MIN;
diff --git a/sys/dev/wtap/if_wtap.c b/sys/dev/wtap/if_wtap.c
index 3244c67213d8..e7a2986d0543 100644
--- a/sys/dev/wtap/if_wtap.c
+++ b/sys/dev/wtap/if_wtap.c
@@ -637,7 +637,7 @@ wtap_attach(struct wtap_softc *sc, const uint8_t *macaddr)
 	sc->sc_tq = taskqueue_create("wtap_taskq", M_NOWAIT | M_ZERO,
 	    taskqueue_thread_enqueue, &sc->sc_tq);
 	taskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "%s taskQ", sc->name);
-	TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc);
+	NET_TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc);
 
 	ic->ic_softc = sc;
 	ic->ic_name = sc->name;
diff --git a/sys/dev/xl/if_xl.c b/sys/dev/xl/if_xl.c
index 9daea3dc4fa6..ca6184758815 100644
--- a/sys/dev/xl/if_xl.c
+++ b/sys/dev/xl/if_xl.c
@@ -1218,7 +1218,7 @@ xl_attach(device_t dev)
 	}
 
 	callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0);
-	TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
+	NET_TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 
 	/*
 	 * Now allocate a tag for the DMA descriptor lists and a chunk
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 26c5b0974048..a9fc8090c48a 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -3781,7 +3781,6 @@ _task_fn_tx(void *context)
 static void
 _task_fn_rx(void *context)
 {
-	struct epoch_tracker et;
 	iflib_rxq_t rxq = context;
 	if_ctx_t ctx = rxq->ifr_ctx;
 	bool more;
@@ -3805,7 +3804,6 @@ _task_fn_rx(void *context)
 	budget = ctx->ifc_sysctl_rx_budget;
 	if (budget == 0)
 		budget = 16;	/* XXX */
-	NET_EPOCH_ENTER(et);
 	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
 		if (ctx->ifc_flags & IFC_LEGACY)
 			IFDI_INTR_ENABLE(ctx);
@@ -3813,7 +3811,6 @@ _task_fn_rx(void *context)
 			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
 		DBG_COUNTER_INC(rx_intr_enables);
 	}
-	NET_EPOCH_EXIT(et);
 	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
 		return;
 	if (more)
@@ -5971,7 +5968,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
 		tqg = qgroup_if_io_tqg;
 		fn = _task_fn_rx;
 		intr_fast = iflib_fast_intr;
-		GROUPTASK_INIT(gtask, 0, fn, q);
+		NET_GROUPTASK_INIT(gtask, 0, fn, q);
 		break;
 	case IFLIB_INTR_RXTX:
 		q = &ctx->ifc_rxqs[qid];
@@ -5980,7 +5977,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
 		tqg = qgroup_if_io_tqg;
 		fn = _task_fn_rx;
 		intr_fast = iflib_fast_intr_rxtx;
-		GROUPTASK_INIT(gtask, 0, fn, q);
+		NET_GROUPTASK_INIT(gtask, 0, fn, q);
 		break;
 	case IFLIB_INTR_ADMIN:
 		q = ctx;
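
Note on the pattern applied throughout this patch: NET_TASK_INIT() and NET_GROUPTASK_INIT() mark a task as network-related, so the taskqueue thread is expected to enter the network epoch before invoking the handler. That is why the explicit NET_EPOCH_ENTER()/NET_EPOCH_EXIT() pairs disappear from handlers that are now only reached from such tasks (bge_intr_task(), re_int_task(), ath_rx_pkt(), iflib's _task_fn_rx()), while cas_rint_timeout(), which is reached from a callout rather than from the marked task, gains an explicit epoch section of its own. The sketch below is illustrative only: the foo_* names and softc layout are invented, and the comments describe the assumed dispatcher behaviour rather than the actual taskqueue implementation.

/*
 * Hypothetical foo(4) driver: before/after shape of an RX completion task
 * under this conversion.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>		/* NET_EPOCH_ENTER()/NET_EPOCH_EXIT() */

struct foo_softc {
	struct ifnet	*foo_ifp;
	struct task	 foo_rx_task;
};

static void
foo_rxeof(struct foo_softc *sc)
{
	/* Stub: a real driver would drain the RX ring and call if_input(). */
}

/* Before: the handler had to bracket input processing with the net epoch. */
static void
foo_rx_task_old(void *arg, int pending)
{
	struct epoch_tracker et;
	struct foo_softc *sc = arg;

	NET_EPOCH_ENTER(et);
	foo_rxeof(sc);
	NET_EPOCH_EXIT(et);
}

/*
 * After: the task is registered with NET_TASK_INIT(), and the handler assumes
 * the taskqueue thread has already entered the net epoch on its behalf.
 */
static void
foo_rx_task_new(void *arg, int pending)
{
	struct foo_softc *sc = arg;

	foo_rxeof(sc);
}

static void
foo_attach_tasks(struct foo_softc *sc)
{
	/* Was: TASK_INIT(&sc->foo_rx_task, 0, foo_rx_task_new, sc); */
	NET_TASK_INIT(&sc->foo_rx_task, 0, foo_rx_task_new, sc);
}

Code that can also reach the RX path from outside a marked task, for example a callout or a polling routine (as with cas_rint_timeout() above), still needs its own epoch_tracker and explicit NET_EPOCH_ENTER()/NET_EPOCH_EXIT() calls.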