Remove taskqueue_enqueue_fast().
taskqueue_enqueue() was changed to support both fast and non-fast
taskqueues 10 years ago in r154167. It has been a compat shim ever
since. It's time for the compat shim to go.

Submitted by:	Howard Su <howard0su@gmail.com>
Reviewed by:	sephe
Differential Revision:	https://reviews.freebsd.org/D5131
This commit is contained in:
parent
ac8f9de31a
commit
cbc4d2db75
@ -38,6 +38,8 @@
|
||||
# xargs -n1 | sort | uniq -d;
|
||||
# done
|
||||
|
||||
# 20160301: Remove taskqueue_enqueue_fast
|
||||
OLD_FILES+=usr/share/man/man9/taskqueue_enqueue_fast.9.gz
|
||||
# 20160225: Remove casperd and libcapsicum.
|
||||
OLD_FILES+=sbin/casperd
|
||||
OLD_FILES+=etc/rc.d/casperd
|
||||
|
@ -1705,7 +1705,6 @@ MLINKS+=taskqueue.9 TASK_INIT.9 \
|
||||
taskqueue.9 taskqueue_drain_all.9 \
|
||||
taskqueue.9 taskqueue_drain_timeout.9 \
|
||||
taskqueue.9 taskqueue_enqueue.9 \
|
||||
taskqueue.9 taskqueue_enqueue_fast.9 \
|
||||
taskqueue.9 taskqueue_enqueue_timeout.9 \
|
||||
taskqueue.9 TASKQUEUE_FAST_DEFINE.9 \
|
||||
taskqueue.9 TASKQUEUE_FAST_DEFINE_THREAD.9 \
|
||||
|
@ -28,7 +28,7 @@
|
||||
.\"
|
||||
.\" $FreeBSD$
|
||||
.\"
|
||||
.Dd January 4, 2015
|
||||
.Dd March 1, 2016
|
||||
.Dt TASKQUEUE 9
|
||||
.Os
|
||||
.Sh NAME
|
||||
@ -80,8 +80,6 @@ struct timeout_task;
|
||||
.Ft int
|
||||
.Fn taskqueue_enqueue "struct taskqueue *queue" "struct task *task"
|
||||
.Ft int
|
||||
.Fn taskqueue_enqueue_fast "struct taskqueue *queue" "struct task *task"
|
||||
.Ft int
|
||||
.Fn taskqueue_enqueue_timeout "struct taskqueue *queue" "struct timeout_task *timeout_task" "int ticks"
|
||||
.Ft int
|
||||
.Fn taskqueue_cancel "struct taskqueue *queue" "struct task *task" "u_int *pendp"
|
||||
@ -191,14 +189,6 @@ This function will return
|
||||
.Er EPIPE
|
||||
if the queue is being freed.
|
||||
.Pp
|
||||
The function
|
||||
.Fn taskqueue_enqueue_fast
|
||||
should be used in place of
|
||||
.Fn taskqueue_enqueue
|
||||
when the enqueuing must happen from a fast interrupt handler.
|
||||
This method uses spin locks to avoid the possibility of sleeping in the fast
|
||||
interrupt context.
|
||||
.Pp
|
||||
When a task is executed,
|
||||
first it is removed from the queue,
|
||||
the value of
|
||||
@ -467,15 +457,7 @@ To use these queues,
|
||||
call
|
||||
.Fn taskqueue_enqueue
|
||||
with the value of the global taskqueue variable for the queue you wish to
|
||||
use
|
||||
.Va ( taskqueue_swi ,
|
||||
.Va taskqueue_swi_giant ,
|
||||
or
|
||||
.Va taskqueue_thread ) .
|
||||
Use
|
||||
.Fn taskqueue_enqueue_fast
|
||||
for the global taskqueue variable
|
||||
.Va taskqueue_fast .
|
||||
use.
|
||||
.Pp
|
||||
The software interrupt queues can be used,
|
||||
for instance, for implementing interrupt handlers which must perform a
|
||||
|
@ -300,7 +300,7 @@ dmtpps_poll(struct timecounter *tc)
|
||||
pps_capture(&sc->pps_state);
|
||||
sc->pps_state.capcount = DMTIMER_READ4(sc, DMT_TCAR1);
|
||||
DMTIMER_WRITE4(sc, DMT_IRQSTATUS, DMT_IRQ_TCAR);
|
||||
taskqueue_enqueue_fast(taskqueue_fast, &sc->pps_task);
|
||||
taskqueue_enqueue(taskqueue_fast, &sc->pps_task);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -915,7 +915,7 @@ aac_filter(void *arg)
|
||||
|
||||
/* handle completion processing */
|
||||
if (reason & AAC_DB_RESPONSE_READY)
|
||||
taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete);
|
||||
taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete);
|
||||
|
||||
/* controller wants to talk to us */
|
||||
if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
|
||||
|
@ -709,7 +709,7 @@ ath_sysctl_forcebstuck(SYSCTL_HANDLER_ARGS)
|
||||
if (val == 0)
|
||||
return 0;
|
||||
|
||||
taskqueue_enqueue_fast(sc->sc_tq, &sc->sc_bstucktask);
|
||||
taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
|
||||
val = 0;
|
||||
return 0;
|
||||
}
|
||||
|
@ -8328,7 +8328,7 @@ bwn_intr(void *arg)
|
||||
BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ);
|
||||
BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);
|
||||
|
||||
taskqueue_enqueue_fast(sc->sc_tq, &mac->mac_intrtask);
|
||||
taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask);
|
||||
return (FILTER_HANDLED);
|
||||
}
|
||||
|
||||
|
@ -8737,7 +8737,7 @@ bxe_handle_fp_tq(void *context,
|
||||
|
||||
if (more_rx /*|| more_tx*/) {
|
||||
/* still more work to do */
|
||||
taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
|
||||
taskqueue_enqueue(fp->tq, &fp->tq_task);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -8771,7 +8771,7 @@ bxe_task_fp(struct bxe_fastpath *fp)
|
||||
|
||||
if (more_rx /*|| more_tx*/) {
|
||||
/* still more work to do, bail out if this ISR and process later */
|
||||
taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
|
||||
taskqueue_enqueue(fp->tq, &fp->tq_task);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -8838,7 +8838,7 @@ bxe_intr_legacy(void *xsc)
|
||||
bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
|
||||
|
||||
/* schedule slowpath handler */
|
||||
taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
|
||||
taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
|
||||
|
||||
status &= ~0x1;
|
||||
}
|
||||
@ -8860,7 +8860,7 @@ bxe_intr_sp(void *xsc)
|
||||
bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
|
||||
|
||||
/* schedule slowpath handler */
|
||||
taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
|
||||
taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
|
||||
}
|
||||
|
||||
/* fastpath interrupt entry point */
|
||||
|
@ -75,7 +75,7 @@ vt_kms_postswitch(void *arg)
|
||||
sc = (struct vt_kms_softc *)arg;
|
||||
|
||||
if (!kdb_active && panicstr == NULL)
|
||||
taskqueue_enqueue_fast(taskqueue_thread, &sc->fb_mode_task);
|
||||
taskqueue_enqueue(taskqueue_thread, &sc->fb_mode_task);
|
||||
else
|
||||
drm_fb_helper_restore_fbdev_mode(sc->fb_helper);
|
||||
|
||||
|
@ -350,7 +350,7 @@ hv_vmbus_on_events(int cpu)
|
||||
|
||||
if (channel->batched_reading)
|
||||
hv_ring_buffer_read_begin(&channel->inbound);
|
||||
taskqueue_enqueue_fast(channel->rxq, &channel->channel_task);
|
||||
taskqueue_enqueue(channel->rxq, &channel->channel_task);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -397,9 +397,9 @@ malo_intr(void *arg)
|
||||
__func__, status, sc->malo_imask);
|
||||
|
||||
if (status & MALO_A2HRIC_BIT_RX_RDY)
|
||||
taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_rxtask);
|
||||
taskqueue_enqueue(sc->malo_tq, &sc->malo_rxtask);
|
||||
if (status & MALO_A2HRIC_BIT_TX_DONE)
|
||||
taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_txtask);
|
||||
taskqueue_enqueue(sc->malo_tq, &sc->malo_txtask);
|
||||
if (status & MALO_A2HRIC_BIT_OPC_DONE)
|
||||
malo_hal_cmddone(mh);
|
||||
if (status & MALO_A2HRIC_BIT_MAC_EVENT)
|
||||
|
@ -1873,7 +1873,7 @@ nfe_intr(void *arg)
|
||||
if (status == 0 || status == 0xffffffff)
|
||||
return (FILTER_STRAY);
|
||||
nfe_disable_intr(sc);
|
||||
taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
|
||||
taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
|
||||
|
||||
return (FILTER_HANDLED);
|
||||
}
|
||||
@ -1932,7 +1932,7 @@ nfe_int_task(void *arg, int pending)
|
||||
NFE_UNLOCK(sc);
|
||||
|
||||
if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
|
||||
taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
|
||||
taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -707,7 +707,7 @@ oce_fast_isr(void *arg)
|
||||
|
||||
oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
|
||||
|
||||
taskqueue_enqueue_fast(ii->tq, &ii->task);
|
||||
taskqueue_enqueue(ii->tq, &ii->task);
|
||||
|
||||
ii->eq->intr++;
|
||||
|
||||
@ -1065,7 +1065,7 @@ oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
|
||||
#else
|
||||
if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
|
||||
#endif
|
||||
taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
|
||||
taskqueue_enqueue(taskqueue_swi, &wq->txtask);
|
||||
|
||||
}
|
||||
|
||||
|
@ -2553,7 +2553,7 @@ re_intr(void *arg)
|
||||
return (FILTER_STRAY);
|
||||
CSR_WRITE_2(sc, RL_IMR, 0);
|
||||
|
||||
taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
|
||||
taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
|
||||
|
||||
return (FILTER_HANDLED);
|
||||
}
|
||||
@ -2621,7 +2621,7 @@ re_int_task(void *arg, int npending)
|
||||
RL_UNLOCK(sc);
|
||||
|
||||
if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
|
||||
taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
|
||||
taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -560,7 +560,7 @@ smc_start_locked(struct ifnet *ifp)
|
||||
return;
|
||||
}
|
||||
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -797,7 +797,7 @@ smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
|
||||
SMC_UNLOCK(sc);
|
||||
|
||||
if (cmd == POLL_AND_CHECK_STATUS)
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_intr);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -823,7 +823,7 @@ smc_intr(void *context)
|
||||
/* Restore bank */
|
||||
smc_select_bank(sc, curbank);
|
||||
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_intr);
|
||||
return (FILTER_HANDLED);
|
||||
}
|
||||
|
||||
@ -877,7 +877,7 @@ smc_task_intr(void *context, int pending)
|
||||
tcr |= TCR_TXENA | TCR_PAD_EN;
|
||||
smc_write_2(sc, TCR, tcr);
|
||||
smc_select_bank(sc, 2);
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -892,7 +892,7 @@ smc_task_intr(void *context, int pending)
|
||||
if (status & RCV_INT) {
|
||||
smc_write_1(sc, ACK, RCV_INT);
|
||||
sc->smc_mask &= ~RCV_INT;
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_rx);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -901,7 +901,7 @@ smc_task_intr(void *context, int pending)
|
||||
if (status & ALLOC_INT) {
|
||||
smc_write_1(sc, ACK, ALLOC_INT);
|
||||
sc->smc_mask &= ~ALLOC_INT;
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -933,7 +933,7 @@ smc_task_intr(void *context, int pending)
|
||||
/*
|
||||
* See if there are any packets to transmit.
|
||||
*/
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_tx);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1233,7 +1233,7 @@ smc_watchdog(void *arg)
|
||||
|
||||
sc = (struct smc_softc *)arg;
|
||||
device_printf(sc->smc_dev, "watchdog timeout\n");
|
||||
taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
|
||||
taskqueue_enqueue(sc->smc_tq, &sc->smc_intr);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1671,7 +1671,7 @@ vr_intr(void *arg)
|
||||
/* Disable interrupts. */
|
||||
CSR_WRITE_2(sc, VR_IMR, 0x0000);
|
||||
|
||||
taskqueue_enqueue_fast(taskqueue_fast, &sc->vr_inttask);
|
||||
taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);
|
||||
|
||||
return (FILTER_HANDLED);
|
||||
}
|
||||
|
@ -1400,7 +1400,7 @@ xn_rxq_intr(void *xrxq)
|
||||
{
|
||||
struct netfront_rxq *rxq = xrxq;
|
||||
|
||||
taskqueue_enqueue_fast(rxq->tq, &rxq->intrtask);
|
||||
taskqueue_enqueue(rxq->tq, &rxq->intrtask);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1408,7 +1408,7 @@ xn_txq_intr(void *xtxq)
|
||||
{
|
||||
struct netfront_txq *txq = xtxq;
|
||||
|
||||
taskqueue_enqueue_fast(txq->tq, &txq->intrtask);
|
||||
taskqueue_enqueue(txq->tq, &txq->intrtask);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -737,13 +737,6 @@ taskqueue_create_fast(const char *name, int mflags,
|
||||
MTX_SPIN, "fast_taskqueue");
|
||||
}
|
||||
|
||||
/* NB: for backwards compatibility */
|
||||
int
|
||||
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
|
||||
{
|
||||
return taskqueue_enqueue(queue, task);
|
||||
}
|
||||
|
||||
static void *taskqueue_fast_ih;
|
||||
|
||||
static void
|
||||
|
@ -84,7 +84,7 @@ dummynet(void *arg)
|
||||
{
|
||||
|
||||
(void)arg; /* UNUSED */
|
||||
taskqueue_enqueue_fast(dn_tq, &dn_task);
|
||||
taskqueue_enqueue(dn_tq, &dn_task);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -198,7 +198,6 @@ TASKQUEUE_DECLARE(thread);
|
||||
* from a fast interrupt handler context.
|
||||
*/
|
||||
TASKQUEUE_DECLARE(fast);
|
||||
int taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task);
|
||||
struct taskqueue *taskqueue_create_fast(const char *name, int mflags,
|
||||
taskqueue_enqueue_fn enqueue,
|
||||
void *context);
|
||||
|
@ -179,7 +179,7 @@ done:
|
||||
}
|
||||
|
||||
if (enqueue) {
|
||||
taskqueue_enqueue_fast(unit->fault_taskqueue,
|
||||
taskqueue_enqueue(unit->fault_taskqueue,
|
||||
&unit->fault_task);
|
||||
}
|
||||
return (FILTER_HANDLED);
|
||||
|
@ -325,7 +325,7 @@ dmar_qi_intr(void *arg)
|
||||
|
||||
unit = arg;
|
||||
KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
|
||||
taskqueue_enqueue_fast(unit->qi_taskqueue, &unit->qi_task);
|
||||
taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
|
||||
return (FILTER_HANDLED);
|
||||
}
|
||||
|
||||
|
@ -508,7 +508,7 @@ mca_record_entry(enum scan_mode mode, const struct mca_record *record)
|
||||
mca_count++;
|
||||
mtx_unlock_spin(&mca_lock);
|
||||
if (mode == CMCI)
|
||||
taskqueue_enqueue_fast(mca_tq, &mca_refill_task);
|
||||
taskqueue_enqueue(mca_tq, &mca_refill_task);
|
||||
}
|
||||
|
||||
#ifdef DEV_APIC
|
||||
@ -686,7 +686,7 @@ static void
|
||||
mca_periodic_scan(void *arg)
|
||||
{
|
||||
|
||||
taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
|
||||
taskqueue_enqueue(mca_tq, &mca_scan_task);
|
||||
callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
|
||||
}
|
||||
|
||||
@ -700,7 +700,7 @@ sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
|
||||
if (error)
|
||||
return (error);
|
||||
if (i)
|
||||
taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
|
||||
taskqueue_enqueue(mca_tq, &mca_scan_task);
|
||||
return (0);
|
||||
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user