Fix a race condition in the linuxkpi workqueue

Consider the following scenario:

1. A delayed_work struct is in the WORK_ST_TIMER state.
2. Thread A calls mod_delayed_work().
3. Thread B (a callout thread) simultaneously calls
linux_delayed_work_timer_fn().

The following sequence of events is possible:

A: Call linux_cancel_delayed_work()
A: Change state from TIMER to CANCEL
B: Change state from CANCEL to TASK
B: taskqueue_enqueue() the task
A: taskqueue_cancel() the task
A: Call linux_queue_delayed_work_on().  This is a no-op because the
state is WORK_ST_TASK.

As a result, the task in the delayed_work struct will never be
invoked.  This causes address resolution in ib_addr.c to stall
permanently, since it never tries to reschedule a task that it
believes is already scheduled.
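
To make the interleaving concrete, here is a minimal userspace sketch
of the same lost transition, using C11 atomics in place of the
linuxkpi atomic_cmpxchg().  The state names mirror linux_work.c, but
the program itself is illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum work_state { ST_IDLE, ST_TIMER, ST_TASK, ST_EXEC, ST_CANCEL };

int
main(void)
{
	_Atomic enum work_state state = ST_TIMER;	/* step 1 */
	enum work_state expect;
	bool task_queued = false;

	/* Thread A: the cancel path moves TIMER -> CANCEL. */
	expect = ST_TIMER;
	atomic_compare_exchange_strong(&state, &expect, ST_CANCEL);

	/* Thread B: the callout fires, moves CANCEL -> TASK, enqueues. */
	expect = ST_CANCEL;
	if (atomic_compare_exchange_strong(&state, &expect, ST_TASK))
		task_queued = true;		/* taskqueue_enqueue() */

	/* Thread A: taskqueue_cancel() removes the task B just queued. */
	task_queued = false;

	/* Thread A: the re-queue is a no-op because the state is ST_TASK. */
	expect = ST_IDLE;
	if (!atomic_compare_exchange_strong(&state, &expect, ST_TIMER) &&
	    !task_queued)
		printf("stuck: state=%d but nothing is queued\n", (int)state);
	return (0);
}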

Fix this by taking the timer mutex in the cancel path (the same lock
that is held while the callout runs).  This prevents the callout from
changing the state of the work item until the cancel is complete,
closing the race.
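
The mutex can act as a barrier here because linuxkpi initializes the
delayed-work timer with callout_init_mtx(), which makes the callout
subsystem acquire that same mutex before invoking the handler.  A
compressed sketch of the pattern (the hypo_* names are hypothetical,
not linuxkpi identifiers):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct hypo_timer {
	struct mtx	mtx;
	struct callout	callout;
};

/* The callout subsystem acquires tp->mtx before calling this. */
static void
hypo_timer_fn(void *arg)
{
	(void)arg;
	/* State transitions here cannot race with a locked cancel path. */
}

static void
hypo_timer_init(struct hypo_timer *tp)
{
	mtx_init(&tp->mtx, "hypo_timer", NULL, MTX_DEF);
	callout_init_mtx(&tp->callout, &tp->mtx, 0);
	callout_reset(&tp->callout, hz, hypo_timer_fn, tp);
}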

Differential Revision:	https://reviews.freebsd.org/D28420
Reviewed by: hselasky
MFC after: 2 months
Ryan Stone, 2021-01-07 12:25:56 -05:00
commit b58cf1cb35 (parent 1eec5861d5)

@@ -435,13 +435,17 @@ linux_cancel_delayed_work(struct delayed_work *dwork)
 		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* NOP */
 	};
 	struct taskqueue *tq;
+	bool cancelled;
 
+	mtx_lock(&dwork->timer.mtx);
 	switch (linux_update_state(&dwork->work.state, states)) {
 	case WORK_ST_TIMER:
 	case WORK_ST_CANCEL:
-		if (linux_cancel_timer(dwork, 0)) {
+		cancelled = (callout_stop(&dwork->timer.callout) == 1);
+		if (cancelled) {
 			atomic_cmpxchg(&dwork->work.state,
 			    WORK_ST_CANCEL, WORK_ST_IDLE);
+			mtx_unlock(&dwork->timer.mtx);
 			return (true);
 		}
 		/* FALLTHROUGH */
@@ -450,10 +454,12 @@ linux_cancel_delayed_work(struct delayed_work *dwork)
 		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
 			atomic_cmpxchg(&dwork->work.state,
 			    WORK_ST_CANCEL, WORK_ST_IDLE);
+			mtx_unlock(&dwork->timer.mtx);
 			return (true);
 		}
 		/* FALLTHROUGH */
 	default:
+		mtx_unlock(&dwork->timer.mtx);
 		return (false);
 	}
 }
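
Note that the (callout_stop(...) == 1) comparison above is deliberate:
callout_stop() reports 1 only when it removed a still-pending callout,
so any other result is treated as "the timer already fired" and the
code falls through to cancelling the enqueued task instead.
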
@@ -475,37 +481,36 @@ linux_cancel_delayed_work_sync(struct delayed_work *dwork)
 	};
 	struct taskqueue *tq;
 	bool retval = false;
+	int ret, state;
+	bool cancelled;
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 	    "linux_cancel_delayed_work_sync() might sleep");
-retry:
-	switch (linux_update_state(&dwork->work.state, states)) {
+	mtx_lock(&dwork->timer.mtx);
+
+	state = linux_update_state(&dwork->work.state, states);
+	switch (state) {
 	case WORK_ST_IDLE:
+		mtx_unlock(&dwork->timer.mtx);
 		return (retval);
-	case WORK_ST_EXEC:
-		tq = dwork->work.work_queue->taskqueue;
-		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0)
-			taskqueue_drain(tq, &dwork->work.work_task);
-		goto retry;	/* work may have restarted itself */
 	case WORK_ST_TIMER:
 	case WORK_ST_CANCEL:
-		if (linux_cancel_timer(dwork, 1)) {
-			/*
-			 * Make sure taskqueue is also drained before
-			 * returning:
-			 */
-			tq = dwork->work.work_queue->taskqueue;
-			taskqueue_drain(tq, &dwork->work.work_task);
-			retval = true;
-			goto retry;
-		}
-		/* FALLTHROUGH */
+		cancelled = (callout_stop(&dwork->timer.callout) == 1);
+
+		tq = dwork->work.work_queue->taskqueue;
+		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
+		mtx_unlock(&dwork->timer.mtx);
+
+		callout_drain(&dwork->timer.callout);
+		taskqueue_drain(tq, &dwork->work.work_task);
+		return (cancelled || (ret != 0));
 	default:
 		tq = dwork->work.work_queue->taskqueue;
-		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0)
+		ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL);
+		mtx_unlock(&dwork->timer.mtx);
+		if (ret != 0)
 			taskqueue_drain(tq, &dwork->work.work_task);
-		retval = true;
-		goto retry;
+		return (ret != 0);
 	}
 }
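
Note the ordering in the sync path: callout_stop() and
taskqueue_cancel() are issued with the mutex held, but the mutex is
dropped before callout_drain() and taskqueue_drain().  Both drains
sleep until any in-flight handler finishes, and the callout handler
itself runs with dwork->timer.mtx held, so draining with the lock
still held could deadlock.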