Use true and false when dealing with the bool type in the LinuxKPI.

No functional change.

MFC after:		1 week
Sponsored by:		Mellanox Technologies
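Note on why this is not a functional change: in C99, bool is _Bool, and
any scalar assigned to it is converted to 0 or 1, so "retval = 1" and
"retval = true" compile to identical code; the new literals simply match
the declared type. A minimal userland sketch of that conversion rule
(illustrative only, not part of this commit):

	#include <stdbool.h>
	#include <stdio.h>

	int
	main(void)
	{
		bool a = 0;	/* old style: integer literal */
		bool b = false;	/* new style: boolean constant */
		bool c = 42;	/* any nonzero scalar converts to 1 */

		/* prints "a=0 b=0 c=1" */
		printf("a=%d b=%d c=%d\n", a, b, c);
		return (0);
	}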
Author:		Hans Petter Selasky
Date:		2019-09-11 08:24:47 +00:00
Commit:		4c8ba7d94f
Parent:		16732c193c


@@ -92,7 +92,7 @@ linux_work_exec_unblock(struct work_struct *work)
 {
 	struct workqueue_struct *wq;
 	struct work_exec *exec;
-	bool retval = 0;
+	bool retval = false;
 
 	wq = work->work_queue;
 	if (unlikely(wq == NULL))
@@ -102,7 +102,7 @@ linux_work_exec_unblock(struct work_struct *work)
 	TAILQ_FOREACH(exec, &wq->exec_head, entry) {
 		if (exec->target == work) {
 			exec->target = NULL;
-			retval = 1;
+			retval = true;
 			break;
 		}
 	}
@@ -144,14 +144,14 @@ linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
 	case WORK_ST_EXEC:
 	case WORK_ST_CANCEL:
 		if (linux_work_exec_unblock(work) != 0)
-			return (1);
+			return (true);
 		/* FALLTHROUGH */
 	case WORK_ST_IDLE:
 		work->work_queue = wq;
 		taskqueue_enqueue(wq->taskqueue, &work->work_task);
-		return (1);
+		return (true);
 	default:
-		return (0);		/* already on a queue */
+		return (false);		/* already on a queue */
 	}
 }
 
@@ -181,7 +181,7 @@ linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	case WORK_ST_CANCEL:
 		if (delay == 0 && linux_work_exec_unblock(&dwork->work) != 0) {
 			dwork->timer.expires = jiffies;
-			return (1);
+			return (true);
 		}
 		/* FALLTHROUGH */
 	case WORK_ST_IDLE:
@@ -201,9 +201,9 @@ linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			    &linux_delayed_work_timer_fn, dwork);
 			mtx_unlock(&dwork->timer.mtx);
 		}
-		return (1);
+		return (true);
 	default:
-		return (0);		/* already on a queue */
+		return (false);		/* already on a queue */
 	}
 }
 
@@ -391,7 +391,7 @@ linux_cancel_delayed_work(struct delayed_work *dwork)
 		if (linux_cancel_timer(dwork, 0)) {
 			atomic_cmpxchg(&dwork->work.state,
 			    WORK_ST_CANCEL, WORK_ST_IDLE);
-			return (1);
+			return (true);
 		}
 		/* FALLTHROUGH */
 	case WORK_ST_TASK:
@@ -399,11 +399,11 @@ linux_cancel_delayed_work(struct delayed_work *dwork)
 		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
 			atomic_cmpxchg(&dwork->work.state,
 			    WORK_ST_CANCEL, WORK_ST_IDLE);
-			return (1);
+			return (true);
 		}
 		/* FALLTHROUGH */
 	default:
-		return (0);
+		return (false);
 	}
 }
 
@@ -467,14 +467,14 @@ bool
 linux_flush_work(struct work_struct *work)
 {
 	struct taskqueue *tq;
-	int retval;
+	bool retval;
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 	    "linux_flush_work() might sleep");
 
 	switch (atomic_read(&work->state)) {
 	case WORK_ST_IDLE:
-		return (0);
+		return (false);
 	default:
 		tq = work->work_queue->taskqueue;
 		retval = taskqueue_poll_is_busy(tq, &work->work_task);
@@ -492,14 +492,14 @@ bool
 linux_flush_delayed_work(struct delayed_work *dwork)
 {
 	struct taskqueue *tq;
-	int retval;
+	bool retval;
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 	    "linux_flush_delayed_work() might sleep");
 
 	switch (atomic_read(&dwork->work.state)) {
 	case WORK_ST_IDLE:
-		return (0);
+		return (false);
 	case WORK_ST_TIMER:
 		if (linux_cancel_timer(dwork, 1))
 			linux_delayed_work_enqueue(dwork);
@@ -523,9 +523,9 @@ linux_work_pending(struct work_struct *work)
 	case WORK_ST_TIMER:
 	case WORK_ST_TASK:
 	case WORK_ST_CANCEL:
-		return (1);
+		return (true);
 	default:
-		return (0);
+		return (false);
 	}
 }
 
@@ -539,12 +539,12 @@ linux_work_busy(struct work_struct *work)
 
 	switch (atomic_read(&work->state)) {
 	case WORK_ST_IDLE:
-		return (0);
+		return (false);
 	case WORK_ST_EXEC:
 		tq = work->work_queue->taskqueue;
 		return (taskqueue_poll_is_busy(tq, &work->work_task));
 	default:
-		return (1);
+		return (true);
 	}
 }
 