Implement a bio_taskqueue() to reduce the number of context switches in
disk I/O processing.

The intent is that a disk driver, in its hardware interrupt routine, will
simply schedule the bio on the task queue together with a routine that
finishes off whatever needs to be done.  The g_up thread will then run this
routine; the likely outcome is a biodone(), which queues the bio on g_up's
regular queue, where it is picked up and processed.  Compared to using the
regular taskqueue, this saves one context switch.

Also change the scheduling of the g_up and g_down queues to be water-tight,
at the cost of breaking the userland regression test-shims.

Input and ideas from:	scottl
commit f0e185d705
parent 55b84e8a54
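For illustration only (not part of this commit): a minimal sketch of how a disk driver's interrupt routine could use the new interface, following the description above. All xd_* names and the softc layout are hypothetical.

/*
 * Sketch only, not from this commit: hypothetical "xd" driver showing
 * the intended use of bio_taskqueue().
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>

struct xd_softc {			/* hypothetical per-device state */
	struct bio	*xd_active;	/* bio currently on the hardware */
};

/*
 * Completion handler; runs later in the g_up thread, not in interrupt
 * context.  The biodone() puts the bio on g_up's regular queue.
 */
static void
xd_done_task(struct bio *bp, void *arg)
{
	struct xd_softc *sc = arg;

	sc->xd_active = NULL;		/* hypothetical bookkeeping */
	biodone(bp);
}

/*
 * Hardware interrupt routine: just hand the bio to the g_up thread and
 * return, saving the context switch a detour through the generic
 * taskqueue would cost.
 */
static void
xd_intr(void *arg)
{
	struct xd_softc *sc = arg;

	bio_taskqueue(sc->xd_active, xd_done_task, sc);
}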
@@ -59,6 +59,7 @@
 
 static struct g_bioq g_bio_run_down;
 static struct g_bioq g_bio_run_up;
+static struct g_bioq g_bio_run_task;
 static struct g_bioq g_bio_idle;
 
 static u_int pace;
@@ -101,13 +102,11 @@ g_bioq_first(struct g_bioq *bq)
 {
 	struct bio *bp;
 
-	g_bioq_lock(bq);
 	bp = TAILQ_FIRST(&bq->bio_queue);
 	if (bp != NULL) {
 		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
 		bq->bio_queue_length--;
 	}
-	g_bioq_unlock(bq);
 	return (bp);
 }
 
@@ -126,7 +125,9 @@ g_new_bio(void)
 {
 	struct bio *bp;
 
+	g_bioq_lock(&g_bio_idle);
 	bp = g_bioq_first(&g_bio_idle);
+	g_bioq_unlock(&g_bio_idle);
 	if (bp == NULL)
 		bp = g_malloc(sizeof *bp, M_NOWAIT | M_ZERO);
 	/* g_trace(G_T_BIO, "g_new_bio() = %p", bp); */
@@ -167,6 +168,7 @@ g_io_init()
 
 	g_bioq_init(&g_bio_run_down);
 	g_bioq_init(&g_bio_run_up);
+	g_bioq_init(&g_bio_run_task);
 	g_bioq_init(&g_bio_idle);
 }
 
@@ -383,11 +385,20 @@ g_io_schedule_down(struct thread *tp __unused)
 	struct bio *bp;
 	off_t excess;
 	int error;
+	struct mtx mymutex;
 
+	bzero(&mymutex, sizeof mymutex);
+	mtx_init(&mymutex, "g_xdown", MTX_DEF, 0);
+
 	for(;;) {
+		g_bioq_lock(&g_bio_run_down);
 		bp = g_bioq_first(&g_bio_run_down);
-		if (bp == NULL)
-			break;
+		if (bp == NULL) {
+			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
+			    PRIBIO | PDROP, "g_down", hz/10);
+			continue;
+		}
+		g_bioq_unlock(&g_bio_run_down);
 		error = g_io_check(bp);
 		if (error) {
 			g_io_deliver(bp, error);
@@ -412,7 +423,9 @@ g_io_schedule_down(struct thread *tp __unused)
 		default:
 			break;
 		}
+		mtx_lock(&mymutex);
 		bp->bio_to->geom->start(bp);
+		mtx_unlock(&mymutex);
 		if (pace) {
 			pace--;
 			break;
@@ -420,19 +433,51 @@ g_io_schedule_down(struct thread *tp __unused)
 	}
 }
 
+void
+bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
+{
+	bp->bio_task = func;
+	bp->bio_task_arg = arg;
+	/*
+	 * The taskqueue is actually just a second queue off the "up"
+	 * queue, so we use the same lock.
+	 */
+	g_bioq_lock(&g_bio_run_up);
+	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
+	g_bio_run_task.bio_queue_length++;
+	wakeup(&g_wait_up);
+	g_bioq_unlock(&g_bio_run_up);
+}
+
+
 void
 g_io_schedule_up(struct thread *tp __unused)
 {
 	struct bio *bp;
-	struct g_consumer *cp;
+	struct mtx mymutex;
 
+	bzero(&mymutex, sizeof mymutex);
+	mtx_init(&mymutex, "g_xup", MTX_DEF, 0);
 	for(;;) {
+		g_bioq_lock(&g_bio_run_up);
+		bp = g_bioq_first(&g_bio_run_task);
+		if (bp != NULL) {
+			g_bioq_unlock(&g_bio_run_up);
+			mtx_lock(&mymutex);
+			bp->bio_task(bp, bp->bio_task_arg);
+			mtx_unlock(&mymutex);
+			continue;
+		}
 		bp = g_bioq_first(&g_bio_run_up);
-		if (bp == NULL)
-			break;
-		cp = bp->bio_from;
-		biodone(bp);
+		if (bp != NULL) {
+			g_bioq_unlock(&g_bio_run_up);
+			mtx_lock(&mymutex);
+			biodone(bp);
+			mtx_unlock(&mymutex);
+			continue;
+		}
+		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
+		    PRIBIO | PDROP, "g_up", hz/10);
 	}
 }
 
@@ -83,16 +83,11 @@ g_up_procbody(void)
 {
 	struct proc *p = g_up_proc;
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
-	struct mtx mymutex;
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	bzero(&mymutex, sizeof mymutex);
-	mtx_init(&mymutex, "g_up", MTX_DEF, 0);
-	mtx_lock(&mymutex);
 	tp->td_base_pri = PRIBIO;
 	for(;;) {
 		g_io_schedule_up(tp);
-		msleep(&g_wait_up, &mymutex, PRIBIO, "g_up", hz/10);
 	}
 }
 
@@ -109,16 +104,11 @@ g_down_procbody(void)
 {
 	struct proc *p = g_down_proc;
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
-	struct mtx mymutex;
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	bzero(&mymutex, sizeof mymutex);
-	mtx_init(&mymutex, "g_down", MTX_DEF, 0);
-	mtx_lock(&mymutex);
 	tp->td_base_pri = PRIBIO;
 	for(;;) {
 		g_io_schedule_down(tp);
-		msleep(&g_wait_down, &mymutex, PRIBIO, "g_down", hz/10);
 	}
 }
 
@@ -45,6 +45,10 @@
 #include <sys/queue.h>
 
 struct disk;
+struct bio;
+
+typedef void bio_task_t(struct bio *, void *);
+
 /*
  * The bio structure describes an I/O operation in the kernel.
  */
@@ -75,6 +79,9 @@ struct bio {
 	struct bio *bio_parent;		/* Pointer to parent */
 	struct bintime bio_t0;		/* Time request started */
 
+	bio_task_t *bio_task;		/* Task_queue handler */
+	void	*bio_task_arg;		/* Argument to above */
+
 	/* XXX: these go away when bio chaining is introduced */
 	daddr_t bio_pblkno;		/* physical block number */
 };
@@ -133,6 +140,8 @@ void bioq_disksort(struct bio_queue_head *ap, struct bio *bp);
 void bioq_init(struct bio_queue_head *head);
 void bioq_remove(struct bio_queue_head *head, struct bio *bp);
 
+void bio_taskqueue(struct bio *bp, bio_task_t *fund, void *arg);
+
 int physio(dev_t dev, struct uio *uio, int ioflag);
 #define physread physio
 #define physwrite physio