When a buffer is removed from a buffer queue, remember its block number
and use it as the "currently active" position in the disk sort calculations.
parent 2cfa0a0381
commit eda00cb5d2
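
The change below adds a last_pblkno field to the buffer queue: when the request at
the head of the queue is removed and handed to the driver, its block number is
recorded, and bufqdisksort then orders new arrivals against that position instead of
against the first request still sitting in the queue. Here is a minimal userland
sketch of the idea, assuming nothing from the kernel sources: the names (queue,
last_blkno, WRAP, enqueue, dequeue) are invented for illustration, and a plain array
stands in for the kernel's TAILQ.

/*
 * Toy one-way elevator: requests are kept in C-LOOK order relative to
 * last_blkno, the block number of the request most recently handed to
 * the drive.  Remembering that number when the head request is removed
 * keeps new arrivals sorted against the drive's real position, even
 * while the queue is momentarily empty.  No overflow checking; this is
 * a sketch, not the kernel code.
 */
#include <stdio.h>
#include <string.h>

#define MAXQ	16
#define WRAP	1000000L	/* larger than any block number we use */

static long queue[MAXQ];
static int qlen;
static long last_blkno;		/* position of the in-flight request */

/* Sort key: blocks at/after the remembered position sort before wrapped ones. */
static long
key(long blkno)
{
	return (blkno >= last_blkno ? blkno : blkno + WRAP);
}

static void
enqueue(long blkno)
{
	int i;

	for (i = 0; i < qlen && key(queue[i]) <= key(blkno); i++)
		continue;
	memmove(&queue[i + 1], &queue[i], (qlen - i) * sizeof(queue[0]));
	queue[i] = blkno;
	qlen++;
}

static long
dequeue(void)
{
	long blkno = queue[0];

	/* Remember it: this request is now the "currently active" one. */
	last_blkno = blkno;
	qlen--;
	memmove(&queue[0], &queue[1], qlen * sizeof(queue[0]));
	return (blkno);
}

int
main(void)
{
	long blks[] = { 40, 10, 70, 20 };
	int i;

	for (i = 0; i < 4; i++)
		enqueue(blks[i]);
	printf("service %ld\n", dequeue());	/* 10 */
	printf("service %ld\n", dequeue());	/* 20 */
	enqueue(15);	/* behind the remembered position (20): next sweep */
	while (qlen > 0)
		printf("service %ld\n", dequeue());	/* 40, 70, then 15 */
	return (0);
}

Running it services 10 and 20 first; the late arrival at block 15, which lies behind
the remembered position 20, is deferred to the next sweep (40, 70, then 15).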
ufs_disksubr.c

@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
- * $Id: ufs_disksubr.c,v 1.34 1998/02/20 13:37:40 bde Exp $
+ * $Id: ufs_disksubr.c,v 1.35 1998/07/28 18:25:51 bde Exp $
  */
 
 #include <sys/param.h>
@@ -69,7 +69,9 @@ bufqdisksort(bufq, bp)
 {
 	struct buf *bq;
 	struct buf *bn;
+	struct buf *be;
 
+	be = TAILQ_LAST(&bufq->queue, buf_queue);
 	/*
	 * If the queue is empty or we are an
	 * ordered transaction, then it's easy.
@@ -86,38 +88,62 @@ bufqdisksort(bufq, bp)
 		 * we can only insert after the insert
 		 * point.
 		 */
-		bq = TAILQ_NEXT(bufq->insert_point, b_act);
-		if (bq == NULL) {
-			bufq_insert_tail(bufq, bp);
-			return;
-		}
-	}
-
-	/*
-	 * If we lie before the first (currently active) request, then we
-	 * must add ourselves to the second request list.
-	 */
-	if (bp->b_pblkno < bq->b_pblkno) {
-
-		bq = bufq->switch_point;
-		/*
-		 * If we are starting a new secondary list, then it's easy.
-		 */
-		if (bq == NULL) {
-			bufq->switch_point = bp;
-			bufq_insert_tail(bufq, bp);
-			return;
-		}
-		if (bp->b_pblkno < bq->b_pblkno) {
-			bufq->switch_point = bp;
-			TAILQ_INSERT_BEFORE(bq, bp, b_act);
-			return;
+		bq = bufq->insert_point;
+	} else {
+
+		/*
+		 * If we lie before the last removed (currently active)
+		 * request, and are not inserting ourselves into the
+		 * "locked" portion of the list, then we must add ourselves
+		 * to the second request list.
+		 */
+		if (bp->b_pblkno < bufq->last_pblkno) {
+
+			bq = bufq->switch_point;
+			/*
+			 * If we are starting a new secondary list,
+			 * then it's easy.
+			 */
+			if (bq == NULL) {
+				bufq->switch_point = bp;
+				bufq_insert_tail(bufq, bp);
+				return;
+			}
+			/*
+			 * If we lie ahead of the current switch point,
+			 * insert us before the switch point and move
+			 * the switch point.
+			 */
+			if (bp->b_pblkno < bq->b_pblkno) {
+				bufq->switch_point = bp;
+				TAILQ_INSERT_BEFORE(bq, bp, b_act);
+				return;
+			}
+		} else {
+			if (bufq->switch_point != NULL)
+				be = TAILQ_PREV(bufq->switch_point,
+						buf_queue, b_act);
+			/*
+			 * If we lie between last_pblkno and bq,
+			 * insert before bq.
+			 */
+			if (bp->b_pblkno < bq->b_pblkno) {
+				TAILQ_INSERT_BEFORE(bq, bp, b_act);
+				return;
+			}
 		}
 	}
 
 	/*
-	 * Request is at/after the current request...
-	 * sort in the first request list.
+	 * Request is at/after our current position in the list.
+	 * Optimize for sequential I/O by seeing if we go at the tail.
 	 */
+	if (bp->b_pblkno > be->b_pblkno) {
+		TAILQ_INSERT_AFTER(&bufq->queue, be, bp, b_act);
+		return;
+	}
+
+	/* Otherwise, insertion sort */
 	while ((bn = TAILQ_NEXT(bq, b_act)) != NULL) {
 
 		/*
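
The hunk above also adds a sequential-I/O fast path: be is pointed at the tail of
the queue up front, and a request whose block number lies beyond the tail is
appended in O(1) instead of being insertion-sorted. Below is a hedged, standalone
userland sketch of that pattern using the BSD sys/queue.h TAILQ macros; struct node,
struct nodeq and their fields are stand-ins, not the kernel's struct buf.

/*
 * Sorted insert with a tail fast path: grab the last element first and,
 * if the new block sorts after it, append directly.  Sequential writes,
 * which arrive in ascending block order, then never pay for the walk.
 * Error checking (malloc failure, freeing) is omitted in this sketch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	long blkno;
	TAILQ_ENTRY(node) link;
};

TAILQ_HEAD(nodeq, node);

static void
sorted_insert(struct nodeq *q, struct node *np)
{
	struct node *last, *cur;

	last = TAILQ_LAST(q, nodeq);
	if (last == NULL || np->blkno > last->blkno) {
		/* Common sequential case: new block is past the tail. */
		TAILQ_INSERT_TAIL(q, np, link);
		return;
	}
	/* Otherwise fall back to a linear insertion sort. */
	TAILQ_FOREACH(cur, q, link) {
		if (np->blkno < cur->blkno) {
			TAILQ_INSERT_BEFORE(cur, np, link);
			return;
		}
	}
	TAILQ_INSERT_TAIL(q, np, link);
}

int
main(void)
{
	struct nodeq q = TAILQ_HEAD_INITIALIZER(q);
	long blks[] = { 8, 16, 24, 12 };	/* mostly sequential */
	struct node *np;
	size_t i;

	for (i = 0; i < sizeof(blks) / sizeof(blks[0]); i++) {
		np = malloc(sizeof(*np));
		np->blkno = blks[i];
		sorted_insert(&q, np);
	}
	TAILQ_FOREACH(np, &q, link)
		printf("%ld\n", np->blkno);	/* 8 12 16 24 */
	return (0);
}

With the mostly ascending arrival order above, three of the four inserts take the
tail path and only block 12 pays for the list walk.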
buf.h

@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)buf.h	8.9 (Berkeley) 3/30/95
- * $Id: buf.h,v 1.54 1998/08/24 17:47:08 phk Exp $
+ * $Id: buf.h,v 1.55 1998/09/05 14:13:12 phk Exp $
  */
 
 #ifndef _SYS_BUF_H_
@@ -170,32 +170,34 @@ struct buf {
 
 #define NOOFFSET	(-1LL)	/* No buffer offset calculated yet */
 
-typedef struct buf_queue_head {
+struct buf_queue_head {
 	TAILQ_HEAD(buf_queue, buf) queue;
+	daddr_t	last_pblkno;
 	struct	buf *insert_point;
 	struct	buf *switch_point;
-} buf_queue_head, *buf_queue_head_t;
+};
 
-static __inline void bufq_init __P((buf_queue_head *head));
+static __inline void bufq_init __P((struct buf_queue_head *head));
 
-static __inline void bufq_insert_tail __P((buf_queue_head *head,
-					   struct buf *bp));
-
-static __inline void bufq_remove __P((buf_queue_head *head,
+static __inline void bufq_insert_tail __P((struct buf_queue_head *head,
 				      struct buf *bp));
 
-static __inline struct buf *bufq_first __P((buf_queue_head *head));
+static __inline void bufq_remove __P((struct buf_queue_head *head,
+				      struct buf *bp));
+
+static __inline struct buf *bufq_first __P((struct buf_queue_head *head));
 
 static __inline void
-bufq_init(buf_queue_head *head)
+bufq_init(struct buf_queue_head *head)
 {
 	TAILQ_INIT(&head->queue);
+	head->last_pblkno = 0;
 	head->insert_point = NULL;
 	head->switch_point = NULL;
 }
 
 static __inline void
-bufq_insert_tail(buf_queue_head *head, struct buf *bp)
+bufq_insert_tail(struct buf_queue_head *head, struct buf *bp)
 {
 	if ((bp->b_flags & B_ORDERED) != 0) {
 		head->insert_point = bp;
@@ -205,19 +207,24 @@ bufq_insert_tail(buf_queue_head *head, struct buf *bp)
 }
 
 static __inline void
-bufq_remove(buf_queue_head *head, struct buf *bp)
+bufq_remove(struct buf_queue_head *head, struct buf *bp)
 {
-	if (bp == head->insert_point)
-		head->insert_point = TAILQ_PREV(bp, buf_queue, b_act);
-	if (bp == head->switch_point)
-		head->switch_point = TAILQ_NEXT(bp, b_act);
+	if (bp == head->insert_point) {
+		head->insert_point = TAILQ_PREV(bp, buf_queue, b_act);
+		if (head->insert_point == NULL)
+			head->last_pblkno = 0;
+	} else if (bp == TAILQ_FIRST(&head->queue)) {
+		head->last_pblkno = bp->b_pblkno;
+	}
 	TAILQ_REMOVE(&head->queue, bp, b_act);
+	if (TAILQ_FIRST(&head->queue) == head->switch_point)
+		head->switch_point = NULL;
 }
 
 static __inline struct buf *
-bufq_first(buf_queue_head *head)
+bufq_first(struct buf_queue_head *head)
 {
 	return (TAILQ_FIRST(&head->queue));
 }
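
A header detail that both files rely on: the queue is declared
TAILQ_HEAD(buf_queue, buf), giving the head structure the tag buf_queue. TAILQ_LAST
and TAILQ_PREV, used by bufqdisksort and bufq_remove above, walk the list backwards
through the head structure and therefore take that tag name as an argument. A small
standalone illustration with made-up names (struct item, struct itemq), not the
kernel types:

#include <stdio.h>
#include <sys/queue.h>

struct item {
	int v;
	TAILQ_ENTRY(item) link;
};

/* The tag "itemq" is what TAILQ_LAST/TAILQ_PREV need as their headname. */
TAILQ_HEAD(itemq, item);

int
main(void)
{
	struct itemq q = TAILQ_HEAD_INITIALIZER(q);
	struct item a, b, c;
	struct item *p;

	a.v = 1;
	b.v = 2;
	c.v = 3;
	TAILQ_INSERT_TAIL(&q, &a, link);
	TAILQ_INSERT_TAIL(&q, &b, link);
	TAILQ_INSERT_TAIL(&q, &c, link);

	/* Walk backwards from the tail, as bufqdisksort and bufq_remove do. */
	for (p = TAILQ_LAST(&q, itemq); p != NULL; p = TAILQ_PREV(p, itemq, link))
		printf("%d\n", p->v);	/* 3 2 1 */
	return (0);
}

The same requirement is why the kernel code passes the tag, not a variable name, in
TAILQ_LAST(&bufq->queue, buf_queue) and TAILQ_PREV(bp, buf_queue, b_act).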