Improve debugger visibility into queuing functions by removing the macro
scheme for defining inline command queuing functions.

Prefer enums to #defines.

sys/dev/xen/blkfront/block.h
	Replace inline function generation performed by the
	XBDQ_COMMAND_QUEUE() macro with single instances of each
	inline function (init, enqueue, requeue, dequeue, remove).  This was
	made possible by using queue indexes instead of bit flags
	in the command structure, and passing the index enum as
	an argument to the functions.

	Improve panic/assert messages in the queue functions.

	Combine queue data and stats into a single data structure
	and declare an array of them instead of each queue individually.

	Convert command flags, softc state, and softc flags to enums.

sys/dev/xen/blkfront/blkfront.c
	Mechanical adjustments for the new queue API.

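	In condensed form, the caller-visible change (an illustrative
	sketch; the complete definitions appear in the diff below):

		/* Before: XBDQ_COMMAND_QUEUE(free, XBDQ_FREE) stamped
		 * out a private function set for each queue. */
		xbd_enqueue_free(cm);

		/* After: one shared inline function per operation; the
		 * target queue is selected by an enum index argument. */
		xbd_enqueue_cm(cm, XBD_Q_FREE);
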
Sponsored by:	Spectra Logic Corporation
MFC after:	1 week
commit e2c1fe9009
parent d31870574c
Justin T. Gibbs	2013-06-14 17:00:58 +00:00
2 changed files with 193 additions and 174 deletions

sys/dev/xen/blkfront/blkfront.c

@@ -83,10 +83,6 @@ static void xbd_startio(struct xbd_softc *sc);
#define XBD_SECTOR_SHFT 9
#define XBD_STATE_DISCONNECTED 0
#define XBD_STATE_CONNECTED 1
#define XBD_STATE_SUSPENDED 2
/*---------------------------- Global Static Data ----------------------------*/
static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
@@ -106,13 +102,14 @@ static void
xbd_free_command(struct xbd_command *cm)
{
KASSERT((cm->cm_flags & XBD_ON_XBDQ_MASK) == 0,
("Freeing command that is still on a queue\n"));
KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
("Freeing command that is still on queue %d.",
cm->cm_flags & XBDCF_Q_MASK));
cm->cm_flags = 0;
cm->cm_flags = XBDCF_INITIALIZER;
cm->cm_bp = NULL;
cm->cm_complete = NULL;
xbd_enqueue_free(cm);
xbd_enqueue_cm(cm, XBD_Q_FREE);
}
static void
@@ -212,13 +209,13 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
gnttab_free_grant_references(cm->cm_gref_head);
xbd_enqueue_busy(cm);
xbd_enqueue_cm(cm, XBD_Q_BUSY);
/*
* This flag means that we're probably executing in the busdma swi
* instead of in the startio context, so an explicit flush is needed.
*/
if (cm->cm_flags & XBD_CMD_FROZEN)
if (cm->cm_flags & XBDCF_FROZEN)
xbd_flush_requests(sc);
return;
@@ -233,8 +230,8 @@ xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
cm->cm_datalen, xbd_queue_cb, cm, 0);
if (error == EINPROGRESS) {
printf("EINPROGRESS\n");
sc->xbd_flags |= XBD_FROZEN;
cm->cm_flags |= XBD_CMD_FROZEN;
sc->xbd_flags |= XBDF_FROZEN;
cm->cm_flags |= XBDCF_FROZEN;
return (0);
}
@@ -259,14 +256,14 @@ xbd_bio_command(struct xbd_softc *sc)
struct xbd_command *cm;
struct bio *bp;
if (unlikely(sc->xbd_connected != XBD_STATE_CONNECTED))
if (unlikely(sc->xbd_state != XBD_STATE_CONNECTED))
return (NULL);
bp = xbd_dequeue_bio(sc);
if (bp == NULL)
return (NULL);
if ((cm = xbd_dequeue_free(sc)) == NULL) {
if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
xbd_requeue_bio(sc, bp);
return (NULL);
}
@@ -277,8 +274,8 @@ xbd_bio_command(struct xbd_softc *sc)
xbd_restart_queue_callback, sc,
sc->xbd_max_request_segments);
xbd_requeue_bio(sc, bp);
xbd_enqueue_free(cm);
sc->xbd_flags |= XBD_FROZEN;
xbd_enqueue_cm(cm, XBD_Q_FREE);
sc->xbd_flags |= XBDF_FROZEN;
return (NULL);
}
@@ -307,15 +304,15 @@ xbd_startio(struct xbd_softc *sc)
mtx_assert(&sc->xbd_io_lock, MA_OWNED);
if (sc->xbd_connected != XBD_STATE_CONNECTED)
if (sc->xbd_state != XBD_STATE_CONNECTED)
return;
while (RING_FREE_REQUESTS(&sc->xbd_ring) >=
sc->xbd_max_request_blocks) {
if (sc->xbd_flags & XBD_FROZEN)
if (sc->xbd_flags & XBDF_FROZEN)
break;
cm = xbd_dequeue_ready(sc);
cm = xbd_dequeue_cm(sc, XBD_Q_READY);
if (cm == NULL)
cm = xbd_bio_command(sc);
@@ -374,7 +371,7 @@ xbd_int(void *xsc)
mtx_lock(&sc->xbd_io_lock);
if (unlikely(sc->xbd_connected == XBD_STATE_DISCONNECTED)) {
if (unlikely(sc->xbd_state == XBD_STATE_DISCONNECTED)) {
mtx_unlock(&sc->xbd_io_lock);
return;
}
@@ -387,7 +384,7 @@ xbd_int(void *xsc)
bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
cm = &sc->xbd_shadow[bret->id];
xbd_remove_busy(cm);
xbd_remove_cm(cm, XBD_Q_BUSY);
i += xbd_completion(cm);
if (cm->cm_operation == BLKIF_OP_READ)
@@ -404,7 +401,7 @@ xbd_int(void *xsc)
* being freed as well. It's a cheap assumption even when
* wrong.
*/
sc->xbd_flags &= ~XBD_FROZEN;
sc->xbd_flags &= ~XBDF_FROZEN;
/*
* Directly call the i/o complete routine to save an
@@ -432,8 +429,8 @@ xbd_int(void *xsc)
xbd_startio(sc);
if (unlikely(sc->xbd_connected == XBD_STATE_SUSPENDED))
wakeup(&sc->xbd_cm_busy);
if (unlikely(sc->xbd_state == XBD_STATE_SUSPENDED))
wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]);
mtx_unlock(&sc->xbd_io_lock);
}
@@ -448,13 +445,13 @@ xbd_quiesce(struct xbd_softc *sc)
int mtd;
// While there are outstanding requests
while (!TAILQ_EMPTY(&sc->xbd_cm_busy)) {
while (!TAILQ_EMPTY(&sc->xbd_cm_q[XBD_Q_BUSY].q_tailq)) {
RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
if (mtd) {
/* Received request completions, update queue. */
xbd_int(sc);
}
if (!TAILQ_EMPTY(&sc->xbd_cm_busy)) {
if (!TAILQ_EMPTY(&sc->xbd_cm_q[XBD_Q_BUSY].q_tailq)) {
/*
* Still pending requests, wait for the disk i/o
* to complete.
@@ -469,7 +466,7 @@ static void
xbd_dump_complete(struct xbd_command *cm)
{
xbd_enqueue_complete(cm);
xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
}
static int
@@ -496,7 +493,7 @@ xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
/* Split the 64KB block as needed */
for (sbp=0; length > 0; sbp++) {
cm = xbd_dequeue_free(sc);
cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
if (cm == NULL) {
mtx_unlock(&sc->xbd_io_lock);
device_printf(sc->xbd_dev, "dump: no more commands?\n");
@@ -519,7 +516,7 @@ xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
cm->cm_sector_number = offset / dp->d_sectorsize;
cm->cm_complete = xbd_dump_complete;
xbd_enqueue_ready(cm);
xbd_enqueue_cm(cm, XBD_Q_READY);
length -= chunk;
offset += chunk;
@@ -534,7 +531,7 @@ xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
xbd_quiesce(sc); /* All quiet on the eastern front */
/* If there were any errors, bail out... */
while ((cm = xbd_dequeue_complete(sc)) != NULL) {
while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
if (cm->cm_status != BLKIF_RSP_OKAY) {
device_printf(sc->xbd_dev,
"Dump I/O failed at sector %jd\n",
@@ -558,7 +555,7 @@ xbd_open(struct disk *dp)
return (ENXIO);
}
sc->xbd_flags |= XBD_OPEN;
sc->xbd_flags |= XBDF_OPEN;
sc->xbd_users++;
return (0);
}
@@ -570,7 +567,7 @@ xbd_close(struct disk *dp)
if (sc == NULL)
return (ENXIO);
sc->xbd_flags &= ~XBD_OPEN;
sc->xbd_flags &= ~XBDF_OPEN;
if (--(sc->xbd_users) == 0) {
/*
* Check whether we have been instructed to close. We will
@@ -855,7 +852,7 @@ xbd_free(struct xbd_softc *sc)
/* Prevent new requests being issued until we fix things up. */
mtx_lock(&sc->xbd_io_lock);
sc->xbd_connected = XBD_STATE_DISCONNECTED;
sc->xbd_state = XBD_STATE_DISCONNECTED;
mtx_unlock(&sc->xbd_io_lock);
/* Free resources associated with old device channel. */
@@ -878,9 +875,9 @@ xbd_free(struct xbd_softc *sc)
bus_dma_tag_destroy(sc->xbd_io_dmat);
xbd_initq_free(sc);
xbd_initq_ready(sc);
xbd_initq_complete(sc);
xbd_initq_cm(sc, XBD_Q_FREE);
xbd_initq_cm(sc, XBD_Q_READY);
xbd_initq_cm(sc, XBD_Q_COMPLETE);
}
if (sc->xbd_irq) {
@@ -1054,6 +1051,7 @@ xbd_initialize(struct xbd_softc *sc)
if (cm->cm_sg_refs == NULL)
break;
cm->cm_id = i;
cm->cm_flags = XBDCF_INITIALIZER;
cm->cm_sc = sc;
if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
break;
@@ -1149,8 +1147,8 @@ xbd_connect(struct xbd_softc *sc)
unsigned int binfo;
int err, feature_barrier;
if ((sc->xbd_connected == XBD_STATE_CONNECTED) ||
(sc->xbd_connected == XBD_STATE_SUSPENDED))
if (sc->xbd_state == XBD_STATE_CONNECTED ||
sc->xbd_state == XBD_STATE_SUSPENDED)
return;
DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev));
@@ -1170,7 +1168,7 @@ xbd_connect(struct xbd_softc *sc)
"feature-barrier", "%lu", &feature_barrier,
NULL);
if (!err || feature_barrier)
sc->xbd_flags |= XBD_BARRIER;
sc->xbd_flags |= XBDF_BARRIER;
if (sc->xbd_disk == NULL) {
device_printf(dev, "%juMB <%s> at %s",
@@ -1187,9 +1185,9 @@ xbd_connect(struct xbd_softc *sc)
/* Kick pending requests. */
mtx_lock(&sc->xbd_io_lock);
sc->xbd_connected = XBD_STATE_CONNECTED;
sc->xbd_state = XBD_STATE_CONNECTED;
xbd_startio(sc);
sc->xbd_flags |= XBD_READY;
sc->xbd_flags |= XBDF_READY;
mtx_unlock(&sc->xbd_io_lock);
}
@@ -1260,17 +1258,13 @@ xbd_attach(device_t dev)
sc = device_get_softc(dev);
mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
xbd_initq_free(sc);
xbd_initq_busy(sc);
xbd_initq_ready(sc);
xbd_initq_complete(sc);
xbd_initq_bio(sc);
xbd_initqs(sc);
for (i = 0; i < XBD_MAX_RING_PAGES; i++)
sc->xbd_ring_ref[i] = GRANT_REF_INVALID;
sc->xbd_dev = dev;
sc->xbd_vdevice = vdevice;
sc->xbd_connected = XBD_STATE_DISCONNECTED;
sc->xbd_state = XBD_STATE_DISCONNECTED;
xbd_setup_sysctl(sc);
@@ -1285,7 +1279,7 @@ xbd_detach(device_t dev)
{
struct xbd_softc *sc = device_get_softc(dev);
DPRINTK("xbd_remove: %s removed\n", xenbus_get_node(dev));
DPRINTK("%s: %s removed\n", __func__, xenbus_get_node(dev));
xbd_free(sc);
mtx_destroy(&sc->xbd_io_lock);
@@ -1302,13 +1296,13 @@ xbd_suspend(device_t dev)
/* Prevent new requests being issued until we fix things up. */
mtx_lock(&sc->xbd_io_lock);
saved_state = sc->xbd_connected;
sc->xbd_connected = XBD_STATE_SUSPENDED;
saved_state = sc->xbd_state;
sc->xbd_state = XBD_STATE_SUSPENDED;
/* Wait for outstanding I/O to drain. */
retval = 0;
while (TAILQ_EMPTY(&sc->xbd_cm_busy) == 0) {
if (msleep(&sc->xbd_cm_busy, &sc->xbd_io_lock,
while (TAILQ_EMPTY(&sc->xbd_cm_q[XBD_Q_BUSY].q_tailq) == 0) {
if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock,
PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
retval = EBUSY;
break;
@@ -1317,7 +1311,7 @@ xbd_suspend(device_t dev)
mtx_unlock(&sc->xbd_io_lock);
if (retval != 0)
sc->xbd_connected = saved_state;
sc->xbd_state = saved_state;
return (retval);
}

sys/dev/xen/blkfront/block.h

@@ -30,7 +30,6 @@
* $FreeBSD$
*/
#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
#include <xen/blkif.h>
@@ -93,20 +92,20 @@
BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \
* XBD_MAX_REQUESTS)
typedef enum {
XBDCF_Q_MASK = 0xFF,
XBDCF_FROZEN = 1<<8,
XBDCF_POLLED = 1<<9,
XBDCF_INITIALIZER = XBDCF_Q_MASK
} xbdc_flag_t;
struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);
struct xbd_command {
TAILQ_ENTRY(xbd_command) cm_link;
struct xbd_softc *cm_sc;
u_int cm_flags;
#define XBD_CMD_FROZEN (1<<0)
#define XBD_CMD_POLLED (1<<1)
#define XBD_ON_XBDQ_FREE (1<<2)
#define XBD_ON_XBDQ_READY (1<<3)
#define XBD_ON_XBDQ_BUSY (1<<4)
#define XBD_ON_XBDQ_COMPLETE (1<<5)
#define XBD_ON_XBDQ_MASK ((1<<2)|(1<<3)|(1<<4)|(1<<5))
xbdc_flag_t cm_flags;
bus_dmamap_t cm_map;
uint64_t cm_id;
grant_ref_t *cm_sg_refs;
@@ -121,22 +120,34 @@ struct xbd_command {
xbd_cbcf_t *cm_complete;
};
#define XBDQ_FREE 0
#define XBDQ_BIO 1
#define XBDQ_READY 2
#define XBDQ_BUSY 3
#define XBDQ_COMPLETE 4
#define XBDQ_COUNT 5
typedef enum {
XBD_Q_FREE,
XBD_Q_READY,
XBD_Q_BUSY,
XBD_Q_COMPLETE,
XBD_Q_BIO,
XBD_Q_COUNT,
XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;
struct xbd_qstat {
uint32_t q_length;
uint32_t q_max;
};
typedef struct xbd_cm_q {
TAILQ_HEAD(, xbd_command) q_tailq;
uint32_t q_length;
uint32_t q_max;
} xbd_cm_q_t;
union xbd_statrequest {
uint32_t ms_item;
struct xbd_qstat ms_qstat;
};
typedef enum {
XBD_STATE_DISCONNECTED,
XBD_STATE_CONNECTED,
XBD_STATE_SUSPENDED
} xbd_state_t;
typedef enum {
XBDF_OPEN = 1 << 0, /* drive is open (can't shut down) */
XBDF_BARRIER = 1 << 1, /* backend supports barriers */
XBDF_READY = 1 << 2, /* Is ready */
XBDF_FROZEN = 1 << 3 /* Waiting for resources */
} xbd_flag_t;
/*
* We have one of these per vbd, whether ide, scsi or 'other'.
@@ -146,13 +157,9 @@ struct xbd_softc {
struct disk *xbd_disk; /* disk params */
struct bio_queue_head xbd_bioq; /* sort queue */
int xbd_unit;
int xbd_flags;
#define XBD_OPEN (1<<0) /* drive is open (can't shut down) */
#define XBD_BARRIER (1 << 1) /* backend supports barriers */
#define XBD_READY (1 << 2) /* Is ready */
#define XBD_FROZEN (1 << 3) /* Waiting for resources */
xbd_flag_t xbd_flags;
int xbd_vdevice;
int xbd_connected;
xbd_state_t xbd_state;
u_int xbd_ring_pages;
uint32_t xbd_max_requests;
uint32_t xbd_max_request_segments;
@@ -162,11 +169,7 @@ struct xbd_softc {
blkif_front_ring_t xbd_ring;
unsigned int xbd_irq;
struct gnttab_free_callback xbd_callback;
TAILQ_HEAD(,xbd_command) xbd_cm_free;
TAILQ_HEAD(,xbd_command) xbd_cm_ready;
TAILQ_HEAD(,xbd_command) xbd_cm_busy;
TAILQ_HEAD(,xbd_command) xbd_cm_complete;
struct xbd_qstat xbd_qstat[XBDQ_COUNT];
xbd_cm_q_t xbd_cm_q[XBD_Q_COUNT];
bus_dma_tag_t xbd_io_dmat;
/**
@@ -182,113 +185,124 @@ struct xbd_softc {
int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
uint16_t vdisk_info, unsigned long sector_size);
#define XBDQ_ADD(sc, qname) \
do { \
struct xbd_qstat *qs; \
\
qs = &(sc)->xbd_qstat[qname]; \
qs->q_length++; \
if (qs->q_length > qs->q_max) \
qs->q_max = qs->q_length; \
} while (0)
static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
struct xbd_cm_q *cmq;
#define XBDQ_REMOVE(sc, qname) (sc)->xbd_qstat[qname].q_length--
cmq = &sc->xbd_cm_q[index];
cmq->q_length++;
if (cmq->q_length > cmq->q_max)
cmq->q_max = cmq->q_length;
}
#define XBDQ_INIT(sc, qname) \
do { \
sc->xbd_qstat[qname].q_length = 0; \
sc->xbd_qstat[qname].q_max = 0; \
} while (0)
static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
sc->xbd_cm_q[index].q_length--;
}
#define XBDQ_COMMAND_QUEUE(name, index) \
static __inline void \
xbd_initq_ ## name (struct xbd_softc *sc) \
{ \
TAILQ_INIT(&sc->xbd_cm_ ## name); \
XBDQ_INIT(sc, index); \
} \
static __inline void \
xbd_enqueue_ ## name (struct xbd_command *cm) \
{ \
if ((cm->cm_flags & XBD_ON_XBDQ_MASK) != 0) { \
printf("command %p is on another queue, " \
"flags = %#x\n", cm, cm->cm_flags); \
panic("command is on another queue"); \
} \
TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_ ## name, cm, cm_link); \
cm->cm_flags |= XBD_ON_ ## index; \
XBDQ_ADD(cm->cm_sc, index); \
} \
static __inline void \
xbd_requeue_ ## name (struct xbd_command *cm) \
{ \
if ((cm->cm_flags & XBD_ON_XBDQ_MASK) != 0) { \
printf("command %p is on another queue, " \
"flags = %#x\n", cm, cm->cm_flags); \
panic("command is on another queue"); \
} \
TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_ ## name, cm, cm_link); \
cm->cm_flags |= XBD_ON_ ## index; \
XBDQ_ADD(cm->cm_sc, index); \
} \
static __inline struct xbd_command * \
xbd_dequeue_ ## name (struct xbd_softc *sc) \
{ \
struct xbd_command *cm; \
\
if ((cm = TAILQ_FIRST(&sc->xbd_cm_ ## name)) != NULL) { \
if ((cm->cm_flags & XBD_ON_XBDQ_MASK) != \
XBD_ON_ ## index) { \
printf("command %p not in queue, " \
"flags = %#x, bit = %#x\n", cm, \
cm->cm_flags, XBD_ON_ ## index); \
panic("command not in queue"); \
} \
TAILQ_REMOVE(&sc->xbd_cm_ ## name, cm, cm_link);\
cm->cm_flags &= ~XBD_ON_ ## index; \
XBDQ_REMOVE(sc, index); \
} \
return (cm); \
} \
static __inline void \
xbd_remove_ ## name (struct xbd_command *cm) \
{ \
if ((cm->cm_flags & XBD_ON_XBDQ_MASK) != XBD_ON_ ## index){\
printf("command %p not in queue, flags = %#x, " \
"bit = %#x\n", cm, cm->cm_flags, \
XBD_ON_ ## index); \
panic("command not in queue"); \
} \
TAILQ_REMOVE(&cm->cm_sc->xbd_cm_ ## name, cm, cm_link); \
cm->cm_flags &= ~XBD_ON_ ## index; \
XBDQ_REMOVE(cm->cm_sc, index); \
} \
struct hack
static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
struct xbd_cm_q *cmq;
XBDQ_COMMAND_QUEUE(free, XBDQ_FREE);
XBDQ_COMMAND_QUEUE(ready, XBDQ_READY);
XBDQ_COMMAND_QUEUE(busy, XBDQ_BUSY);
XBDQ_COMMAND_QUEUE(complete, XBDQ_COMPLETE);
cmq = &sc->xbd_cm_q[index];
TAILQ_INIT(&cmq->q_tailq);
cmq->q_length = 0;
cmq->q_max = 0;
}
static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
KASSERT(index != XBD_Q_BIO,
("%s: Commands cannot access the bio queue.", __func__));
if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
panic("%s: command %p is already on queue %d.",
__func__, cm, cm->cm_flags & XBDCF_Q_MASK);
TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
cm->cm_flags &= ~XBDCF_Q_MASK;
cm->cm_flags |= index;
xbd_added_qentry(cm->cm_sc, index);
}
static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
KASSERT(index != XBD_Q_BIO,
("%s: Commands cannot access the bio queue.", __func__));
if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
panic("%s: command %p is already on queue %d.",
__func__, cm, cm->cm_flags & XBDCF_Q_MASK);
TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
cm->cm_flags &= ~XBDCF_Q_MASK;
cm->cm_flags |= index;
xbd_added_qentry(cm->cm_sc, index);
}
static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
struct xbd_command *cm;
KASSERT(index != XBD_Q_BIO,
("%s: Commands cannot access the bio queue.", __func__));
if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
panic("%s: command %p is on queue %d, "
"not specified queue %d",
__func__, cm,
cm->cm_flags & XBDCF_Q_MASK,
index);
}
TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
cm->cm_flags &= ~XBDCF_Q_MASK;
cm->cm_flags |= XBD_Q_NONE;
xbd_removed_qentry(cm->cm_sc, index);
}
return (cm);
}
static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
xbd_q_index_t index;
index = cm->cm_flags & XBDCF_Q_MASK;
KASSERT(index != XBD_Q_BIO,
("%s: Commands cannot access the bio queue.", __func__));
if (index != expected_index) {
panic("%s: command %p is on queue %d, not specified queue %d",
__func__, cm, index, expected_index);
}
TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
cm->cm_flags &= ~XBDCF_Q_MASK;
cm->cm_flags |= XBD_Q_NONE;
xbd_removed_qentry(cm->cm_sc, index);
}
static __inline void
xbd_initq_bio(struct xbd_softc *sc)
{
bioq_init(&sc->xbd_bioq);
XBDQ_INIT(sc, XBDQ_BIO);
}
static __inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
bioq_insert_tail(&sc->xbd_bioq, bp);
XBDQ_ADD(sc, XBDQ_BIO);
xbd_added_qentry(sc, XBD_Q_BIO);
}
static __inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
bioq_insert_head(&sc->xbd_bioq, bp);
XBDQ_ADD(sc, XBDQ_BIO);
xbd_added_qentry(sc, XBD_Q_BIO);
}
static __inline struct bio *
@@ -298,9 +312,20 @@ xbd_dequeue_bio(struct xbd_softc *sc)
if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
bioq_remove(&sc->xbd_bioq, bp);
XBDQ_REMOVE(sc, XBDQ_BIO);
xbd_removed_qentry(sc, XBD_Q_BIO);
}
return (bp);
}
static inline void
xbd_initqs(struct xbd_softc *sc)
{
u_int index;
for (index = 0; index < XBD_Q_COUNT; index++)
xbd_initq_cm(sc, index);
xbd_initq_bio(sc);
}
#endif /* __XEN_BLKFRONT_BLOCK_H__ */
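A usage sketch of a command moving through the unified queue API (an
illustration built from the functions above, assuming the driver softc
"sc" from the surrounding context; locking is elided):

	struct xbd_command *cm;

	cm = xbd_dequeue_cm(sc, XBD_Q_FREE);     /* claim an idle command */
	if (cm != NULL) {
		/* ... fill in the request ... */
		xbd_enqueue_cm(cm, XBD_Q_READY); /* staged for xbd_startio() */
	}

	/* Enqueuing a command that is still on another queue now panics
	 * with the offending queue index, rather than a raw flag mask. */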