Properly track the different reasons new I/O is temporarily disabled, and
only re-enable I/O when all reasons have cleared.

sys/dev/xen/blkfront/block.h:
	In the block front driver softc, replace the boolean
	XBDF_FROZEN flag with a count of the commands and driver-global
	issues that freeze the I/O queue.  So long as xbd_qfrozen_cnt
	is non-zero, I/O is halted.

	Add flags to xbd_flags for tracking grant table entry and
	free command resource shortages.  Each of these classes can
	increment xbd_qfrozen_cnt at most once.
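
	The sketch below is a minimal, self-contained C model of this
	counting scheme (hosted/userspace code with simplified names such
	as struct softc, freeze() and thaw(); the driver's real routines
	are xbd_freeze()/xbd_thaw() in the diff further down).  It shows
	why a plain boolean is not enough: with two overlapping shortages,
	clearing the first one must not resume I/O.

	#include <assert.h>
	#include <stdio.h>

	typedef enum {
		XBDF_NONE         = 0,
		XBDF_CM_SHORTAGE  = 1 << 3,	/* free command shortage */
		XBDF_GNT_SHORTAGE = 1 << 4	/* grant reference shortage */
	} xbd_flag_t;

	struct softc {
		xbd_flag_t	flags;
		int		qfrozen_cnt;	/* I/O may run only at zero */
	};

	/* Each flagged reason may contribute at most one count. */
	static void
	freeze(struct softc *sc, xbd_flag_t flag)
	{
		if (flag != XBDF_NONE && (sc->flags & flag) != 0)
			return;
		sc->flags |= flag;
		sc->qfrozen_cnt++;
	}

	/* Only active reasons may be cleared; I/O resumes at zero. */
	static void
	thaw(struct softc *sc, xbd_flag_t flag)
	{
		if (flag != XBDF_NONE && (sc->flags & flag) == 0)
			return;
		assert(sc->qfrozen_cnt > 0);
		sc->flags &= ~flag;
		sc->qfrozen_cnt--;
	}

	int
	main(void)
	{
		struct softc sc = { XBDF_NONE, 0 };

		freeze(&sc, XBDF_CM_SHORTAGE);		/* first reason */
		freeze(&sc, XBDF_GNT_SHORTAGE);		/* second reason */
		thaw(&sc, XBDF_CM_SHORTAGE);		/* first reason clears... */
		printf("frozen: %d\n", sc.qfrozen_cnt != 0);	/* 1: still frozen */
		thaw(&sc, XBDF_GNT_SHORTAGE);		/* ...then the second */
		printf("frozen: %d\n", sc.qfrozen_cnt != 0);	/* 0: I/O may resume */
		return (0);
	}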

	Add a command flag (XBDCF_ASYNC_MAPPING) that is set whenever
	the initial mapping attempt of a command returns EINPROGRESS
	(i.e. bus dma will complete the mapping asynchronously).
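
	As a companion sketch (again a simplified, hosted C model with
	hypothetical names map_load(), mapping_done() and CF_ASYNC_MAPPING;
	the real calls are bus_dmamap_load() and xbd_queue_cb() in the
	blkfront.c diff), the flag simply records that the mapping was
	deferred, so the completion callback knows it must flush the ring
	itself instead of relying on xbd_startio().

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define	CF_ASYNC_MAPPING	(1 << 10)	/* mapping finished via callback */

	struct cmd {
		int	flags;
	};

	/* Stand-in for bus_dmamap_load(); may defer and call back later. */
	static int
	map_load(struct cmd *cm, bool defer)
	{
		return (defer ? EINPROGRESS : 0);
	}

	/* Stand-in for xbd_queue_cb(): only an async mapping flushes here. */
	static void
	mapping_done(struct cmd *cm)
	{
		if ((cm->flags & CF_ASYNC_MAPPING) != 0)
			printf("async mapping: flush the request ring here\n");
	}

	int
	main(void)
	{
		struct cmd cm = { 0 };

		if (map_load(&cm, true) == EINPROGRESS)
			cm.flags |= CF_ASYNC_MAPPING;	/* remember the deferral */
		mapping_done(&cm);	/* later, from the mapping callback */
		return (0);
	}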

sys/dev/xen/blkfront/blkfront.c:
	In xbd_queue_cb(), use the new XBDCF_ASYNC_MAPPING flag to
	definitively know if an async bus dmamap load has occurred.

	Add xbd_freeze() and xbd_thaw() helper functions for managing
	xbd_qfrozen_cnt and use them to implement all queue freezing logic.

	Add missing "thaw" to restart I/O processing once grant references
	become available.
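
	The sketch below models this restart path under the same simplifying
	assumptions (hypothetical names grant_shortage_freeze(),
	grant_available_cb() and start_io(); in the driver these correspond
	to the XBDF_GNT_SHORTAGE freeze in xbd_bio_command(), the
	gnttab_request_free_callback() registration, and
	xbd_restart_queue_callback() thawing and calling xbd_startio()).
	Without the thaw, the queue would stay frozen even after grant
	references became available.

	#include <stdbool.h>
	#include <stdio.h>

	static int	qfrozen_cnt;	/* number of active freeze reasons */
	static bool	gnt_shortage;	/* grant reference shortage active */

	static void
	start_io(void)
	{
		if (qfrozen_cnt != 0) {
			printf("queue frozen; nothing dispatched\n");
			return;
		}
		printf("dispatching queued I/O\n");
	}

	/* Grant references ran out: freeze once and wait for the callback. */
	static void
	grant_shortage_freeze(void)
	{
		if (!gnt_shortage) {
			gnt_shortage = true;
			qfrozen_cnt++;
		}
	}

	/* References are available again: thaw, then restart the queue. */
	static void
	grant_available_cb(void)
	{
		if (gnt_shortage) {
			gnt_shortage = false;
			qfrozen_cnt--;	/* the previously missing "thaw" */
		}
		start_io();
	}

	int
	main(void)
	{
		grant_shortage_freeze();
		start_io();		/* still frozen */
		grant_available_cb();	/* thaw and restart */
		return (0);
	}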

Sponsored by:	Spectra Logic Corporation
gibbs 2013-06-15 04:51:31 +00:00
parent 1cc3945aab
commit 90080fdf07
2 changed files with 58 additions and 16 deletions

sys/dev/xen/blkfront/blkfront.c

@@ -87,6 +87,30 @@ static void xbd_startio(struct xbd_softc *sc);
 static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
 
 /*---------------------------- Command Processing ----------------------------*/
+static void
+xbd_freeze(struct xbd_softc *sc, xbd_flag_t xbd_flag)
+{
+	if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0)
+		return;
+
+	sc->xbd_flags |= xbd_flag;
+	sc->xbd_qfrozen_cnt++;
+}
+
+static void
+xbd_thaw(struct xbd_softc *sc, xbd_flag_t xbd_flag)
+{
+	if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0)
+		return;
+
+	if (sc->xbd_qfrozen_cnt == 0)
+		panic("%s: Thaw with flag 0x%x while not frozen.",
+		    __func__, xbd_flag);
+
+	sc->xbd_flags &= ~xbd_flag;
+	sc->xbd_qfrozen_cnt--;
+}
+
 static inline void
 xbd_flush_requests(struct xbd_softc *sc)
 {
@@ -110,6 +134,7 @@ xbd_free_command(struct xbd_command *cm)
 	cm->cm_bp = NULL;
 	cm->cm_complete = NULL;
 	xbd_enqueue_cm(cm, XBD_Q_FREE);
+	xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
 }
 
 static void
@@ -212,10 +237,13 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 	xbd_enqueue_cm(cm, XBD_Q_BUSY);
 
 	/*
-	 * This flag means that we're probably executing in the busdma swi
-	 * instead of in the startio context, so an explicit flush is needed.
+	 * If bus dma had to asynchronously call us back to dispatch
+	 * this command, we are no longer executing in the context of
+	 * xbd_startio().  Thus we cannot rely on xbd_startio()'s call to
+	 * xbd_flush_requests() to publish this command to the backend
+	 * along with any other commands that it could batch.
 	 */
-	if (cm->cm_flags & XBDCF_FROZEN)
+	if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
 		xbd_flush_requests(sc);
 
 	return;
@@ -229,9 +257,14 @@ xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
 
 	error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map, cm->cm_data,
 	    cm->cm_datalen, xbd_queue_cb, cm, 0);
 	if (error == EINPROGRESS) {
-		printf("EINPROGRESS\n");
-		sc->xbd_flags |= XBDF_FROZEN;
-		cm->cm_flags |= XBDCF_FROZEN;
+		/*
+		 * Maintain queuing order by freezing the queue.  The next
+		 * command may not require as many resources as the command
+		 * we just attempted to map, so we can't rely on bus dma
+		 * blocking for it too.
+		 */
+		xbd_freeze(sc, XBDF_NONE);
+		cm->cm_flags |= XBDCF_FROZEN|XBDCF_ASYNC_MAPPING;
 		return (0);
 	}
@@ -245,6 +278,8 @@ xbd_restart_queue_callback(void *arg)
 
 	mtx_lock(&sc->xbd_io_lock);
 
+	xbd_thaw(sc, XBDF_GNT_SHORTAGE);
+
 	xbd_startio(sc);
 
 	mtx_unlock(&sc->xbd_io_lock);
@@ -264,6 +299,7 @@ xbd_bio_command(struct xbd_softc *sc)
 		return (NULL);
 
 	if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
+		xbd_freeze(sc, XBDF_CM_SHORTAGE);
 		xbd_requeue_bio(sc, bp);
 		return (NULL);
 	}
@@ -273,9 +309,9 @@ xbd_bio_command(struct xbd_softc *sc)
 		gnttab_request_free_callback(&sc->xbd_callback,
 		    xbd_restart_queue_callback, sc,
 		    sc->xbd_max_request_segments);
+		xbd_freeze(sc, XBDF_GNT_SHORTAGE);
 		xbd_requeue_bio(sc, bp);
 		xbd_enqueue_cm(cm, XBD_Q_FREE);
-		sc->xbd_flags |= XBDF_FROZEN;
 		return (NULL);
 	}
 
@@ -309,7 +345,7 @@ xbd_startio(struct xbd_softc *sc)
 
 	while (RING_FREE_REQUESTS(&sc->xbd_ring) >=
 	    sc->xbd_max_request_blocks) {
-		if (sc->xbd_flags & XBDF_FROZEN)
+		if (sc->xbd_qfrozen_cnt != 0)
 			break;
 
 		cm = xbd_dequeue_cm(sc, XBD_Q_READY);
@@ -397,11 +433,13 @@ xbd_int(void *xsc)
 		bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
 
 		/*
-		 * If commands are completing then resources are probably
-		 * being freed as well.  It's a cheap assumption even when
-		 * wrong.
+		 * Release any hold this command has on future command
+		 * dispatch.
 		 */
-		sc->xbd_flags &= ~XBDF_FROZEN;
+		if ((cm->cm_flags & XBDCF_FROZEN) != 0) {
+			xbd_thaw(sc, XBDF_NONE);
+			cm->cm_flags &= ~XBDCF_FROZEN;
+		}
 
 		/*
 		 * Directly call the i/o complete routine to save an

sys/dev/xen/blkfront/block.h

@@ -96,6 +96,7 @@ typedef enum {
 	XBDCF_Q_MASK = 0xFF,
 	XBDCF_FROZEN = 1<<8,
 	XBDCF_POLLED = 1<<9,
+	XBDCF_ASYNC_MAPPING = 1<<10,
 	XBDCF_INITIALIZER = XBDCF_Q_MASK
 } xbdc_flag_t;
 
@@ -143,10 +144,12 @@ typedef enum {
 } xbd_state_t;
 
 typedef enum {
-	XBDF_OPEN = 1 << 0,	/* drive is open (can't shut down) */
-	XBDF_BARRIER = 1 << 1,	/* backend supports barriers */
-	XBDF_READY = 1 << 2,	/* Is ready */
-	XBDF_FROZEN = 1 << 3	/* Waiting for resources */
+	XBDF_NONE = 0,
+	XBDF_OPEN = 1 << 0,	/* drive is open (can't shut down) */
+	XBDF_BARRIER = 1 << 1,	/* backend supports barriers */
+	XBDF_READY = 1 << 2,	/* Is ready */
+	XBDF_CM_SHORTAGE = 1 << 3,	/* Free cm resource shortage active. */
+	XBDF_GNT_SHORTAGE = 1 << 4	/* Grant ref resource shortage active */
 } xbd_flag_t;
 
 /*
@@ -158,6 +161,7 @@ struct xbd_softc {
 	struct bio_queue_head xbd_bioq;	/* sort queue */
 	int xbd_unit;
 	xbd_flag_t xbd_flags;
+	int xbd_qfrozen_cnt;
 	int xbd_vdevice;
 	xbd_state_t xbd_state;
 	u_int xbd_ring_pages;