Avoid queue unlock followed by relock when the enable interrupt race is lost

This race already happens infrequently, and the lock hold time is still
bounded since we defer to a taskqueue after a few retries.
Bryan Venteicher 2014-01-25 19:57:30 +00:00
parent bddddcd566
commit dd6f83a00f
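
The point of the change is visible in the diff below: each queue lock is now
taken once, the bounded retries happen while the lock is still held, and the
unlock moves to the exit paths. What follows is a minimal sketch of the
resulting RX interrupt path; the declarations and the ring-processing code
between the two hunks are assumed rather than quoted, and
lost_enable_intr_race is a hypothetical placeholder for that check. Only the
lines that also appear in the diff are verbatim.

	/*
	 * Sketch only: declarations and the check that the interrupt really
	 * belongs to an active queue are elided.
	 */
	VTNET_RXQ_LOCK(rxq);
again:
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}

	/* Assumed: drain the RX ring, then try to re-enable the interrupt. */
	if (lost_enable_intr_race) {	/* hypothetical name for that condition */
		/* Retry while still holding the queue lock; no unlock/relock. */
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
			goto again;

		/* Hold time stays bounded: hand off to the taskqueue instead. */
		VTNET_RXQ_UNLOCK(rxq);
		rxq->vtnrx_stats.vrxs_rescheduled++;
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	} else
		VTNET_RXQ_UNLOCK(rxq);

The TX path in vtnet_tx_vq_intr() takes the same shape with the VTNET_TXQ_*
macros and the vtntx counterparts, as the last two hunks show.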


@@ -1821,9 +1821,9 @@ vtnet_rx_vq_intr(void *xrxq)
 		return;
 	}
 
-again:
 	VTNET_RXQ_LOCK(rxq);
 
+again:
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 		VTNET_RXQ_UNLOCK(rxq);
 		return;
@@ -1837,10 +1837,11 @@ vtnet_rx_vq_intr(void *xrxq)
 		 * This is an occasional condition or race (when !more),
 		 * so retry a few times before scheduling the taskqueue.
 		 */
-		rxq->vtnrx_stats.vrxs_rescheduled++;
-		VTNET_RXQ_UNLOCK(rxq);
 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
 			goto again;
+
+		VTNET_RXQ_UNLOCK(rxq);
+		rxq->vtnrx_stats.vrxs_rescheduled++;
 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
 	} else
 		VTNET_RXQ_UNLOCK(rxq);
@@ -2408,9 +2409,9 @@ vtnet_tx_vq_intr(void *xtxq)
 		return;
 	}
 
-again:
 	VTNET_TXQ_LOCK(txq);
 
+again:
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 		VTNET_TXQ_UNLOCK(txq);
 		return;
@@ -2426,9 +2427,10 @@ vtnet_tx_vq_intr(void *xtxq)
 		 * This is an occasional race, so retry a few times
 		 * before scheduling the taskqueue.
 		 */
-		VTNET_TXQ_UNLOCK(txq);
 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
 			goto again;
+
+		VTNET_TXQ_UNLOCK(txq);
 		txq->vtntx_stats.vtxs_rescheduled++;
 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
 	} else