Avoid queue unlock followed by relock when the enable interrupt race is lost
Losing this race already happens infrequently, and the lock hold time is still bounded since we defer to a taskqueue after a few tries.
commit dd6f83a00f
parent bddddcd566
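
To make the description above concrete, the following is a minimal, self-contained userland analogue of the pattern, not the vtnet code: when the enable-interrupt step loses the race, the handler keeps holding the queue lock and retries a bounded number of times, and only after the retries are exhausted does it unlock and defer the remaining work. All names here (fake_queue, fake_intr, fake_enable_intr, INTR_DISABLE_RETRIES, the deferred flag) are hypothetical stand-ins for the driver's VTNET_*Q_LOCK()/UNLOCK() macros, VTNET_INTR_DISABLE_RETRIES, and taskqueue_enqueue().

/*
 * Illustrative only: a userland sketch of the locking pattern, not driver
 * code.  The "interrupt" handler drains work, tries to re-enable the
 * (simulated) interrupt, and on losing that race retries while still
 * holding the lock; after a bounded number of tries it unlocks and defers.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define INTR_DISABLE_RETRIES	4	/* stand-in for VTNET_INTR_DISABLE_RETRIES */

struct fake_queue {
	pthread_mutex_t	lock;		/* stand-in for VTNET_RXQ_LOCK/UNLOCK */
	int		pending;	/* work items still queued */
	bool		deferred;	/* stand-in for taskqueue_enqueue() */
	int		rescheduled;	/* stand-in for vrxs_rescheduled */
};

/* Pretend to re-enable the interrupt; nonzero means the race was lost. */
static int
fake_enable_intr(struct fake_queue *q)
{
	return (q->pending != 0);
}

static void
fake_intr(struct fake_queue *q)
{
	int tries = 0;

	pthread_mutex_lock(&q->lock);
again:
	if (q->pending > 0)
		q->pending--;		/* drain one pending item */

	if (fake_enable_intr(q) != 0) {
		/*
		 * Lost the race: more work arrived before the interrupt was
		 * re-enabled.  Retry without dropping the lock; the hold
		 * time stays bounded by INTR_DISABLE_RETRIES.
		 */
		if (tries++ < INTR_DISABLE_RETRIES)
			goto again;

		pthread_mutex_unlock(&q->lock);
		q->rescheduled++;
		q->deferred = true;	/* hand the rest to a deferred context */
		return;
	}
	pthread_mutex_unlock(&q->lock);
}

int
main(void)
{
	static struct fake_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pending = 10,
	};

	fake_intr(&q);
	printf("pending=%d deferred=%d rescheduled=%d\n",
	    q.pending, (int)q.deferred, q.rescheduled);
	return (0);
}

In the driver itself the deferred context is the queue's interrupt task (vtnrx_intrtask / vtntx_intrtask on the taskqueue), and the bound on the retry loop is the same tries < VTNET_INTR_DISABLE_RETRIES check visible in the hunks below.
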
@@ -1821,9 +1821,9 @@ vtnet_rx_vq_intr(void *xrxq)
 		return;
 	}
 
-again:
 	VTNET_RXQ_LOCK(rxq);
 
+again:
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 		VTNET_RXQ_UNLOCK(rxq);
 		return;
@@ -1837,10 +1837,11 @@ vtnet_rx_vq_intr(void *xrxq)
 		 * This is an occasional condition or race (when !more),
 		 * so retry a few times before scheduling the taskqueue.
 		 */
-		rxq->vtnrx_stats.vrxs_rescheduled++;
-		VTNET_RXQ_UNLOCK(rxq);
 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
 			goto again;
+
+		VTNET_RXQ_UNLOCK(rxq);
+		rxq->vtnrx_stats.vrxs_rescheduled++;
 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
 	} else
 		VTNET_RXQ_UNLOCK(rxq);
@@ -2408,9 +2409,9 @@ vtnet_tx_vq_intr(void *xtxq)
 		return;
 	}
 
-again:
 	VTNET_TXQ_LOCK(txq);
 
+again:
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 		VTNET_TXQ_UNLOCK(txq);
 		return;
@@ -2426,9 +2427,10 @@ vtnet_tx_vq_intr(void *xtxq)
 		 * This is an occasional race, so retry a few times
 		 * before scheduling the taskqueue.
 		 */
-		VTNET_TXQ_UNLOCK(txq);
 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
 			goto again;
+
+		VTNET_TXQ_UNLOCK(txq);
 		txq->vtntx_stats.vtxs_rescheduled++;
 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
 	} else