safexcel: Fix a race around unblocking of crypto ops

safexcel_ring_intr() could fail to observe that sc_blocked is set after
completing all outstanding ops for a ring, in which case blocked ops
would be deferred forever.  Request structures are managed by individual
rings, so move the "blocked" flag into the per-ring state block and use
the ring lock to synchronize with safexcel_process().  Remove sc_mtx
since it is now unused.

MFC after:	3 days
Sponsored by:	Rubicon Communications, LLC (Netgate)
commit 092cf8d63f
parent 8ba6acbbe6
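The race being fixed is a lost wakeup: the submit path set the global
sc_blocked under sc_mtx, while the completion path retired a ring's
requests under that ring's lock, so the interrupt handler could finish
all outstanding ops without observing the flag, leaving the blocked ops
parked forever.  Moving the flag into struct safexcel_ring and guarding
both sides with ring->mtx serializes "mark blocked" against "free
request slots".  Below is a minimal userspace sketch of the resulting
pattern, assuming pthread mutexes in place of the kernel's mtx(9); the
struct and function names (ring_submit(), ring_complete()) are
illustrative only, not taken from the driver.

	/*
	 * Minimal sketch of the per-ring blocked-flag pattern.
	 * pthread_mutex_t models ring->mtx; the names are hypothetical.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct ring {
		pthread_mutex_t	mtx;		/* models ring->mtx */
		int		blocked;	/* models ring->blocked */
		int		free_slots;	/* models the per-ring request pool */
	};

	/* Submission path, cf. safexcel_process(). */
	static int
	ring_submit(struct ring *r)
	{
		pthread_mutex_lock(&r->mtx);
		if (r->free_slots == 0) {
			/* Mark the ring blocked under the same lock the
			 * completion path holds while freeing requests. */
			r->blocked = 1;		/* CRYPTO_SYMQ in the driver */
			pthread_mutex_unlock(&r->mtx);
			return (-1);		/* ERESTART in the driver */
		}
		r->free_slots--;
		pthread_mutex_unlock(&r->mtx);
		return (0);
	}

	/* Completion path, cf. safexcel_rdr_intr(): free slots and fetch
	 * the blocked flag atomically with respect to ring_submit(). */
	static void
	ring_complete(struct ring *r, int nreqs)
	{
		int blocked;

		pthread_mutex_lock(&r->mtx);
		r->free_slots += nreqs;
		blocked = r->blocked;
		r->blocked = 0;
		pthread_mutex_unlock(&r->mtx);

		if (blocked)
			printf("crypto_unblock()\n");	/* resubmit blocked ops */
	}

	int
	main(void)
	{
		struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

		(void)ring_submit(&r);	/* no slots: sets r.blocked */
		ring_complete(&r, 1);	/* frees a slot, sees r.blocked set */
		return (0);
	}

Because both sides take ring->mtx, a submitter that finds the pool
empty either sets the flag before the completion handler frees slots
(the handler then sees it and calls crypto_unblock()), or after slots
are already free (so the retry succeeds); the window in which the old
global sc_blocked could be set after the last completion had already
checked it no longer exists.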
--- a/sys/dev/safexcel/safexcel.c
+++ b/sys/dev/safexcel/safexcel.c
@@ -160,8 +160,9 @@ safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
 	struct safexcel_res_descr *rdesc;
 	struct safexcel_request *req;
 	struct safexcel_ring *ring;
-	uint32_t error, i, ncdescs, nrdescs, nreqs;
+	uint32_t blocked, error, i, ncdescs, nrdescs, nreqs;
 
+	blocked = 0;
 	ring = &sc->sc_ring[ringidx];
 
 	mtx_lock(&ring->mtx);
@@ -231,6 +232,8 @@ safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
 		    SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
 		    SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
 		    (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
+		blocked = ring->blocked;
+		ring->blocked = 0;
 	}
 out:
 	if (!STAILQ_EMPTY(&ring->queued_requests)) {
@@ -239,6 +242,9 @@ out:
 		    SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | 1);
 	}
 	mtx_unlock(&ring->mtx);
+
+	if (blocked)
+		crypto_unblock(sc->sc_cid, blocked);
 }
 
 static void
@@ -248,7 +254,7 @@ safexcel_ring_intr(void *arg)
 	struct safexcel_intr_handle *ih;
 	uint32_t status, stat;
 	int ring;
-	bool blocked, rdrpending;
+	bool rdrpending;
 
 	ih = arg;
 	sc = ih->sc;
@@ -281,14 +287,6 @@ safexcel_ring_intr(void *arg)
 
 	if (rdrpending)
 		safexcel_rdr_intr(sc, ring);
-
-	mtx_lock(&sc->sc_mtx);
-	blocked = sc->sc_blocked;
-	sc->sc_blocked = 0;
-	mtx_unlock(&sc->sc_mtx);
-
-	if (blocked)
-		crypto_unblock(sc->sc_cid, blocked);
 }
 
 static int
@@ -1100,8 +1098,6 @@ safexcel_alloc_dev_resources(struct safexcel_softc *sc)
 		goto out;
 	}
 
-	mtx_init(&sc->sc_mtx, "safexcel softc", NULL, MTX_DEF);
-
 	return (0);
 
 out:
@@ -1118,8 +1114,6 @@ safexcel_free_dev_resources(struct safexcel_softc *sc)
 {
 	int i;
 
-	mtx_destroy(&sc->sc_mtx);
-
 	for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
 		bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
 		    rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
@@ -1157,7 +1151,6 @@ safexcel_attach(device_t dev)
 
 	sc = device_get_softc(dev);
 	sc->sc_dev = dev;
-	sc->sc_blocked = 0;
 	sc->sc_cid = -1;
 
 	if (safexcel_alloc_dev_resources(sc))
@@ -2569,10 +2562,8 @@ safexcel_process(device_t dev, struct cryptop *crp, int hint)
 	mtx_lock(&ring->mtx);
 	req = safexcel_alloc_request(sc, ring);
 	if (__predict_false(req == NULL)) {
-		mtx_lock(&sc->sc_mtx);
+		ring->blocked = CRYPTO_SYMQ;
 		mtx_unlock(&ring->mtx);
-		sc->sc_blocked = CRYPTO_SYMQ;
-		mtx_unlock(&sc->sc_mtx);
 		return (ERESTART);
 	}
 
--- a/sys/dev/safexcel/safexcel_var.h
+++ b/sys/dev/safexcel/safexcel_var.h
@@ -377,6 +377,8 @@ struct safexcel_ring {
 	struct sglist *res_data;
 	struct safexcel_res_descr_ring rdr;
 
+	int blocked;
+
 	struct safexcel_request *requests;
 	STAILQ_HEAD(, safexcel_request) ready_requests;
 	STAILQ_HEAD(, safexcel_request) queued_requests;
@@ -405,9 +407,7 @@ struct safexcel_softc {
 
 	struct safexcel_ring sc_ring[SAFEXCEL_MAX_RINGS];
 	int sc_ringidx;
-	struct mtx sc_mtx;
 
-	int sc_blocked;
 	int32_t sc_cid;
 	struct safexcel_reg_offsets sc_offsets;
 	struct safexcel_config sc_config;