safexcel: Handle command/result descriptor exhaustion gracefully

Rather than returning a hard error in this case, return ERESTART so that
upper layers get a chance to retry the request (or drop it, depending on
the desired policy).

This case is hard to hit due to the somewhat low bound on queued
requests, but that will no longer be true after an upcoming change.

MFC after:	1 week
Sponsored by:	Rubicon Communications, LLC (Netgate)
This commit is contained in:
Mark Johnston 2021-01-18 17:07:56 -05:00
parent 0371c3faaa
commit b7e27af36b

View File

@@ -2093,7 +2093,7 @@ safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
if (cdesc == NULL) {
safexcel_cmd_descr_rollback(ring, i);
counter_u64_add(req->sc->sc_cdesc_alloc_failures, 1);
-req->error = EAGAIN;
+req->error = ERESTART;
return;
}
if (i == 0)
@@ -2121,7 +2121,7 @@ safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
ring->cmd_data->sg_nseg);
safexcel_res_descr_rollback(ring, i);
counter_u64_add(req->sc->sc_rdesc_alloc_failures, 1);
-req->error = EAGAIN;
+req->error = ERESTART;
return;
}
}
@@ -2608,10 +2608,16 @@ safexcel_process(device_t dev, struct cryptop *crp, int hint)
error = safexcel_create_chain(ring, req);
if (__predict_false(error != 0)) {
safexcel_free_request(ring, req);
+if (error == ERESTART)
+ring->blocked = CRYPTO_SYMQ;
mtx_unlock(&ring->mtx);
-crp->crp_etype = error;
-crypto_done(crp);
-return (0);
+if (error != ERESTART) {
+crp->crp_etype = error;
+crypto_done(crp);
+return (0);
+} else {
+return (ERESTART);
+}
}
safexcel_set_token(req);