Fix LOR between controller and queue locks.
Admin pass-through requests took the controller lock before the queue lock, but when a request was submitted to a failed controller the controller lock was taken after the queue lock. Fix that by reducing the lock scopes and switching to mtx_pool locks to track pass-through request completion.

Sponsored by:	iXsystems, Inc.
commit 8bccbb2ef4
parent 08084bf9c5
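The core of the change is the new completion handshake between nvme_ctrlr_passthrough_cmd() and nvme_pt_done(): the submitter leases a pool mutex keyed on the address of the pass-through command, publishes it in driver_lock, and sleeps until the completion callback clears that field. Because the wait no longer uses the controller or namespace lock, nothing is held across submission that could invert with the queue lock. Below is a minimal sketch of that handshake; struct pt_cmd, pt_submit_and_wait(), and pt_done() are illustrative stand-ins rather than the driver's own types, and only mtxpool_sleep, mtx_pool_find(), mtx_sleep(), and wakeup() are real kernel interfaces.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Illustrative stand-in for struct nvme_pt_command: only the field used
 * for completion tracking is shown. */
struct pt_cmd {
        struct mtx      *driver_lock;   /* non-NULL while the request is in flight */
};

/* Submitter side: lease a pool mutex keyed on the command itself, publish
 * it in driver_lock, submit, then sleep until the completion handler
 * clears driver_lock.  No controller or namespace lock is held here. */
static void
pt_submit_and_wait(struct pt_cmd *pt)
{
        struct mtx *mtx;

        mtx = mtx_pool_find(mtxpool_sleep, pt);
        pt->driver_lock = mtx;

        /* ... submit the request to the admin or I/O queue here ... */

        mtx_lock(mtx);
        while (pt->driver_lock != NULL)
                mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
        mtx_unlock(mtx);
}

/* Completion side: cache the mutex pointer before clearing driver_lock,
 * since the waiter treats driver_lock == NULL as "done". */
static void
pt_done(struct pt_cmd *pt)
{
        struct mtx *mtx = pt->driver_lock;

        mtx_lock(mtx);
        pt->driver_lock = NULL;
        wakeup(pt);
        mtx_unlock(mtx);
}

Keying the mutex on the command's address also means the waiter needs no driver-wide state of its own; the pool lock exists only for the duration of the sleep/wakeup exchange.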
@@ -233,11 +233,12 @@ nvme_ctrlr_fail_req_task(void *arg, int pending)
         struct nvme_request *req;
 
         mtx_lock(&ctrlr->lock);
-        while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
-                req = STAILQ_FIRST(&ctrlr->fail_req);
+        while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
                 STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
+                mtx_unlock(&ctrlr->lock);
                 nvme_qpair_manual_complete_request(req->qpair, req,
                     NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
+                mtx_lock(&ctrlr->lock);
         }
         mtx_unlock(&ctrlr->lock);
 }
@@ -978,6 +979,7 @@ static void
 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
 {
         struct nvme_pt_command *pt = arg;
+        struct mtx *mtx = pt->driver_lock;
         uint16_t status;
 
         bzero(&pt->cpl, sizeof(pt->cpl));
@@ -987,9 +989,10 @@ nvme_pt_done(void *arg, const struct nvme_completion *cpl)
         status &= ~NVME_STATUS_P_MASK;
         pt->cpl.status = status;
 
-        mtx_lock(pt->driver_lock);
+        mtx_lock(mtx);
+        pt->driver_lock = NULL;
         wakeup(pt);
-        mtx_unlock(pt->driver_lock);
+        mtx_unlock(mtx);
 }
 
 int
@@ -1058,15 +1061,7 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
 
         req->cmd.nsid = htole32(nsid);
 
-        if (is_admin_cmd)
-                mtx = &ctrlr->lock;
-        else {
-                KASSERT((nsid-1) >= 0 && (nsid-1) < NVME_MAX_NAMESPACES,
-                    ("%s: invalid namespace ID %d\n", __func__, nsid));
-                mtx = &ctrlr->ns[nsid-1].lock;
-        }
-
-        mtx_lock(mtx);
+        mtx = mtx_pool_find(mtxpool_sleep, pt);
         pt->driver_lock = mtx;
 
         if (is_admin_cmd)
@@ -1074,11 +1069,11 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
         else
                 nvme_ctrlr_submit_io_request(ctrlr, req);
 
-        mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
+        mtx_lock(mtx);
+        while (pt->driver_lock != NULL)
+                mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
         mtx_unlock(mtx);
 
-        pt->driver_lock = NULL;
-
 err:
         if (buf != NULL) {
                 relpbuf(buf, NULL);