Flush my local cache of crypto subsystem fixes:

o add a ``done'' flag for crypto operations; this is set when the operation
  completes and is intended for callers to check operations that may complete
  ``prematurely'' because of direct callbacks
o close a race for operations where the crypto driver returns ERESTART: we
  need to hold the q lock to ensure the blocked state for the driver and any
  driver-private state are consistent; otherwise drivers may take an interrupt
  and notify the crypto subsystem that it can unblock the driver but operations
  will be left queued and never be processed
o close a race in /dev/crypto where operations can complete before the caller
  can sleep waiting for the callback: use a per-session mutex and the new done
  flag to handle this
o correct crypto_dispatch's handling of operations where the driver returns
  ERESTART: the return value must be zero and not ERESTART, otherwise the
  caller may free the crypto request despite it being queued for later handling
  (this typically results in a later panic)
o change crypto mutex ``names'' so witness printouts and the like are more
  meaningful
This commit is contained in:
Sam Leffler 2003-06-02 23:28:06 +00:00
parent 3191d840e0
commit 3569ae7f66
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=115746
3 changed files with 43 additions and 18 deletions

View File

@ -114,16 +114,16 @@ crypto_init(void)
{
int error;
mtx_init(&crypto_drivers_mtx, "crypto driver table",
NULL, MTX_DEF|MTX_QUIET);
mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
MTX_DEF|MTX_QUIET);
TAILQ_INIT(&crp_q);
TAILQ_INIT(&crp_kq);
mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF);
mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
TAILQ_INIT(&crp_ret_q);
TAILQ_INIT(&crp_ret_kq);
mtx_init(&crypto_ret_q_mtx, "crypto return queues", NULL, MTX_DEF);
mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
0, 0, 0, 0,
@ -664,6 +664,7 @@ crypto_dispatch(struct cryptop *crp)
binuptime(&crp->crp_tstamp);
#endif
CRYPTO_Q_LOCK();
if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
struct cryptocap *cap;
/*
@ -679,21 +680,22 @@ crypto_dispatch(struct cryptop *crp)
* The driver ran out of resources, mark the
* driver ``blocked'' for cryptop's and put
* the request on the queue.
*
* XXX ops are placed at the tail so their
* order is preserved but this can place them
* behind batch'd ops.
*/
CRYPTO_Q_LOCK();
crypto_drivers[hid].cc_qblocked = 1;
TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
CRYPTO_Q_UNLOCK();
TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
cryptostats.cs_blocks++;
result = 0;
}
} else {
/*
* The driver is blocked, just queue the op until
* it unblocks and the kernel thread gets kicked.
*/
CRYPTO_Q_LOCK();
TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
CRYPTO_Q_UNLOCK();
result = 0;
}
} else {
@ -704,14 +706,13 @@ crypto_dispatch(struct cryptop *crp)
* when the operation is low priority and/or suitable
* for batching.
*/
CRYPTO_Q_LOCK();
wasempty = TAILQ_EMPTY(&crp_q);
TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
if (wasempty)
wakeup_one(&crp_q);
CRYPTO_Q_UNLOCK();
result = 0;
}
CRYPTO_Q_UNLOCK();
return result;
}
@ -743,7 +744,7 @@ crypto_kdispatch(struct cryptkop *krp)
* it at the end does not work.
*/
crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
cryptostats.cs_kblocks++;
}
} else {
@ -939,6 +940,9 @@ crypto_getreq(int num)
void
crypto_done(struct cryptop *crp)
{
KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
("crypto_done: op already done, flags 0x%x", crp->crp_flags));
crp->crp_flags |= CRYPTO_F_DONE;
if (crp->crp_etype != 0)
cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING

View File

@ -56,6 +56,7 @@ struct csession {
TAILQ_ENTRY(csession) next;
u_int64_t sid;
u_int32_t ses;
struct mtx lock; /* for op submission */
u_int32_t cipher;
struct enc_xform *txform;
@ -419,12 +420,21 @@ cryptodev_op(
crp->crp_mac=cse->tmp_mac;
}
crypto_dispatch(crp);
error = tsleep(cse, PSOCK, "crydev", 0);
if (error) {
/* XXX can this happen? if so, how do we recover? */
/*
* Let the dispatch run unlocked, then, interlock against the
* callback before checking if the operation completed and going
* to sleep. This insures drivers don't inherit our lock which
* results in a lock order reversal between crypto_dispatch forced
* entry and the crypto_done callback into us.
*/
error = crypto_dispatch(crp);
mtx_lock(&cse->lock);
if (error == 0 && (crp->crp_flags & CRYPTO_F_DONE) == 0)
error = msleep(crp, &cse->lock, PWAIT, "crydev", 0);
mtx_unlock(&cse->lock);
if (error != 0)
goto bail;
}
if (crp->crp_etype != 0) {
error = crp->crp_etype;
@ -462,7 +472,9 @@ cryptodev_cb(void *op)
cse->error = crp->crp_etype;
if (crp->crp_etype == EAGAIN)
return crypto_dispatch(crp);
wakeup(cse);
mtx_lock(&cse->lock);
wakeup_one(crp);
mtx_unlock(&cse->lock);
return (0);
}
@ -661,10 +673,17 @@ csecreate(struct fcrypt *fcr, u_int64_t sid, caddr_t key, u_int64_t keylen,
{
struct csession *cse;
#ifdef INVARIANTS
/* NB: required when mtx_init is built with INVARIANTS */
MALLOC(cse, struct csession *, sizeof(struct csession),
M_XDATA, M_NOWAIT | M_ZERO);
#else
MALLOC(cse, struct csession *, sizeof(struct csession),
M_XDATA, M_NOWAIT);
#endif
if (cse == NULL)
return NULL;
mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF);
cse->key = key;
cse->keylen = keylen/8;
cse->mackey = mackey;
@ -684,6 +703,7 @@ csefree(struct csession *cse)
int error;
error = crypto_freesession(cse->sid);
mtx_destroy(&cse->lock);
if (cse->key)
FREE(cse->key, M_XDATA);
if (cse->mackey)

View File

@ -264,6 +264,7 @@ struct cryptop {
#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
#define CRYPTO_F_DONE 0x0020 /* Operation completed */
caddr_t crp_buf; /* Data to be processed */
caddr_t crp_opaque; /* Opaque pointer, passed along */