Invoke the driver directly when a request is submitted via crypto_dispatch
or crypto_kdispatch, unless the driver is currently blocked.  This eliminates
the context switch to the dispatch thread for virtually all requests.

Note that this change means that for software crypto drivers the caller
will now block until the request is completed and the callback is dispatched
to the callback thread (h/w drivers will typically just dispatch the op to
the device and return quickly).  If this is an issue we can either implement
a non-blocking interface in the s/w crypto driver, or use the
"no delay" flag in the crypto request or the "software driver" capability
flag to control what to do.

Sponsored by:	Vernier Networks
This commit is contained in:
Sam Leffler 2003-01-09 05:39:04 +00:00
parent 972136fa24
commit f7890744d0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=108990

View File

@ -96,6 +96,8 @@ static struct proc *cryptoproc;
static void crypto_ret_proc(void);
static struct proc *cryptoretproc;
static void crypto_destroy(void);
static int crypto_invoke(struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp, int hint);
static struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
@ -652,8 +654,9 @@ crypto_unblock(u_int32_t driverid, int what)
int
crypto_dispatch(struct cryptop *crp)
{
u_int32_t hid = SESID2HID(crp->crp_sid);
struct cryptocap *cap;
int wasempty;
int result;
cryptostats.cs_ops++;
@ -663,18 +666,30 @@ crypto_dispatch(struct cryptop *crp)
#endif
CRYPTO_Q_LOCK();
wasempty = TAILQ_EMPTY(&crp_q);
TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
/*
* Wakeup processing thread if driver is not blocked.
*/
cap = crypto_checkdriver(SESID2HID(crp->crp_sid));
if (cap && !cap->cc_qblocked && wasempty)
wakeup_one(&crp_q);
cap = crypto_checkdriver(hid);
if (cap && !cap->cc_qblocked) {
result = crypto_invoke(crp, 0);
if (result == ERESTART) {
/*
* The driver ran out of resources, mark the
* driver ``blocked'' for cryptop's and put
* the request on the queue.
*/
crypto_drivers[hid].cc_qblocked = 1;
TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
cryptostats.cs_blocks++;
}
} else {
/*
* The driver is blocked, just queue the op until
* it unblocks and the kernel thread gets kicked.
*/
TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
result = 0;
}
CRYPTO_Q_UNLOCK();
return 0;
return result;
}
/*
@ -685,23 +700,39 @@ int
crypto_kdispatch(struct cryptkop *krp)
{
struct cryptocap *cap;
int wasempty;
int result;
cryptostats.cs_kops++;
CRYPTO_Q_LOCK();
wasempty = TAILQ_EMPTY(&crp_kq);
TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
/*
* Wakeup processing thread if driver is not blocked.
*/
cap = crypto_checkdriver(krp->krp_hid);
if (cap && !cap->cc_kqblocked && wasempty)
wakeup_one(&crp_q); /* NB: shared wait channel */
if (cap && !cap->cc_kqblocked) {
result = crypto_kinvoke(krp, 0);
if (result == ERESTART) {
/*
* The driver ran out of resources, mark the
* driver ``blocked'' for cryptkop's and put
* the request back in the queue.  It would be
* best to put the request back where we got
* it but that's hard so for now we put it
* at the front. This should be ok; putting
* it at the end does not work.
*/
crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
cryptostats.cs_kblocks++;
}
} else {
/*
* The driver is blocked, just queue the op until
* it unblocks and the kernel thread gets kicked.
*/
TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
result = 0;
}
CRYPTO_Q_UNLOCK();
return 0;
return result;
}
/*