Reduce contention on the per-adapter lock in ccr(4).

- Move the temporary sglists into the session structure and protect them
  with a per-session lock instead of the per-adapter lock (see the first
  sketch before the diff below).

- Retire the unused session field 'active', and move the debugging field
  'pending' under INVARIANTS so that completion handling does not need to
  take the session lock when INVARIANTS isn't enabled.

- Use counter_u64 (counter(9)) for per-adapter statistics (see the second
  sketch before the diff below).

Note that this helps for cases where multiple sessions are used
(e.g. multiple IPsec SAs or multiple KTLS connections).  It does not
help for workloads that use a single session (e.g. a single GELI
volume).

Reviewed by:	np
Sponsored by:	Chelsio Communications
Differential Revision:	https://reviews.freebsd.org/D25457
commit 94578db218
parent dae61c9d09
Author:	John Baldwin
Date:	2020-06-26 00:01:31 +00:00
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=362637

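For reference, here is a condensed sketch of the per-session pattern this
change introduces. It is abridged and illustrative, not verbatim: the field
order, the comments, and the elided request-building code are mine; the
authoritative version is the diff below.

	/*
	 * Each session now owns a lock and its own pre-allocated S/G
	 * lists, so requests on different sessions no longer serialize
	 * on the adapter-wide sc->lock.  The debugging-only 'pending'
	 * count sits under INVARIANTS so that completion handling can
	 * skip the session lock entirely on non-INVARIANTS kernels.
	 */
	struct ccr_session {
	#ifdef INVARIANTS
		int pending;			/* in-flight requests (debug) */
	#endif
		struct ccr_port *port;
		struct sglist *sg_input;	/* whole input buffer */
		struct sglist *sg_output;	/* whole output buffer */
		struct sglist *sg_ulptx;	/* engine DMA input */
		struct sglist *sg_dsgl;		/* engine DMA output */
		struct mtx lock;		/* protects the sglists */
	};

	static int
	ccr_process(device_t dev, struct cryptop *crp, int hint)
	{
		struct ccr_session *s;
		int error;

		s = crypto_get_driver_session(crp->crp_session);
		mtx_lock(&s->lock);	/* per-session, not per-adapter */
		error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
		/* ... build and transmit the work request ... */
		mtx_unlock(&s->lock);
		return (error);
	}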

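The statistics conversion follows the stock counter(9) lifecycle. Below is a
condensed sketch with a single counter; every call shown (counter_u64_alloc,
counter_u64_add, counter_u64_free, SYSCTL_ADD_COUNTER_U64) appears in the
diff itself, but their arrangement into one snippet is illustrative. The
diff converts all of the per-adapter statistics the same way.

	#include <sys/counter.h>

	/* In the softc, 'uint64_t stats_wr_nomem' becomes: */
	counter_u64_t stats_wr_nomem;

	/* ccr_attach(): allocate the per-CPU counter and export it. */
	sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem",
	    CTLFLAG_RD, &sc->stats_wr_nomem,
	    "Work request memory allocation failures");

	/* Hot path: a per-CPU increment, no lock or atomic needed. */
	counter_u64_add(sc->stats_wr_nomem, 1);

	/* Gauges such as stats_inflight decrement with a negative delta. */
	counter_u64_add(sc->stats_inflight, -1);

	/* ccr_detach(): release the counter. */
	counter_u64_free(sc->stats_wr_nomem);
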
@@ -170,8 +170,9 @@ struct ccr_port {
};
struct ccr_session {
-bool active;
+#ifdef INVARIANTS
int pending;
+#endif
enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode;
struct ccr_port *port;
union {
@@ -180,16 +181,7 @@ struct ccr_session {
struct ccr_session_ccm_mac ccm_mac;
};
struct ccr_session_blkcipher blkcipher;
-};
-struct ccr_softc {
-struct adapter *adapter;
-device_t dev;
-uint32_t cid;
-struct mtx lock;
-bool detaching;
-struct ccr_port ports[MAX_NPORTS];
-u_int port_mask;
/*
* Pre-allocate S/G lists used when preparing a work request.
@@ -205,6 +197,16 @@ struct ccr_softc {
struct sglist *sg_output;
struct sglist *sg_ulptx;
struct sglist *sg_dsgl;
+};
+struct ccr_softc {
+struct adapter *adapter;
+device_t dev;
+uint32_t cid;
+struct mtx lock;
+bool detaching;
+struct ccr_port ports[MAX_NPORTS];
+u_int port_mask;
/*
* Pre-allocate a dummy output buffer for the IV and AAD for
@@ -214,24 +216,23 @@ struct ccr_softc {
struct sglist *sg_iv_aad;
/* Statistics. */
-uint64_t stats_blkcipher_encrypt;
-uint64_t stats_blkcipher_decrypt;
-uint64_t stats_hash;
-uint64_t stats_hmac;
-uint64_t stats_eta_encrypt;
-uint64_t stats_eta_decrypt;
-uint64_t stats_gcm_encrypt;
-uint64_t stats_gcm_decrypt;
-uint64_t stats_ccm_encrypt;
-uint64_t stats_ccm_decrypt;
-uint64_t stats_wr_nomem;
-uint64_t stats_inflight;
-uint64_t stats_mac_error;
-uint64_t stats_pad_error;
-uint64_t stats_bad_session;
-uint64_t stats_sglist_error;
-uint64_t stats_process_error;
-uint64_t stats_sw_fallback;
+counter_u64_t stats_blkcipher_encrypt;
+counter_u64_t stats_blkcipher_decrypt;
+counter_u64_t stats_hash;
+counter_u64_t stats_hmac;
+counter_u64_t stats_eta_encrypt;
+counter_u64_t stats_eta_decrypt;
+counter_u64_t stats_gcm_encrypt;
+counter_u64_t stats_gcm_decrypt;
+counter_u64_t stats_ccm_encrypt;
+counter_u64_t stats_ccm_decrypt;
+counter_u64_t stats_wr_nomem;
+counter_u64_t stats_inflight;
+counter_u64_t stats_mac_error;
+counter_u64_t stats_pad_error;
+counter_u64_t stats_sglist_error;
+counter_u64_t stats_process_error;
+counter_u64_t stats_sw_fallback;
};
/*
@@ -307,8 +308,7 @@ ccr_phys_dsgl_len(int nsegs)
}
static void
-ccr_write_phys_dsgl(struct ccr_softc *sc, struct ccr_session *s, void *dst,
-int nsegs)
+ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs)
{
struct sglist *sg;
struct cpl_rx_phys_dsgl *cpl;
@@ -317,7 +317,7 @@ ccr_write_phys_dsgl(struct ccr_softc *sc, struct ccr_session *s, void *dst,
size_t seglen;
u_int i, j;
-sg = sc->sg_dsgl;
+sg = s->sg_dsgl;
cpl = dst;
cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
V_CPL_RX_PHYS_DSGL_ISRDMA(0));
@@ -366,14 +366,14 @@ ccr_ulptx_sgl_len(int nsegs)
}
static void
-ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs)
+ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs)
{
struct ulptx_sgl *usgl;
struct sglist *sg;
struct sglist_seg *ss;
int i;
-sg = sc->sg_ulptx;
+sg = s->sg_ulptx;
MPASS(nsegs == sg->sg_nseg);
ss = &sg->sg_segs[0];
usgl = dst;
@@ -496,12 +496,12 @@ ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
sgl_len = 0;
} else {
imm_len = 0;
-sglist_reset(sc->sg_ulptx);
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+sglist_reset(s->sg_ulptx);
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
-sgl_nsegs = sc->sg_ulptx->sg_nseg;
+sgl_nsegs = s->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
@@ -510,7 +510,7 @@ ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
return (EFBIG);
wr = alloc_wrqe(wr_len, s->port->txq);
if (wr == NULL) {
-sc->stats_wr_nomem++;
+counter_u64_add(sc->stats_wr_nomem, 1);
return (ENOMEM);
}
crwr = wrtod(wr);
@@ -564,7 +564,7 @@ ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crypto_copydata(crp, crp->crp_payload_start,
crp->crp_payload_length, dst);
else
-ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
+ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
@@ -620,16 +620,16 @@ ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
else
op_type = CHCR_DECRYPT_OP;
-sglist_reset(sc->sg_dsgl);
+sglist_reset(s->sg_dsgl);
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
crp->crp_payload_output_start, crp->crp_payload_length);
else
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
-dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
+dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
return (EFBIG);
dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
@@ -650,12 +650,12 @@ ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
sgl_len = 0;
} else {
imm_len = 0;
-sglist_reset(sc->sg_ulptx);
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+sglist_reset(s->sg_ulptx);
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
-sgl_nsegs = sc->sg_ulptx->sg_nseg;
+sgl_nsegs = s->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
@@ -665,7 +665,7 @@ ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
return (EFBIG);
wr = alloc_wrqe(wr_len, s->port->txq);
if (wr == NULL) {
-sc->stats_wr_nomem++;
+counter_u64_add(sc->stats_wr_nomem, 1);
return (ENOMEM);
}
crwr = wrtod(wr);
@@ -737,7 +737,7 @@ ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
}
dst = (char *)(crwr + 1) + kctx_len;
-ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
+ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
memcpy(dst, iv, iv_len);
dst += iv_len;
@@ -745,7 +745,7 @@ ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crypto_copydata(crp, crp->crp_payload_start,
crp->crp_payload_length, dst);
else
-ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
+ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
@@ -846,30 +846,30 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
MAX_REQUEST_SIZE)
return (EFBIG);
}
-sglist_reset(sc->sg_dsgl);
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
+sglist_reset(s->sg_dsgl);
+error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0,
iv_len + crp->crp_aad_length);
if (error)
return (error);
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
crp->crp_payload_output_start, crp->crp_payload_length);
else
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
crp->crp_digest_start, hash_size_in_response);
else
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
-dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
+dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
return (EFBIG);
dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
@@ -916,29 +916,29 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
sgl_len = 0;
} else {
imm_len = 0;
-sglist_reset(sc->sg_ulptx);
+sglist_reset(s->sg_ulptx);
if (crp->crp_aad_length != 0) {
if (crp->crp_aad != NULL)
-error = sglist_append(sc->sg_ulptx,
+error = sglist_append(s->sg_ulptx,
crp->crp_aad, crp->crp_aad_length);
else
-error = sglist_append_sglist(sc->sg_ulptx,
-sc->sg_input, crp->crp_aad_start,
+error = sglist_append_sglist(s->sg_ulptx,
+s->sg_input, crp->crp_aad_start,
crp->crp_aad_length);
if (error)
return (error);
}
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
-sgl_nsegs = sc->sg_ulptx->sg_nseg;
+sgl_nsegs = s->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
@@ -966,7 +966,7 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
return (EFBIG);
wr = alloc_wrqe(wr_len, s->port->txq);
if (wr == NULL) {
-sc->stats_wr_nomem++;
+counter_u64_add(sc->stats_wr_nomem, 1);
return (ENOMEM);
}
crwr = wrtod(wr);
@@ -1048,7 +1048,7 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
memcpy(dst, s->hmac.pads, iopad_size * 2);
dst = (char *)(crwr + 1) + kctx_len;
-ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
+ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
memcpy(dst, iv, iv_len);
dst += iv_len;
@@ -1068,7 +1068,7 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crypto_copydata(crp, crp->crp_digest_start,
hash_size_in_response, dst);
} else
-ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
+ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
@@ -1168,30 +1168,30 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
MAX_REQUEST_SIZE)
return (EFBIG);
}
-sglist_reset(sc->sg_dsgl);
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
+sglist_reset(s->sg_dsgl);
+error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
crp->crp_aad_length);
if (error)
return (error);
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
crp->crp_payload_output_start, crp->crp_payload_length);
else
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
crp->crp_digest_start, hash_size_in_response);
else
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
-dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
+dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
return (EFBIG);
dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
@@ -1225,29 +1225,29 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
sgl_len = 0;
} else {
imm_len = 0;
-sglist_reset(sc->sg_ulptx);
+sglist_reset(s->sg_ulptx);
if (crp->crp_aad_length != 0) {
if (crp->crp_aad != NULL)
-error = sglist_append(sc->sg_ulptx,
+error = sglist_append(s->sg_ulptx,
crp->crp_aad, crp->crp_aad_length);
else
-error = sglist_append_sglist(sc->sg_ulptx,
-sc->sg_input, crp->crp_aad_start,
+error = sglist_append_sglist(s->sg_ulptx,
+s->sg_input, crp->crp_aad_start,
crp->crp_aad_length);
if (error)
return (error);
}
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
-sgl_nsegs = sc->sg_ulptx->sg_nseg;
+sgl_nsegs = s->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
@@ -1274,7 +1274,7 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
return (EFBIG);
wr = alloc_wrqe(wr_len, s->port->txq);
if (wr == NULL) {
-sc->stats_wr_nomem++;
+counter_u64_add(sc->stats_wr_nomem, 1);
return (ENOMEM);
}
crwr = wrtod(wr);
@@ -1339,7 +1339,7 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
dst = (char *)(crwr + 1) + kctx_len;
-ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
+ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
memcpy(dst, iv, iv_len);
dst += iv_len;
@@ -1359,7 +1359,7 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crypto_copydata(crp, crp->crp_digest_start,
hash_size_in_response, dst);
} else
-ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
+ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
/* XXX: TODO backpressure */
t4_wrq_tx(sc->adapter, wr);
@@ -1642,30 +1642,30 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
MAX_REQUEST_SIZE)
return (EFBIG);
}
-sglist_reset(sc->sg_dsgl);
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
+sglist_reset(s->sg_dsgl);
+error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
aad_len);
if (error)
return (error);
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
crp->crp_payload_output_start, crp->crp_payload_length);
else
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
crp->crp_digest_start, hash_size_in_response);
else
-error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
+error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
-dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
+dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
return (EFBIG);
dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
@@ -1701,29 +1701,29 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
/* Block 0 is passed as immediate data. */
imm_len = b0_len;
-sglist_reset(sc->sg_ulptx);
+sglist_reset(s->sg_ulptx);
if (crp->crp_aad_length != 0) {
if (crp->crp_aad != NULL)
-error = sglist_append(sc->sg_ulptx,
+error = sglist_append(s->sg_ulptx,
crp->crp_aad, crp->crp_aad_length);
else
-error = sglist_append_sglist(sc->sg_ulptx,
-sc->sg_input, crp->crp_aad_start,
+error = sglist_append_sglist(s->sg_ulptx,
+s->sg_input, crp->crp_aad_start,
crp->crp_aad_length);
if (error)
return (error);
}
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
-error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input,
+error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
-sgl_nsegs = sc->sg_ulptx->sg_nseg;
+sgl_nsegs = s->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
@@ -1745,7 +1745,7 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
return (EFBIG);
wr = alloc_wrqe(wr_len, s->port->txq);
if (wr == NULL) {
-sc->stats_wr_nomem++;
+counter_u64_add(sc->stats_wr_nomem, 1);
return (ENOMEM);
}
crwr = wrtod(wr);
@@ -1809,7 +1809,7 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
s->blkcipher.enckey, s->blkcipher.key_len);
dst = (char *)(crwr + 1) + kctx_len;
-ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs);
+ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
memcpy(dst, iv, iv_len);
dst += iv_len;
@@ -1847,7 +1847,7 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
idata->len = htobe32(0);
dst = (void *)(idata + 1);
}
-ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
+ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
}
/* XXX: TODO backpressure */
@@ -2054,47 +2054,50 @@ ccr_sysctls(struct ccr_softc *sc)
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
&sc->stats_hash, 0, "Hash requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
&sc->stats_hmac, 0, "HMAC requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD,
&sc->stats_blkcipher_encrypt, 0,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
&sc->stats_hash, "Hash requests submitted");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
&sc->stats_hmac, "HMAC requests submitted");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt",
CTLFLAG_RD, &sc->stats_blkcipher_encrypt,
"Cipher encryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
&sc->stats_blkcipher_decrypt, 0,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt",
CTLFLAG_RD, &sc->stats_blkcipher_decrypt,
"Cipher decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD,
&sc->stats_eta_encrypt, 0,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt",
CTLFLAG_RD, &sc->stats_eta_encrypt,
"Combined AES+HMAC encryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD,
&sc->stats_eta_decrypt, 0,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt",
CTLFLAG_RD, &sc->stats_eta_decrypt,
"Combined AES+HMAC decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
&sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD,
&sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD,
&sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD,
&sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
&sc->stats_wr_nomem, 0, "Work request memory allocation failures");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
&sc->stats_inflight, 0, "Requests currently pending");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
&sc->stats_mac_error, 0, "MAC errors");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
&sc->stats_pad_error, 0, "Padding errors");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD,
&sc->stats_bad_session, 0, "Requests with invalid session ID");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD,
&sc->stats_sglist_error, 0,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt",
CTLFLAG_RD, &sc->stats_gcm_encrypt,
"AES-GCM encryption requests submitted");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt",
CTLFLAG_RD, &sc->stats_gcm_decrypt,
"AES-GCM decryption requests submitted");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt",
CTLFLAG_RD, &sc->stats_ccm_encrypt,
"AES-CCM encryption requests submitted");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt",
CTLFLAG_RD, &sc->stats_ccm_decrypt,
"AES-CCM decryption requests submitted");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
&sc->stats_wr_nomem, "Work request memory allocation failures");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
&sc->stats_inflight, "Requests currently pending");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
&sc->stats_mac_error, "MAC errors");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
&sc->stats_pad_error, "Padding errors");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error",
CTLFLAG_RD, &sc->stats_sglist_error,
"Requests for which DMA mapping failed");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD,
&sc->stats_process_error, 0, "Requests failed during queueing");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD,
&sc->stats_sw_fallback, 0,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
CTLFLAG_RD, &sc->stats_process_error,
"Requests failed during queueing");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
CTLFLAG_RD, &sc->stats_sw_fallback,
"Requests processed by falling back to software");
/*
@@ -2157,12 +2160,25 @@ ccr_attach(device_t dev)
sc->adapter->ccr_softc = sc;
mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
-sc->sg_input = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
-sc->sg_output = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
-sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
-sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK);
sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
+sc->stats_blkcipher_encrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_blkcipher_decrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_hash = counter_u64_alloc(M_WAITOK);
+sc->stats_hmac = counter_u64_alloc(M_WAITOK);
+sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK);
+sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK);
+sc->stats_inflight = counter_u64_alloc(M_WAITOK);
+sc->stats_mac_error = counter_u64_alloc(M_WAITOK);
+sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
+sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
+sc->stats_process_error = counter_u64_alloc(M_WAITOK);
+sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
ccr_sysctls(sc);
return (0);
@@ -2182,12 +2198,25 @@ ccr_detach(device_t dev)
crypto_unregister_all(sc->cid);
mtx_destroy(&sc->lock);
+counter_u64_free(sc->stats_blkcipher_encrypt);
+counter_u64_free(sc->stats_blkcipher_decrypt);
+counter_u64_free(sc->stats_hash);
+counter_u64_free(sc->stats_hmac);
+counter_u64_free(sc->stats_eta_encrypt);
+counter_u64_free(sc->stats_eta_decrypt);
+counter_u64_free(sc->stats_gcm_encrypt);
+counter_u64_free(sc->stats_gcm_decrypt);
+counter_u64_free(sc->stats_ccm_encrypt);
+counter_u64_free(sc->stats_ccm_decrypt);
+counter_u64_free(sc->stats_wr_nomem);
+counter_u64_free(sc->stats_inflight);
+counter_u64_free(sc->stats_mac_error);
+counter_u64_free(sc->stats_pad_error);
+counter_u64_free(sc->stats_sglist_error);
+counter_u64_free(sc->stats_process_error);
+counter_u64_free(sc->stats_sw_fallback);
sglist_free(sc->sg_iv_aad);
free(sc->iv_aad_buf, M_CCR);
-sglist_free(sc->sg_dsgl);
-sglist_free(sc->sg_ulptx);
-sglist_free(sc->sg_output);
-sglist_free(sc->sg_input);
sc->adapter->ccr_softc = NULL;
return (0);
}
@@ -2459,6 +2488,16 @@ ccr_choose_port(struct ccr_softc *sc)
return (best);
}
+static void
+ccr_delete_session(struct ccr_session *s)
+{
+sglist_free(s->sg_input);
+sglist_free(s->sg_output);
+sglist_free(s->sg_ulptx);
+sglist_free(s->sg_dsgl);
+mtx_destroy(&s->lock);
+}
static int
ccr_newsession(device_t dev, crypto_session_t cses,
const struct crypto_session_params *csp)
@@ -2547,18 +2586,31 @@ ccr_newsession(device_t dev, crypto_session_t cses,
}
#endif
+s = crypto_get_driver_session(cses);
+mtx_init(&s->lock, "ccr session", NULL, MTX_DEF);
+s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
+s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
+s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
+s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT);
+if (s->sg_input == NULL || s->sg_output == NULL ||
+s->sg_ulptx == NULL || s->sg_dsgl == NULL) {
+ccr_delete_session(s);
+return (ENOMEM);
+}
sc = device_get_softc(dev);
mtx_lock(&sc->lock);
if (sc->detaching) {
mtx_unlock(&sc->lock);
+ccr_delete_session(s);
return (ENXIO);
}
-s = crypto_get_driver_session(cses);
s->port = ccr_choose_port(sc);
if (s->port == NULL) {
mtx_unlock(&sc->lock);
+ccr_delete_session(s);
return (ENXIO);
}
@@ -2619,7 +2671,6 @@ ccr_newsession(device_t dev, crypto_session_t cses,
csp->csp_cipher_klen);
}
-s->active = true;
s->port->active_sessions++;
mtx_unlock(&sc->lock);
return (0);
@@ -2633,14 +2684,16 @@ ccr_freesession(device_t dev, crypto_session_t cses)
sc = device_get_softc(dev);
s = crypto_get_driver_session(cses);
-mtx_lock(&sc->lock);
+#ifdef INVARIANTS
if (s->pending != 0)
device_printf(dev,
"session %p freed with %d pending requests\n", s,
s->pending);
-s->active = false;
+#endif
+mtx_lock(&sc->lock);
s->port->active_sessions--;
mtx_unlock(&sc->lock);
+ccr_delete_session(s);
}
static int
@@ -2655,12 +2708,12 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
s = crypto_get_driver_session(crp->crp_session);
sc = device_get_softc(dev);
-mtx_lock(&sc->lock);
-error = ccr_populate_sglist(sc->sg_input, &crp->crp_buf);
+mtx_lock(&s->lock);
+error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
-error = ccr_populate_sglist(sc->sg_output, &crp->crp_obuf);
+error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf);
if (error) {
-sc->stats_sglist_error++;
+counter_u64_add(sc->stats_sglist_error, 1);
goto out;
}
@@ -2668,7 +2721,7 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
case HASH:
error = ccr_hash(sc, s, crp);
if (error == 0)
-sc->stats_hash++;
+counter_u64_add(sc->stats_hash, 1);
break;
case HMAC:
if (crp->crp_auth_key != NULL)
@@ -2677,7 +2730,7 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
csp->csp_auth_klen, s->hmac.pads);
error = ccr_hash(sc, s, crp);
if (error == 0)
-sc->stats_hmac++;
+counter_u64_add(sc->stats_hmac, 1);
break;
case BLKCIPHER:
if (crp->crp_cipher_key != NULL)
@@ -2686,9 +2739,9 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
error = ccr_blkcipher(sc, s, crp);
if (error == 0) {
if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
-sc->stats_blkcipher_encrypt++;
+counter_u64_add(sc->stats_blkcipher_encrypt, 1);
else
-sc->stats_blkcipher_decrypt++;
+counter_u64_add(sc->stats_blkcipher_decrypt, 1);
}
break;
case ETA:
@@ -2702,9 +2755,9 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
error = ccr_eta(sc, s, crp);
if (error == 0) {
if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
-sc->stats_eta_encrypt++;
+counter_u64_add(sc->stats_eta_encrypt, 1);
else
-sc->stats_eta_decrypt++;
+counter_u64_add(sc->stats_eta_decrypt, 1);
}
break;
case GCM:
@@ -2715,22 +2768,22 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
csp->csp_cipher_klen);
}
if (crp->crp_payload_length == 0) {
-mtx_unlock(&sc->lock);
+mtx_unlock(&s->lock);
ccr_gcm_soft(s, crp);
return (0);
}
error = ccr_gcm(sc, s, crp);
if (error == EMSGSIZE) {
-sc->stats_sw_fallback++;
-mtx_unlock(&sc->lock);
+counter_u64_add(sc->stats_sw_fallback, 1);
+mtx_unlock(&s->lock);
ccr_gcm_soft(s, crp);
return (0);
}
if (error == 0) {
if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
-sc->stats_gcm_encrypt++;
+counter_u64_add(sc->stats_gcm_encrypt, 1);
else
-sc->stats_gcm_decrypt++;
+counter_u64_add(sc->stats_gcm_decrypt, 1);
}
break;
case CCM:
@@ -2740,28 +2793,30 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
}
error = ccr_ccm(sc, s, crp);
if (error == EMSGSIZE) {
-sc->stats_sw_fallback++;
-mtx_unlock(&sc->lock);
+counter_u64_add(sc->stats_sw_fallback, 1);
+mtx_unlock(&s->lock);
ccr_ccm_soft(s, crp);
return (0);
}
if (error == 0) {
if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
-sc->stats_ccm_encrypt++;
+counter_u64_add(sc->stats_ccm_encrypt, 1);
else
-sc->stats_ccm_decrypt++;
+counter_u64_add(sc->stats_ccm_decrypt, 1);
}
break;
}
if (error == 0) {
+#ifdef INVARIANTS
s->pending++;
-sc->stats_inflight++;
+#endif
+counter_u64_add(sc->stats_inflight, 1);
} else
-sc->stats_process_error++;
+counter_u64_add(sc->stats_process_error, 1);
out:
-mtx_unlock(&sc->lock);
+mtx_unlock(&s->lock);
if (error) {
crp->crp_etype = error;
@@ -2795,9 +2850,12 @@ do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
else
error = 0;
-mtx_lock(&sc->lock);
+#ifdef INVARIANTS
+mtx_lock(&s->lock);
s->pending--;
-sc->stats_inflight--;
+mtx_unlock(&s->lock);
+#endif
+counter_u64_add(sc->stats_inflight, -1);
switch (s->mode) {
case HASH:
@@ -2820,11 +2878,10 @@ do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
if (error == EBADMSG) {
if (CHK_MAC_ERR_BIT(status))
-sc->stats_mac_error++;
+counter_u64_add(sc->stats_mac_error, 1);
if (CHK_PAD_ERR_BIT(status))
-sc->stats_pad_error++;
+counter_u64_add(sc->stats_pad_error, 1);
}
-mtx_unlock(&sc->lock);
crp->crp_etype = error;
crypto_done(crp);
m_freem(m);