crypto/nitrox: add cipher auth chain processing

Add cipher auth crypto chain processing functionality in symmetric
request manager. Update the release notes.

Signed-off-by: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Nagadheeraj Rottela 2019-10-01 06:41:33 +00:00 committed by Akhil Goyal
parent 1acffa3929
commit 9282bdee5c
5 changed files with 480 additions and 3 deletions

@@ -0,0 +1,40 @@
;
; Supported features of the 'nitrox' crypto driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
In Place SGL = Y
OOP SGL In SGL Out = Y
OOP SGL In LB Out = Y
OOP LB In SGL Out = Y
OOP LB In LB Out = Y
;
; Supported crypto algorithms of the 'nitrox' crypto driver.
;
[Cipher]
AES CBC (128) = Y
AES CBC (192) = Y
AES CBC (256) = Y
;
; Supported authentication algorithms of the 'nitrox' crypto driver.
;
[Auth]
SHA1 HMAC = Y
SHA224 HMAC = Y
SHA256 HMAC = Y
;
; Supported AEAD algorithms of the 'nitrox' crypto driver.
;
[AEAD]
;
; Supported Asymmetric algorithms of the 'nitrox' crypto driver.
;
[Asymmetric]

@@ -10,6 +10,27 @@ information about the NITROX V security processor can be obtained here:
* https://www.marvell.com/security-solutions/nitrox-security-processors/nitrox-v/
Features
--------
Nitrox crypto PMD has support for:
Cipher algorithms:
* ``RTE_CRYPTO_CIPHER_AES_CBC``
Hash algorithms:
* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
Limitations
-----------
* AES_CBC Cipher Only combination is not supported.
* Session-less APIs are not supported.
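For illustration only (an editor's sketch, not part of this patch), the supported cipher and auth algorithms above map onto the standard cryptodev transform API roughly as follows; the key buffers, key lengths and IV offset are placeholder assumptions:

#include <string.h>
#include <rte_crypto.h>

/* Placeholder key material and IV location (assumptions for the sketch). */
static uint8_t cipher_key[16];
static uint8_t auth_key[64];
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

/* Build an AES-CBC encrypt followed by SHA1-HMAC generate chain. */
static void
build_cipher_auth_chain(struct rte_crypto_sym_xform *cipher_xform,
			struct rte_crypto_sym_xform *auth_xform)
{
	memset(cipher_xform, 0, sizeof(*cipher_xform));
	cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform->cipher.key.data = cipher_key;
	cipher_xform->cipher.key.length = sizeof(cipher_key);
	cipher_xform->cipher.iv.offset = IV_OFFSET;
	cipher_xform->cipher.iv.length = 16;
	cipher_xform->next = auth_xform;

	memset(auth_xform, 0, sizeof(*auth_xform));
	auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform->auth.key.data = auth_key;
	auth_xform->auth.key.length = sizeof(auth_key);
	auth_xform->auth.digest_length = 20;
	auth_xform->next = NULL;
}

Per the limitation above, a cipher-only chain (a single AES-CBC transform with ``next = NULL``) would not be accepted by this PMD.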
Installation
------------

@@ -56,6 +56,11 @@ New Features
Also, make sure to start the actual text at the margin.
=========================================================
* **Added Marvell NITROX symmetric crypto PMD.**
Added a symmetric crypto PMD for Marvell NITROX V security processor.
See the :doc:`../cryptodevs/nitrox` guide for more details on this new PMD.
* **Updated NXP crypto PMDs for PDCP support.**
PDCP support is added to DPAA_SEC and DPAA2_SEC PMDs using rte_security APIs.

@@ -701,7 +701,12 @@ nitrox_sym_pmd_create(struct nitrox_device *ndev)
cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
-RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_IN_PLACE_SGL |
RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
ndev->sym_dev = cdev->data->dev_private;
ndev->sym_dev->cdev = cdev;
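As an aside (not part of the diff), applications typically discover these newly advertised flags through the generic capability query; a minimal sketch, assuming a probed NITROX device identified by dev_id:

#include <rte_cryptodev.h>

/* Return non-zero if the device accepts an out-of-place SGL input with a
 * linear-buffer output, one of the flags added above. */
static int
dev_supports_oop_sgl_in_lb_out(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT) != 0;
}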

@@ -10,9 +10,24 @@
#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"
#define MAX_SGBUF_CNT 16
#define MAX_SGCOMP_CNT 5
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define CMD_TIMEOUT 2
struct gphdr {
uint16_t param0;
uint16_t param1;
uint16_t param2;
uint16_t param3;
};
union pkt_instr_hdr {
uint64_t value;
struct {
@@ -105,12 +120,46 @@ struct resp_hdr {
uint64_t completion;
};
struct nitrox_sglist {
uint16_t len;
uint16_t raz0;
uint32_t raz1;
rte_iova_t iova;
void *virt;
};
struct nitrox_sgcomp {
uint16_t len[4];
uint64_t iova[4];
};
struct nitrox_sgtable {
uint8_t map_bufs_cnt;
uint8_t nr_sgcomp;
uint16_t total_bytes;
struct nitrox_sglist sglist[MAX_SGBUF_CNT];
struct nitrox_sgcomp sgcomp[MAX_SGCOMP_CNT];
};
struct iv {
uint8_t *virt;
rte_iova_t iova;
uint16_t len;
};
struct nitrox_softreq {
struct nitrox_crypto_ctx *ctx;
struct rte_crypto_op *op;
struct gphdr gph;
struct nps_pkt_instr instr;
struct resp_hdr resp;
struct nitrox_sgtable in;
struct nitrox_sgtable out;
struct iv iv;
uint64_t timeout;
rte_iova_t dptr;
rte_iova_t rptr;
rte_iova_t iova;
};
@@ -121,10 +170,367 @@ softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
sr->iova = iova;
}
/*
* 64-Byte Instruction Format
*
* ----------------------
* | DPTR0 | 8 bytes
* ----------------------
* | PKT_IN_INSTR_HDR | 8 bytes
* ----------------------
* | PKT_IN_HDR | 16 bytes
* ----------------------
* | SLC_INFO | 16 bytes
* ----------------------
* | Front data | 16 bytes
* ----------------------
*/
static void
create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
{
struct nitrox_crypto_ctx *ctx = sr->ctx;
rte_iova_t ctx_handle;
/* fill the packet instruction */
/* word 0 */
sr->instr.dptr0 = rte_cpu_to_be_64(sr->dptr);
/* word 1 */
sr->instr.ih.value = 0;
sr->instr.ih.s.g = 1;
sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
sr->instr.ih.value = rte_cpu_to_be_64(sr->instr.ih.value);
/* word 2 */
sr->instr.irh.value[0] = 0;
sr->instr.irh.s.uddl = MIN_UDD_LEN;
/* context length in 64-bit words */
sr->instr.irh.s.ctxl = RTE_ALIGN_MUL_CEIL(sizeof(ctx->fctx), 8) / 8;
/* offset from solicit base port 256 */
sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
/* Invalid context cache */
sr->instr.irh.s.ctxc = 0x3;
sr->instr.irh.s.arg = ctx->req_op;
sr->instr.irh.s.opcode = ctx->opcode;
sr->instr.irh.value[0] = rte_cpu_to_be_64(sr->instr.irh.value[0]);
/* word 3 */
ctx_handle = ctx->iova + offsetof(struct nitrox_crypto_ctx, fctx);
sr->instr.irh.s.ctxp = rte_cpu_to_be_64(ctx_handle);
/* word 4 */
sr->instr.slc.value[0] = 0;
sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
sr->instr.slc.value[0] = rte_cpu_to_be_64(sr->instr.slc.value[0]);
/* word 5 */
sr->instr.slc.s.rptr = rte_cpu_to_be_64(sr->rptr);
/*
* No endianness conversion for front data;
* it goes into the payload as-is.
* Put the GP header in front data.
*/
memcpy(&sr->instr.fdata[0], &sr->gph, sizeof(sr->instr.fdata[0]));
sr->instr.fdata[1] = 0;
}
static void
softreq_copy_iv(struct nitrox_softreq *sr)
{
sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
sr->ctx->iv.offset);
sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, sr->ctx->iv.offset);
sr->iv.len = sr->ctx->iv.length;
}
static int
extract_cipher_auth_digest(struct nitrox_softreq *sr,
struct nitrox_sglist *digest)
{
struct rte_crypto_op *op = sr->op;
struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
op->sym->m_src;
if (sr->ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
unlikely(!op->sym->auth.digest.data))
return -EINVAL;
digest->len = sr->ctx->digest_length;
if (op->sym->auth.digest.data) {
digest->iova = op->sym->auth.digest.phys_addr;
digest->virt = op->sym->auth.digest.data;
return 0;
}
if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
op->sym->auth.data.length + digest->len))
return -EINVAL;
digest->iova = rte_pktmbuf_mtophys_offset(mdst,
op->sym->auth.data.offset +
op->sym->auth.data.length);
digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
op->sym->auth.data.offset +
op->sym->auth.data.length);
return 0;
}
static void
fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
void *virt)
{
struct nitrox_sglist *sglist = sgtbl->sglist;
uint8_t cnt = sgtbl->map_bufs_cnt;
if (unlikely(!len))
return;
sglist[cnt].len = len;
sglist[cnt].iova = iova;
sglist[cnt].virt = virt;
sgtbl->total_bytes += len;
cnt++;
sgtbl->map_bufs_cnt = cnt;
}
static int
create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
uint32_t off, int datalen)
{
struct nitrox_sglist *sglist = sgtbl->sglist;
uint8_t cnt = sgtbl->map_bufs_cnt;
struct rte_mbuf *m;
int mlen;
if (unlikely(datalen <= 0))
return 0;
for (m = mbuf; m && off > rte_pktmbuf_data_len(m); m = m->next)
off -= rte_pktmbuf_data_len(m);
if (unlikely(!m))
return -EIO;
mlen = rte_pktmbuf_data_len(m) - off;
if (datalen <= mlen)
mlen = datalen;
sglist[cnt].len = mlen;
sglist[cnt].iova = rte_pktmbuf_mtophys_offset(m, off);
sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
sgtbl->total_bytes += mlen;
cnt++;
datalen -= mlen;
for (m = m->next; m && datalen; m = m->next) {
mlen = rte_pktmbuf_data_len(m) < datalen ?
rte_pktmbuf_data_len(m) : datalen;
sglist[cnt].len = mlen;
sglist[cnt].iova = rte_pktmbuf_mtophys(m);
sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
sgtbl->total_bytes += mlen;
cnt++;
datalen -= mlen;
}
RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
sgtbl->map_bufs_cnt = cnt;
return 0;
}
static int
create_cipher_auth_sglist(struct nitrox_softreq *sr,
struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
{
struct rte_crypto_op *op = sr->op;
int auth_only_len;
int err;
fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
auth_only_len = op->sym->auth.data.length - op->sym->cipher.data.length;
if (unlikely(auth_only_len < 0))
return -EINVAL;
err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
auth_only_len);
if (unlikely(err))
return err;
err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
op->sym->cipher.data.length);
if (unlikely(err))
return err;
return 0;
}
static void
create_sgcomp(struct nitrox_sgtable *sgtbl)
{
int i, j, nr_sgcomp;
struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
struct nitrox_sglist *sglist = sgtbl->sglist;
nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
sgtbl->nr_sgcomp = nr_sgcomp;
for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
for (j = 0; j < 4; j++, sglist++) {
sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
}
}
}
static int
create_cipher_auth_inbuf(struct nitrox_softreq *sr,
struct nitrox_sglist *digest)
{
int err;
struct nitrox_crypto_ctx *ctx = sr->ctx;
err = create_cipher_auth_sglist(sr, &sr->in, sr->op->sym->m_src);
if (unlikely(err))
return err;
if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);
create_sgcomp(&sr->in);
sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
return 0;
}
static int
create_cipher_auth_oop_outbuf(struct nitrox_softreq *sr,
struct nitrox_sglist *digest)
{
int err;
struct nitrox_crypto_ctx *ctx = sr->ctx;
err = create_cipher_auth_sglist(sr, &sr->out, sr->op->sym->m_dst);
if (unlikely(err))
return err;
if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);
return 0;
}
static void
create_cipher_auth_inplace_outbuf(struct nitrox_softreq *sr,
struct nitrox_sglist *digest)
{
int i, cnt;
struct nitrox_crypto_ctx *ctx = sr->ctx;
cnt = sr->out.map_bufs_cnt;
for (i = 0; i < sr->in.map_bufs_cnt; i++, cnt++) {
sr->out.sglist[cnt].len = sr->in.sglist[i].len;
sr->out.sglist[cnt].iova = sr->in.sglist[i].iova;
sr->out.sglist[cnt].virt = sr->in.sglist[i].virt;
}
sr->out.map_bufs_cnt = cnt;
if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
fill_sglist(&sr->out, digest->len, digest->iova,
digest->virt);
} else if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
sr->out.map_bufs_cnt--;
}
}
static int
create_cipher_auth_outbuf(struct nitrox_softreq *sr,
struct nitrox_sglist *digest)
{
struct rte_crypto_op *op = sr->op;
int cnt = 0;
sr->resp.orh = PENDING_SIG;
sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
resp.orh);
sr->out.sglist[cnt].virt = &sr->resp.orh;
cnt++;
sr->out.map_bufs_cnt = cnt;
if (op->sym->m_dst) {
int err;
err = create_cipher_auth_oop_outbuf(sr, digest);
if (unlikely(err))
return err;
} else {
create_cipher_auth_inplace_outbuf(sr, digest);
}
cnt = sr->out.map_bufs_cnt;
sr->resp.completion = PENDING_SIG;
sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
resp.completion);
sr->out.sglist[cnt].virt = &sr->resp.completion;
cnt++;
RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
sr->out.map_bufs_cnt = cnt;
create_sgcomp(&sr->out);
sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
return 0;
}
static void
create_aead_gph(uint32_t cryptlen, uint16_t ivlen, uint32_t authlen,
struct gphdr *gph)
{
int auth_only_len;
union {
struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
uint16_t iv_offset : 8;
uint16_t auth_offset : 8;
#else
uint16_t auth_offset : 8;
uint16_t iv_offset : 8;
#endif
};
uint16_t value;
} param3;
gph->param0 = rte_cpu_to_be_16(cryptlen);
gph->param1 = rte_cpu_to_be_16(authlen);
auth_only_len = authlen - cryptlen;
gph->param2 = rte_cpu_to_be_16(ivlen + auth_only_len);
param3.iv_offset = 0;
param3.auth_offset = ivlen;
gph->param3 = rte_cpu_to_be_16(param3.value);
}
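/*
 * Editor's note (illustrative, not part of the driver source): a worked
 * example of the packing above, assuming cryptlen = 64, ivlen = 16 and
 * authlen = 80, i.e. 16 authenticate-only bytes ahead of the cipher region:
 *   param0 = be16(64)              cipher data length
 *   param1 = be16(80)              auth data length
 *   param2 = be16(16 + (80 - 64))  IV length plus auth-only length = 32
 *   param3 = be16((iv_offset << 8) | auth_offset) = be16((0 << 8) | 16)
 */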
static int
process_cipher_auth_data(struct nitrox_softreq *sr)
{
-RTE_SET_USED(sr);
struct rte_crypto_op *op = sr->op;
int err;
struct nitrox_sglist digest;
softreq_copy_iv(sr);
err = extract_cipher_auth_digest(sr, &digest);
if (unlikely(err))
return err;
err = create_cipher_auth_inbuf(sr, &digest);
if (unlikely(err))
return err;
err = create_cipher_auth_outbuf(sr, &digest);
if (unlikely(err))
return err;
create_aead_gph(op->sym->cipher.data.length, sr->iv.len,
op->sym->auth.data.length, &sr->gph);
return 0;
}
@@ -152,11 +558,11 @@ nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
struct nitrox_crypto_ctx *ctx,
struct nitrox_softreq *sr)
{
-RTE_SET_USED(qno);
softreq_init(sr, sr->iova);
sr->ctx = ctx;
sr->op = op;
process_softreq(sr);
create_se_instr(sr, qno);
sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
return 0;
}
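Editor's sketch (not from the patch): from the application side, requests reach nitrox_process_se_req() through the normal cryptodev burst API; dev_id and queue pair 0 below are assumed to be configured already.

#include <rte_cryptodev.h>

/* Enqueue a burst of prepared cipher-auth ops and poll until they return. */
static void
run_burst(uint8_t dev_id, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enq, deq = 0;

	enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
	while (deq < enq)
		deq += rte_cryptodev_dequeue_burst(dev_id, 0, ops + deq,
						   enq - deq);
}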