diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index 1a0f204c89..5ced183bee 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -57,13 +57,13 @@ struct crypto_testsuite_params { }; struct crypto_unittest_params { - struct rte_crypto_xform cipher_xform; - struct rte_crypto_xform auth_xform; + struct rte_crypto_sym_xform cipher_xform; + struct rte_crypto_sym_xform auth_xform; - struct rte_cryptodev_session *sess; + struct rte_cryptodev_sym_session *sess; struct rte_mbuf_offload *ol; - struct rte_crypto_op *op; + struct rte_crypto_sym_op *op; struct rte_mbuf *obuf, *ibuf; @@ -78,7 +78,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params( struct crypto_unittest_params *ut_params); static int -test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess, +test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess, struct crypto_unittest_params *ut_params, struct crypto_testsuite_params *ts_param); @@ -166,7 +166,7 @@ testsuite_setup(void) "MBUF_OFFLOAD_POOL", NUM_MBUFS, MBUF_CACHE_SIZE, DEFAULT_NUM_XFORMS * - sizeof(struct rte_crypto_xform), + sizeof(struct rte_crypto_sym_xform), rte_socket_id()); if (ts_params->mbuf_ol_pool == NULL) { RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n"); @@ -221,7 +221,7 @@ testsuite_setup(void) ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs; ts_params->conf.socket_id = SOCKET_ID_ANY; - ts_params->conf.session_mp.nb_objs = info.max_nb_sessions; + ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions; TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id, &ts_params->conf), @@ -276,7 +276,7 @@ ut_setup(void) ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE; ts_params->conf.socket_id = SOCKET_ID_ANY; ts_params->conf.session_mp.nb_objs = - (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ? + (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ? 
DEFAULT_NUM_OPS_INFLIGHT : DEFAULT_NUM_OPS_INFLIGHT; @@ -320,7 +320,7 @@ ut_teardown(void) /* free crypto session structure */ if (ut_params->sess) { - rte_cryptodev_session_free(ts_params->valid_devs[0], + rte_cryptodev_sym_session_free(ts_params->valid_devs[0], ut_params->sess); ut_params->sess = NULL; } @@ -465,7 +465,7 @@ test_queue_pair_descriptor_setup(void) rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); - ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions; + ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions; TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0], &ts_params->conf), "Failed to configure cryptodev %u", @@ -768,7 +768,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void) TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest"); /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = &ut_params->auth_xform; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -777,7 +777,8 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void) ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; + ut_params->auth_xform.next = NULL; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; @@ -787,21 +788,21 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1; /* Create crypto session*/ - ut_params->sess = rte_cryptodev_session_create( + ut_params->sess = rte_cryptodev_sym_session_create( ts_params->valid_devs[0], &ut_params->cipher_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); ut_params->op = &ut_params->ol->op.crypto; /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->digest.data = ut_params->digest; ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -864,18 +865,18 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void) /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); ut_params->op = &ut_params->ol->op.crypto; - TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms( + TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms( ut_params->ol, 2), "failed to allocate space for crypto transforms"); /* Set crypto operation data parameters */ - ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; /* cipher parameters */ ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; @@ -884,7 +885,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void) ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* hash parameters */ - ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH; + ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->op->xform->next->auth.op = 
RTE_CRYPTO_AUTH_OP_GENERATE; ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC; @@ -960,7 +961,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void) DIGEST_BYTE_LENGTH_SHA1); /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = NULL; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -969,7 +970,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void) ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = &ut_params->cipher_xform; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; @@ -979,13 +980,14 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0], - &ut_params->auth_xform); + ut_params->sess = + rte_cryptodev_sym_session_create(ts_params->valid_devs[0], + &ut_params->auth_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -993,7 +995,7 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void) /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->digest.data = ut_params->digest; ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -1068,7 +1070,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void) TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest"); /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = &ut_params->auth_xform; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1077,7 +1079,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void) ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = NULL; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; @@ -1087,13 +1089,14 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0], - &ut_params->cipher_xform); + ut_params->sess = + rte_cryptodev_sym_session_create(ts_params->valid_devs[0], + &ut_params->cipher_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -1101,7 +1104,7 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void) /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + 
rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->digest.data = ut_params->digest; ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -1170,7 +1173,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void) DIGEST_BYTE_LENGTH_SHA256); /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = NULL; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1179,7 +1182,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void) ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = &ut_params->cipher_xform; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; @@ -1189,13 +1192,14 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0], - &ut_params->auth_xform); + ut_params->sess = + rte_cryptodev_sym_session_create(ts_params->valid_devs[0], + &ut_params->auth_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -1203,7 +1207,7 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void) /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->digest.data = ut_params->digest; ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -1283,7 +1287,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void) TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest"); /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = &ut_params->auth_xform; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1292,7 +1296,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void) ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = NULL; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; @@ -1302,15 +1306,16 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0], - &ut_params->cipher_xform); + ut_params->sess = + rte_cryptodev_sym_session_create(ts_params->valid_devs[0], + &ut_params->cipher_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -1318,7 +1323,7 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void) /* Set crypto operation data parameters 
*/ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->digest.data = ut_params->digest; ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -1371,7 +1376,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params( struct crypto_unittest_params *ut_params); static int -test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess, +test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess, struct crypto_unittest_params *ut_params, struct crypto_testsuite_params *ts_params); @@ -1386,8 +1391,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void) "Failed to create session params"); /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0], - &ut_params->auth_xform); + ut_params->sess = + rte_cryptodev_sym_session_create(ts_params->valid_devs[0], + &ut_params->auth_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess, @@ -1400,7 +1406,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params( { /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = NULL; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1409,7 +1415,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params( ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = &ut_params->cipher_xform; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; @@ -1423,7 +1429,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params( static int -test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess, +test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess, struct crypto_unittest_params *ut_params, struct crypto_testsuite_params *ts_params) { @@ -1443,7 +1449,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess, /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -1451,7 +1457,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess, /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, sess); + rte_crypto_sym_op_attach_session(ut_params->op, sess); ut_params->op->digest.data = ut_params->digest; ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -1521,7 +1527,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void) TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest"); /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = &ut_params->auth_xform; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1530,7 +1536,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void) ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = 
RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = NULL; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; @@ -1540,13 +1546,14 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0], - &ut_params->cipher_xform); + ut_params->sess = + rte_cryptodev_sym_session_create(ts_params->valid_devs[0], + &ut_params->cipher_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -1554,7 +1561,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void) /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->iv.data = (uint8_t *) rte_pktmbuf_prepend(ut_params->ibuf, @@ -1614,7 +1621,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void) DIGEST_BYTE_LENGTH_AES_XCBC); /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = NULL; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1623,7 +1630,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void) ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = &ut_params->cipher_xform; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; @@ -1633,13 +1640,14 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0], - &ut_params->auth_xform); + ut_params->sess = + rte_cryptodev_sym_session_create(ts_params->valid_devs[0], + &ut_params->auth_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -1647,7 +1655,7 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void) /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC); @@ -1751,7 +1759,7 @@ test_multi_session(void) struct crypto_unittest_params *ut_params = &unittest_params; struct rte_cryptodev_info dev_info; - struct rte_cryptodev_session **sessions; + struct rte_cryptodev_sym_session **sessions; uint16_t i; @@ -1760,12 +1768,13 @@ test_multi_session(void) rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); - sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) * - dev_info.max_nb_sessions) + 1, 0); + sessions = rte_malloc(NULL, + (sizeof(struct rte_cryptodev_sym_session *) * + dev_info.sym.max_nb_sessions) + 1, 0); /* Create multiple crypto 
sessions*/ - for (i = 0; i < dev_info.max_nb_sessions; i++) { - sessions[i] = rte_cryptodev_session_create( + for (i = 0; i < dev_info.sym.max_nb_sessions; i++) { + sessions[i] = rte_cryptodev_sym_session_create( ts_params->valid_devs[0], &ut_params->auth_xform); TEST_ASSERT_NOT_NULL(sessions[i], @@ -1780,13 +1789,13 @@ test_multi_session(void) } /* Next session create should fail */ - sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0], + sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0], &ut_params->auth_xform); TEST_ASSERT_NULL(sessions[i], "Session creation succeeded unexpectedly!"); - for (i = 0; i < dev_info.max_nb_sessions; i++) - rte_cryptodev_session_free(ts_params->valid_devs[0], + for (i = 0; i < dev_info.sym.max_nb_sessions; i++) + rte_cryptodev_sym_session_free(ts_params->valid_devs[0], sessions[i]); rte_free(sessions); @@ -1805,7 +1814,7 @@ test_not_in_place_crypto(void) /* Create multiple crypto sessions*/ - ut_params->sess = rte_cryptodev_session_create( + ut_params->sess = rte_cryptodev_sym_session_create( ts_params->valid_devs[0], &ut_params->auth_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); @@ -1827,7 +1836,7 @@ test_not_in_place_crypto(void) /* Generate Crypto op data structure */ ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ut_params->ol, "Failed to allocate pktmbuf offload"); @@ -1835,7 +1844,7 @@ test_not_in_place_crypto(void) /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(ut_params->op, ut_params->sess); + rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess); ut_params->op->digest.data = ut_params->digest; ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -1961,7 +1970,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = { static int test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/) { - gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD; + gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD; return unit_test_suite_runner(&cryptodev_qat_testsuite); } static struct test_command cryptodev_qat_cmd = { diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c index 87f06703de..b0c8abf049 100644 --- a/app/test/test_cryptodev_perf.c +++ b/app/test/test_cryptodev_perf.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -63,12 +63,12 @@ struct crypto_testsuite_params { #define MAX_NUM_OF_OPS_PER_UT (128) struct crypto_unittest_params { - struct rte_crypto_xform cipher_xform; - struct rte_crypto_xform auth_xform; + struct rte_crypto_sym_xform cipher_xform; + struct rte_crypto_sym_xform auth_xform; - struct rte_cryptodev_session *sess; + struct rte_cryptodev_sym_session *sess; - struct rte_crypto_op *op; + struct rte_crypto_sym_op *op; struct rte_mbuf_offload *ol; struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT]; @@ -127,7 +127,7 @@ testsuite_setup(void) ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL", NUM_MBUFS, MBUF_CACHE_SIZE, DEFAULT_NUM_XFORMS * - sizeof(struct rte_crypto_xform), + sizeof(struct rte_crypto_sym_xform), rte_socket_id()); if (ts_params->mbuf_ol_pool == NULL) { RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n"); @@ -179,7 +179,7 @@ testsuite_setup(void) ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE; ts_params->conf.socket_id = SOCKET_ID_ANY; - ts_params->conf.session_mp.nb_objs = info.max_nb_sessions; + ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions; TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id, &ts_params->conf), @@ -252,7 +252,7 @@ ut_teardown(void) /* free crypto session structure */ if (ut_params->sess) - rte_cryptodev_session_free(ts_params->dev_id, + rte_cryptodev_sym_session_free(ts_params->dev_id, ut_params->sess); /* free crypto operation structure */ @@ -1713,7 +1713,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num) } /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = &ut_params->auth_xform; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1723,7 +1723,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num) /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = NULL; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; @@ -1733,7 +1733,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id, + ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id, &ut_params->cipher_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); @@ -1753,12 +1753,12 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num) DIGEST_BYTE_LENGTH_SHA256); struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc( - ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO); + ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload"); - struct rte_crypto_op *cop = &ol->op.crypto; + struct rte_crypto_sym_op *cop = &ol->op.crypto; - rte_crypto_op_attach_session(cop, ut_params->sess); + rte_crypto_sym_op_attach_session(cop, ut_params->sess); cop->digest.data = ut_params->digest; cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b], @@ -1881,7 +1881,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num) } /* Setup Cipher Parameters */ - ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + ut_params->cipher_xform.type = 
RTE_CRYPTO_SYM_XFORM_CIPHER; ut_params->cipher_xform.next = &ut_params->auth_xform; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -1890,7 +1890,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num) ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC; /* Setup HMAC Parameters */ - ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ut_params->auth_xform.next = NULL; ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; @@ -1900,7 +1900,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num) ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256; /* Create Crypto session*/ - ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id, + ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id, &ut_params->cipher_xform); TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); @@ -1933,12 +1933,12 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num) struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc( ts_params->mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload"); - struct rte_crypto_op *cop = &ol->op.crypto; + struct rte_crypto_sym_op *cop = &ol->op.crypto; - rte_crypto_op_attach_session(cop, ut_params->sess); + rte_crypto_sym_op_attach_session(cop, ut_params->sess); cop->digest.data = ut_params->digest; cop->digest.phys_addr = rte_pktmbuf_mtophys_offset( @@ -2060,7 +2060,7 @@ perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/) static int perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/) { - gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD; + gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD; return unit_test_suite_runner(&cryptodev_testsuite); } diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c index f2afdb6211..f39ebd50f5 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -109,7 +109,7 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash, /** Get xform chain order */ static int -aesni_mb_get_chain_order(const struct rte_crypto_xform *xform) +aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform) { /* * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained @@ -119,12 +119,12 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform) if (xform->next == NULL || xform->next->next != NULL) return -1; - if (xform->type == RTE_CRYPTO_XFORM_AUTH && - xform->next->type == RTE_CRYPTO_XFORM_CIPHER) + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && + xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) return HASH_CIPHER; - if (xform->type == RTE_CRYPTO_XFORM_CIPHER && - xform->next->type == RTE_CRYPTO_XFORM_AUTH) + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) return CIPHER_HASH; return -1; @@ -134,11 +134,11 @@ aesni_mb_get_chain_order(const struct rte_crypto_xform *xform) static int aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops, struct aesni_mb_session *sess, - const struct rte_crypto_xform *xform) + const struct rte_crypto_sym_xform *xform) { hash_one_block_t hash_oneblock_fn; - if (xform->type != RTE_CRYPTO_XFORM_AUTH) { + if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { MB_LOG_ERR("Crypto xform struct not of type auth"); return -1; } @@ -196,11 +196,11 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops, static int aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops, struct aesni_mb_session *sess, - const struct rte_crypto_xform *xform) + const struct rte_crypto_sym_xform *xform) { aes_keyexp_t aes_keyexp_fn; - if (xform->type != RTE_CRYPTO_XFORM_CIPHER) { + if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { MB_LOG_ERR("Crypto xform struct not of type cipher"); return -1; } @@ -259,10 +259,10 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops, int aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops, struct aesni_mb_session *sess, - const struct rte_crypto_xform *xform) + const struct rte_crypto_sym_xform *xform) { - const struct rte_crypto_xform *auth_xform = NULL; - const struct rte_crypto_xform *cipher_xform = NULL; + const struct rte_crypto_sym_xform *auth_xform = NULL; + const struct rte_crypto_sym_xform *cipher_xform = NULL; /* Select Crypto operation - hash then cipher / cipher then hash */ switch (aesni_mb_get_chain_order(xform)) { @@ -296,11 +296,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops, /** Get multi buffer session */ static struct aesni_mb_session * -get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op) +get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op) { struct aesni_mb_session *sess = NULL; - if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) { + if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) { if (unlikely(crypto_op->session->type != RTE_CRYPTODEV_AESNI_MB_PMD)) return NULL; @@ -313,7 +313,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op) return NULL; sess = (struct aesni_mb_session *) - ((struct rte_cryptodev_session *)_sess)->_private; + ((struct rte_cryptodev_sym_session *)_sess)->_private; if (unlikely(aesni_mb_set_session_parameters(qp->ops, sess, crypto_op->xform) != 0)) { @@ -339,7 +339,8 @@ get_session(struct 
aesni_mb_qp *qp, struct rte_crypto_op *crypto_op) */ static JOB_AES_HMAC * process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m, - struct rte_crypto_op *c_op, struct aesni_mb_session *session) + struct rte_crypto_sym_op *c_op, + struct aesni_mb_session *session) { JOB_AES_HMAC *job; @@ -436,14 +437,14 @@ static struct rte_mbuf * post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job) { struct rte_mbuf *m; - struct rte_crypto_op *c_op; + struct rte_crypto_sym_op *c_op; if (job->user_data == NULL) return NULL; /* handled retrieved job */ m = (struct rte_mbuf *)job->user_data; - c_op = (struct rte_crypto_op *)job->user_data2; + c_op = (struct rte_crypto_sym_op *)job->user_data2; /* set status as successful by default */ c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; @@ -463,7 +464,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job) } /* Free session if a session-less crypto op */ - if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) { + if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) { rte_mempool_put(qp->sess_mp, c_op->session); c_op->session = NULL; } @@ -515,7 +516,8 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs, int i, processed_jobs = 0; for (i = 0; i < nb_bufs; i++) { - ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO); + ol = rte_pktmbuf_offload_get(bufs[i], + RTE_PKTMBUF_OL_CRYPTO_SYM); if (unlikely(ol == NULL)) { qp->stats.enqueue_err_count++; goto flush_jobs; diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c index 76a85ff743..d56de12924 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -108,7 +108,7 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->dev_type = dev->dev_type; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->max_nb_sessions = internals->max_nb_sessions; + dev_info->sym.max_nb_sessions = internals->max_nb_sessions; } } @@ -243,7 +243,7 @@ aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) /** Configure a aesni multi-buffer session from a crypto xform chain */ static void * aesni_mb_pmd_session_configure(struct rte_cryptodev *dev, - struct rte_crypto_xform *xform, void *sess) + struct rte_crypto_sym_xform *xform, void *sess) { struct aesni_mb_private *internals = dev->data->dev_private; diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h index 304c85c535..0aed17773c 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -218,7 +218,7 @@ struct aesni_mb_session { extern int aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops, struct aesni_mb_session *sess, - const struct rte_crypto_xform *xform); + const struct rte_crypto_sym_xform *xform); /** device specific operations function pointer structure */ diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index 828756ba6a..e7b9027cc9 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -89,37 +89,37 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev, } static int -qat_get_cmd_id(const struct rte_crypto_xform *xform) +qat_get_cmd_id(const struct rte_crypto_sym_xform *xform) { if (xform->next == NULL) return -1; /* Cipher Only */ - if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL) + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */ /* Authentication Only */ - if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL) + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL) return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */ /* Cipher then Authenticate */ - if (xform->type == RTE_CRYPTO_XFORM_CIPHER && - xform->next->type == RTE_CRYPTO_XFORM_AUTH) + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) return ICP_QAT_FW_LA_CMD_CIPHER_HASH; /* Authenticate then Cipher */ - if (xform->type == RTE_CRYPTO_XFORM_AUTH && - xform->next->type == RTE_CRYPTO_XFORM_CIPHER) + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && + xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) return ICP_QAT_FW_LA_CMD_HASH_CIPHER; return -1; } static struct rte_crypto_auth_xform * -qat_get_auth_xform(struct rte_crypto_xform *xform) +qat_get_auth_xform(struct rte_crypto_sym_xform *xform) { do { - if (xform->type == RTE_CRYPTO_XFORM_AUTH) + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) return &xform->auth; xform = xform->next; @@ -129,10 +129,10 @@ qat_get_auth_xform(struct rte_crypto_xform *xform) } static struct rte_crypto_cipher_xform * -qat_get_cipher_xform(struct rte_crypto_xform *xform) +qat_get_cipher_xform(struct rte_crypto_sym_xform *xform) { do { - if (xform->type == RTE_CRYPTO_XFORM_CIPHER) + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) return &xform->cipher; xform = xform->next; @@ -144,7 +144,7 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform) void * qat_crypto_sym_configure_session(struct rte_cryptodev *dev, - struct rte_crypto_xform *xform, void *session_private) + struct rte_crypto_sym_xform *xform, void *session_private) { struct qat_pmd_private *internals = dev->data->dev_private; @@ -261,7 +261,7 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev, auth_xform->digest_length)) goto error_out; - return (struct rte_cryptodev_session *)session; + return (struct rte_crypto_sym_session *)session; error_out: rte_mempool_put(internals->sess_mp, session); @@ -275,7 +275,7 @@ unsigned qat_crypto_sym_get_session_private_size( } -uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts, +uint16_t qat_sym_crypto_pkt_tx_burst(void 
*qp, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { register struct qat_queue *queue; @@ -327,7 +327,8 @@ uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts, } uint16_t -qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) { struct rte_mbuf_offload *ol; struct qat_queue *queue; @@ -343,12 +344,13 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG && msg_counter != nb_pkts) { rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data); - ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO); - + ol = rte_pktmbuf_offload_get(rx_mbuf, + RTE_PKTMBUF_OL_CRYPTO_SYM); if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( resp_msg->comn_hdr.comn_status)) { - ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + ol->op.crypto.status = + RTE_CRYPTO_OP_STATUS_AUTH_FAILED; } else { ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS; } @@ -384,20 +386,21 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg) struct icp_qat_fw_la_auth_req_params *auth_param; register struct icp_qat_fw_la_bulk_req *qat_req; - ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO); + ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM); if (unlikely(ol == NULL)) { PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached " "to (%p) mbuf.", mbuf); return -EINVAL; } - if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) { + if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) { PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented" " requests mbuf (%p) is sessionless.", mbuf); return -EINVAL; } - if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) { + if (unlikely(ol->op.crypto.session->type + != RTE_CRYPTODEV_QAT_SYM_PMD)) { PMD_DRV_LOG(ERR, "Session was not created for this device"); return -EINVAL; } @@ -520,8 +523,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev, ADF_NUM_SYM_QPS_PER_BUNDLE * ADF_NUM_BUNDLES_PER_DEV; - info->max_nb_sessions = internals->max_nb_sessions; - info->dev_type = RTE_CRYPTODEV_QAT_PMD; + info->sym.max_nb_sessions = internals->max_nb_sessions; + info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD; } } diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h index d680364b56..e9f71fe5ac 100644 --- a/drivers/crypto/qat/qat_crypto.h +++ b/drivers/crypto/qat/qat_crypto.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -109,16 +109,18 @@ qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess); extern void * qat_crypto_sym_configure_session(struct rte_cryptodev *dev, - struct rte_crypto_xform *xform, void *session_private); + struct rte_crypto_sym_xform *xform, void *session_private); extern void qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session); uint16_t -qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); uint16_t -qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); #endif /* _QAT_CRYPTO_H_ */ diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c index e500c1e4d6..85700fc386 100644 --- a/drivers/crypto/qat/rte_qat_cryptodev.c +++ b/drivers/crypto/qat/rte_qat_cryptodev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -89,11 +89,11 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_ cryptodev->pci_dev->addr.devid, cryptodev->pci_dev->addr.function); - cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD; + cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD; cryptodev->dev_ops = &crypto_qat_ops; - cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst; - cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst; + cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst; + cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst; internals = cryptodev->data->dev_private; diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c index a950b7492b..ee519e7fd1 100644 --- a/examples/l2fwd-crypto/main.c +++ b/examples/l2fwd-crypto/main.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -124,13 +124,13 @@ struct l2fwd_crypto_options { enum l2fwd_crypto_xform_chain xform_chain; - struct rte_crypto_xform cipher_xform; + struct rte_crypto_sym_xform cipher_xform; uint8_t ckey_data[32]; struct rte_crypto_key iv_key; uint8_t ivkey_data[16]; - struct rte_crypto_xform auth_xform; + struct rte_crypto_sym_xform auth_xform; uint8_t akey_data[128]; }; @@ -141,9 +141,8 @@ struct l2fwd_crypto_params { unsigned digest_length; unsigned block_size; - struct rte_crypto_key iv_key; - struct rte_cryptodev_session *session; + struct rte_cryptodev_sym_session *session; }; /** lcore configuration */ @@ -372,7 +371,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m, } /* Set crypto operation data parameters */ - rte_crypto_op_attach_session(&ol->op.crypto, cparams->session); + rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session); /* Append space for digest to end of packet */ ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m, @@ -474,11 +473,11 @@ generate_random_key(uint8_t *key, unsigned length) key[i] = rand() % 0xff; } -static struct rte_cryptodev_session * +static struct rte_cryptodev_sym_session * initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id) { - struct rte_crypto_xform *first_xform; + struct rte_crypto_sym_xform *first_xform; if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) { first_xform = &options->cipher_xform; @@ -489,7 +488,7 @@ initialize_crypto_session(struct l2fwd_crypto_options *options, } /* Setup Cipher Parameters */ - return rte_cryptodev_session_create(cdev_id, first_xform); + return rte_cryptodev_sym_session_create(cdev_id, first_xform); } static void @@ -610,7 +609,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options) m = pkts_burst[j]; ol = rte_pktmbuf_offload_alloc( l2fwd_mbuf_ol_pool, - RTE_PKTMBUF_OL_CRYPTO); + RTE_PKTMBUF_OL_CRYPTO_SYM); /* * If we can't allocate a offload, then drop * the rest of the burst and dequeue and @@ -689,7 +688,7 @@ parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg) *type = RTE_CRYPTODEV_AESNI_MB_PMD; return 0; } else if (strcmp("QAT", optarg) == 0) { - *type = RTE_CRYPTODEV_QAT_PMD; + *type = RTE_CRYPTODEV_QAT_SYM_PMD; return 0; } @@ -937,7 +936,7 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options) options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH; /* Cipher Data */ - options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER; + options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; options->cipher_xform.next = NULL; options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC; @@ -946,12 +945,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options) generate_random_key(options->ckey_data, sizeof(options->ckey_data)); options->cipher_xform.cipher.key.data = options->ckey_data; - options->cipher_xform.cipher.key.phys_addr = 0; options->cipher_xform.cipher.key.length = 16; /* Authentication Data */ - options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH; + options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; options->auth_xform.next = NULL; options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC; @@ -963,7 +961,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options) generate_random_key(options->akey_data, sizeof(options->akey_data)); options->auth_xform.auth.key.data = options->akey_data; - options->auth_xform.auth.key.phys_addr = 0; options->auth_xform.auth.key.length = 20; } @@ -982,7 +979,7 @@ l2fwd_crypto_options_print(struct 
l2fwd_crypto_options *options) switch (options->cdev_type) { case RTE_CRYPTODEV_AESNI_MB_PMD: printf("cryptodev type: AES-NI MB PMD\n"); break; - case RTE_CRYPTODEV_QAT_PMD: + case RTE_CRYPTODEV_QAT_SYM_PMD: printf("cryptodev type: QAT PMD\n"); break; default: break; @@ -1179,7 +1176,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports) unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0; int retval; - if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) { + if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) { if (rte_cryptodev_count() < nb_ports) return -1; } else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) { diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile index 81fa3fc05c..0d592297b4 100644 --- a/lib/librte_cryptodev/Makefile +++ b/lib/librte_cryptodev/Makefile @@ -45,6 +45,7 @@ SRCS-y += rte_cryptodev.c # export include files SYMLINK-y-include += rte_crypto.h +SYMLINK-y-include += rte_crypto_sym.h SYMLINK-y-include += rte_cryptodev.h SYMLINK-y-include += rte_cryptodev_pmd.h diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h index 42343a831f..620c00b731 100644 --- a/lib/librte_cryptodev/rte_crypto.h +++ b/lib/librte_cryptodev/rte_crypto.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -36,313 +36,14 @@ /** * @file rte_crypto.h * - * RTE Cryptographic Definitions + * RTE Cryptography Common Definitions * - * Defines symmetric cipher and authentication algorithms and modes, as well - * as supported symmetric crypto operation combinations. */ #ifdef __cplusplus extern "C" { #endif -#include -#include -#include - -/** Symmetric Cipher Algorithms */ -enum rte_crypto_cipher_algorithm { - RTE_CRYPTO_CIPHER_NULL = 1, - /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */ - - RTE_CRYPTO_CIPHER_3DES_CBC, - /**< Triple DES algorithm in CBC mode */ - RTE_CRYPTO_CIPHER_3DES_CTR, - /**< Triple DES algorithm in CTR mode */ - RTE_CRYPTO_CIPHER_3DES_ECB, - /**< Triple DES algorithm in ECB mode */ - - RTE_CRYPTO_CIPHER_AES_CBC, - /**< AES algorithm in CBC mode */ - RTE_CRYPTO_CIPHER_AES_CCM, - /**< AES algorithm in CCM mode. When this cipher algorithm is used the - * *RTE_CRYPTO_AUTH_AES_CCM* element of the - * *rte_crypto_hash_algorithm* enum MUST be used to set up the related - * *rte_crypto_auth_xform* structure in the session context or in - * the op_params of the crypto operation structure in the case of a - * session-less crypto operation - */ - RTE_CRYPTO_CIPHER_AES_CTR, - /**< AES algorithm in Counter mode */ - RTE_CRYPTO_CIPHER_AES_ECB, - /**< AES algorithm in ECB mode */ - RTE_CRYPTO_CIPHER_AES_F8, - /**< AES algorithm in F8 mode */ - RTE_CRYPTO_CIPHER_AES_GCM, - /**< AES algorithm in GCM mode. When this cipher algorithm is used the - * *RTE_CRYPTO_AUTH_AES_GCM* element of the - * *rte_crypto_auth_algorithm* enum MUST be used to set up the related - * *rte_crypto_auth_setup_data* structure in the session context or in - * the op_params of the crypto operation structure in the case of a - * session-less crypto operation. 
- */ - RTE_CRYPTO_CIPHER_AES_XTS, - /**< AES algorithm in XTS mode */ - - RTE_CRYPTO_CIPHER_ARC4, - /**< (A)RC4 cipher algorithm */ - - RTE_CRYPTO_CIPHER_KASUMI_F8, - /**< Kasumi algorithm in F8 mode */ - - RTE_CRYPTO_CIPHER_SNOW3G_UEA2, - /**< SNOW3G algorithm in UEA2 mode */ - - RTE_CRYPTO_CIPHER_ZUC_EEA3 - /**< ZUC algorithm in EEA3 mode */ -}; - -/** Symmetric Cipher Direction */ -enum rte_crypto_cipher_operation { - RTE_CRYPTO_CIPHER_OP_ENCRYPT, - /**< Encrypt cipher operation */ - RTE_CRYPTO_CIPHER_OP_DECRYPT - /**< Decrypt cipher operation */ -}; - -/** Crypto key structure */ -struct rte_crypto_key { - uint8_t *data; /**< pointer to key data */ - phys_addr_t phys_addr; - size_t length; /**< key length in bytes */ -}; - -/** - * Symmetric Cipher Setup Data. - * - * This structure contains data relating to Cipher (Encryption and Decryption) - * use to create a session. - */ -struct rte_crypto_cipher_xform { - enum rte_crypto_cipher_operation op; - /**< This parameter determines if the cipher operation is an encrypt or - * a decrypt operation. For the RC4 algorithm and the F8/CTR modes, - * only encrypt operations are valid. - */ - enum rte_crypto_cipher_algorithm algo; - /**< Cipher algorithm */ - - struct rte_crypto_key key; - /**< Cipher key - * - * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will - * point to a concatenation of the AES encryption key followed by a - * keymask. As per RFC3711, the keymask should be padded with trailing - * bytes to match the length of the encryption key used. - * - * For AES-XTS mode of operation, two keys must be provided and - * key.data must point to the two keys concatenated together (Key1 || - * Key2). The cipher key length will contain the total size of both - * keys. - * - * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes), - * 192 bits (24 bytes) or 256 bits (32 bytes). - * - * For the CCM mode of operation, the only supported key length is 128 - * bits (16 bytes). - * - * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length - * should be set to the combined length of the encryption key and the - * keymask. Since the keymask and the encryption key are the same size, - * key.length should be set to 2 x the AES encryption key length. - * - * For the AES-XTS mode of operation: - * - Two keys must be provided and key.length refers to total length of - * the two keys. - * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes). - * - Both keys must have the same size. - **/ -}; - -/** Symmetric Authentication / Hash Algorithms */ -enum rte_crypto_auth_algorithm { - RTE_CRYPTO_AUTH_NULL = 1, - /**< NULL hash algorithm. */ - - RTE_CRYPTO_AUTH_AES_CBC_MAC, - /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */ - RTE_CRYPTO_AUTH_AES_CCM, - /**< AES algorithm in CCM mode. This is an authenticated cipher. When - * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM* - * element of the *rte_crypto_cipher_algorithm* enum MUST be used to - * set up the related rte_crypto_cipher_setup_data structure in the - * session context or the corresponding parameter in the crypto - * operation data structures op_params parameter MUST be set for a - * session-less crypto operation. - */ - RTE_CRYPTO_AUTH_AES_CMAC, - /**< AES CMAC algorithm. */ - RTE_CRYPTO_AUTH_AES_GCM, - /**< AES algorithm in GCM mode. 
When this hash algorithm - * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the - * rte_crypto_cipher_algorithm enum MUST be used to set up the related - * rte_crypto_cipher_setup_data structure in the session context, or - * the corresponding parameter in the crypto operation data structures - * op_params parameter MUST be set for a session-less crypto operation. - */ - RTE_CRYPTO_AUTH_AES_GMAC, - /**< AES GMAC algorithm. When this hash algorithm - * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the - * rte_crypto_cipher_algorithm enum MUST be used to set up the related - * rte_crypto_cipher_setup_data structure in the session context, or - * the corresponding parameter in the crypto operation data structures - * op_params parameter MUST be set for a session-less crypto operation. - */ - RTE_CRYPTO_AUTH_AES_XCBC_MAC, - /**< AES XCBC algorithm. */ - - RTE_CRYPTO_AUTH_KASUMI_F9, - /**< Kasumi algorithm in F9 mode. */ - - RTE_CRYPTO_AUTH_MD5, - /**< MD5 algorithm */ - RTE_CRYPTO_AUTH_MD5_HMAC, - /**< HMAC using MD5 algorithm */ - - RTE_CRYPTO_AUTH_SHA1, - /**< 128 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA1_HMAC, - /**< HMAC using 128 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA224, - /**< 224 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA224_HMAC, - /**< HMAC using 224 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA256, - /**< 256 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA256_HMAC, - /**< HMAC using 256 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA384, - /**< 384 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA384_HMAC, - /**< HMAC using 384 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA512, - /**< 512 bit SHA algorithm. */ - RTE_CRYPTO_AUTH_SHA512_HMAC, - /**< HMAC using 512 bit SHA algorithm. */ - - RTE_CRYPTO_AUTH_SNOW3G_UIA2, - /**< SNOW3G algorithm in UIA2 mode. */ - - RTE_CRYPTO_AUTH_ZUC_EIA3, - /**< ZUC algorithm in EIA3 mode */ -}; - -/** Symmetric Authentication / Hash Operations */ -enum rte_crypto_auth_operation { - RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */ - RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */ -}; - -/** - * Authentication / Hash transform data. - * - * This structure contains data relating to an authentication/hash crypto - * transforms. The fields op, algo and digest_length are common to all - * authentication transforms and MUST be set. - */ -struct rte_crypto_auth_xform { - enum rte_crypto_auth_operation op; - /**< Authentication operation type */ - enum rte_crypto_auth_algorithm algo; - /**< Authentication algorithm selection */ - - struct rte_crypto_key key; /**< Authentication key data. - * The authentication key length MUST be less than or equal to the - * block size of the algorithm. It is the callers responsibility to - * ensure that the key length is compliant with the standard being used - * (for example RFC 2104, FIPS 198a). - */ - - uint32_t digest_length; - /**< Length of the digest to be returned. If the verify option is set, - * this specifies the length of the digest to be compared for the - * session. - * - * If the value is less than the maximum length allowed by the hash, - * the result shall be truncated. If the value is greater than the - * maximum length allowed by the hash then an error will be generated - * by *rte_cryptodev_session_create* or by the - * *rte_cryptodev_enqueue_burst* if using session-less APIs. - */ - - uint32_t add_auth_data_length; - /**< The length of the additional authenticated data (AAD) in bytes. - * The maximum permitted value is 240 bytes, unless otherwise specified - * below. 
- * - * This field must be specified when the hash algorithm is one of the - * following: - * - * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the - * length of the IV (which should be 16). - * - * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is - * the length of the Additional Authenticated Data (called A, in NIST - * SP800-38D). - * - * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is - * the length of the associated data (called A, in NIST SP800-38C). - * Note that this does NOT include the length of any padding, or the - * 18 bytes reserved at the start of the above field to store the - * block B0 and the encoded length. The maximum permitted value in - * this case is 222 bytes. - * - * @note - * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation - * this field is not used and should be set to 0. Instead the length - * of the AAD data is specified in the message length to hash field of - * the rte_crypto_op_data structure. - */ -}; - -/** Crypto transformation types */ -enum rte_crypto_xform_type { - RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */ - RTE_CRYPTO_XFORM_AUTH, /**< Authentication xform */ - RTE_CRYPTO_XFORM_CIPHER /**< Cipher xform */ -}; - -/** - * Crypto transform structure. - * - * This is used to specify the crypto transforms required, multiple transforms - * can be chained together to specify a chain transforms such as authentication - * then cipher, or cipher then authentication. Each transform structure can - * hold a single transform, the type field is used to specify which transform - * is contained within the union - */ -struct rte_crypto_xform { - struct rte_crypto_xform *next; /**< next xform in chain */ - - enum rte_crypto_xform_type type; /**< xform type */ - union { - struct rte_crypto_auth_xform auth; - /**< Authentication / hash xform */ - struct rte_crypto_cipher_xform cipher; - /**< Cipher xform */ - }; -}; - -/** - * Crypto operation session type. This is used to specify whether a crypto - * operation has session structure attached for immutable parameters or if all - * operation information is included in the operation data structure. - */ -enum rte_crypto_op_sess_type { - RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */ - RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */ -}; - /** Status of crypto operation */ enum rte_crypto_op_status { RTE_CRYPTO_OP_STATUS_SUCCESS, @@ -359,249 +60,7 @@ enum rte_crypto_op_status { /**< Error handling operation */ }; -/** - * Cryptographic Operation Data. - * - * This structure contains data relating to performing cryptographic processing - * on a data buffer. This request is used with rte_crypto_enqueue_burst() call - * for performing cipher, hash, or a combined hash and cipher operations. - */ -struct rte_crypto_op { - enum rte_crypto_op_sess_type type; - enum rte_crypto_op_status status; - - struct { - struct rte_mbuf *m; /**< Destination mbuf */ - uint8_t offset; /**< Data offset */ - } dst; - - union { - struct rte_cryptodev_session *session; - /**< Handle for the initialised session context */ - struct rte_crypto_xform *xform; - /**< Session-less API crypto operation parameters */ - }; - - struct { - struct { - uint32_t offset; - /**< Starting point for cipher processing, specified - * as number of bytes from start of data in the source - * buffer. The result of the cipher operation will be - * written back into the output buffer starting at - * this location. 
- */ - - uint32_t length; - /**< The message length, in bytes, of the source buffer - * on which the cryptographic operation will be - * computed. This must be a multiple of the block size - * if a block cipher is being used. This is also the - * same as the result length. - * - * @note - * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM, - * this value should not include the length of the - * padding or the length of the MAC; the driver will - * compute the actual number of bytes over which the - * encryption will occur, which will include these - * values. - * - * @note - * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this - * field should be set to 0. - */ - } to_cipher; /**< Data offsets and length for ciphering */ - - struct { - uint32_t offset; - /**< Starting point for hash processing, specified as - * number of bytes from start of packet in source - * buffer. - * - * @note - * For CCM and GCM modes of operation, this field is - * ignored. The field @ref additional_auth field - * should be set instead. - * - * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) - * mode of operation, this field specifies the start - * of the AAD data in the source buffer. - */ - - uint32_t length; - /**< The message length, in bytes, of the source - * buffer that the hash will be computed on. - * - * @note - * For CCM and GCM modes of operation, this field is - * ignored. The field @ref additional_auth field - * should be set instead. - * - * @note - * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode - * of operation, this field specifies the length of - * the AAD data in the source buffer. - */ - } to_hash; /**< Data offsets and length for authentication */ - } data; /**< Details of data to be operated on */ - - struct { - uint8_t *data; - /**< Initialisation Vector or Counter. - * - * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 - * mode, or for SNOW3G in UEA2 mode, this is the Initialisation - * Vector (IV) value. - * - * - For block ciphers in CTR mode, this is the counter. - * - * - For GCM mode, this is either the IV (if the length is 96 - * bits) or J0 (for other sizes), where J0 is as defined by - * NIST SP800-38D. Regardless of the IV length, a full 16 bytes - * needs to be allocated. - * - * - For CCM mode, the first byte is reserved, and the nonce - * should be written starting at &iv[1] (to allow space for the - * implementation to write in the flags in the first byte). - * Note that a full 16 bytes should be allocated, even though - * the length field will have a value less than this. - * - * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std - * 1619-2007. - * - * For optimum performance, the data pointed to SHOULD be - * 8-byte aligned. - */ - phys_addr_t phys_addr; - size_t length; - /**< Length of valid IV data. - * - * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 - * mode, or for SNOW3G in UEA2 mode, this is the length of the - * IV (which must be the same as the block length of the - * cipher). - * - * - For block ciphers in CTR mode, this is the length of the - * counter (which must be the same as the block length of the - * cipher). - * - * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in - * which case data points to J0. - * - * - For CCM mode, this is the length of the nonce, which can - * be in the range 7 to 13 inclusive. 
- */ - } iv; /**< Initialisation vector parameters */ - - struct { - uint8_t *data; - /**< If this member of this structure is set this is a - * pointer to the location where the digest result should be - * inserted (in the case of digest generation) or where the - * purported digest exists (in the case of digest - * verification). - * - * At session creation time, the client specified the digest - * result length with the digest_length member of the @ref - * rte_crypto_auth_xform structure. For physical crypto - * devices the caller must allocate at least digest_length of - * physically contiguous memory at this location. - * - * For digest generation, the digest result will overwrite - * any data at this location. - * - * @note - * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for - * "digest result" read "authentication tag T". - * - * If this member is not set the digest result is understood - * to be in the destination buffer for digest generation, and - * in the source buffer for digest verification. The location - * of the digest result in this case is immediately following - * the region over which the digest is computed. - */ - phys_addr_t phys_addr; /**< Physical address of digest */ - uint32_t length; /**< Length of digest */ - } digest; /**< Digest parameters */ - - struct { - uint8_t *data; - /**< Pointer to Additional Authenticated Data (AAD) needed for - * authenticated cipher mechanisms (CCM and GCM), and to the IV - * for SNOW3G authentication - * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other - * authentication mechanisms this pointer is ignored. - * - * The length of the data pointed to by this field is set up - * for the session in the @ref rte_crypto_auth_xform structure - * as part of the @ref rte_cryptodev_session_create function - * call. This length must not exceed 240 bytes. - * - * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the - * caller should setup this field as follows: - * - * - the nonce should be written starting at an offset of one - * byte into the array, leaving room for the implementation - * to write in the flags to the first byte. - * - * - the additional authentication data itself should be - * written starting at an offset of 18 bytes into the array, - * leaving room for the length encoding in the first two - * bytes of the second block. - * - * - the array should be big enough to hold the above fields, - * plus any padding to round this up to the nearest multiple - * of the block size (16 bytes). Padding will be added by - * the implementation. - * - * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the - * caller should setup this field as follows: - * - * - the AAD is written in starting at byte 0 - * - the array must be big enough to hold the AAD, plus any - * space to round this up to the nearest multiple of the - * block size (16 bytes). - * - * @note - * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of - * operation, this field is not used and should be set to 0. - * Instead the AAD data should be placed in the source buffer. - */ - phys_addr_t phys_addr; /**< physical address */ - uint32_t length; /**< Length of digest */ - } additional_auth; - /**< Additional authentication parameters */ - - struct rte_mempool *pool; - /**< mempool used to allocate crypto op */ - - void *user_data; - /**< opaque pointer for user data */ -}; - - -/** - * Reset the fields of a crypto operation to their default values. - * - * @param op The crypto operation to be reset. 
- */ -static inline void -__rte_crypto_op_reset(struct rte_crypto_op *op) -{ - op->type = RTE_CRYPTO_OP_SESSIONLESS; - op->dst.m = NULL; - op->dst.offset = 0; -} - -/** Attach a session to a crypto operation */ -static inline void -rte_crypto_op_attach_session(struct rte_crypto_op *op, - struct rte_cryptodev_session *sess) -{ - op->session = sess; - op->type = RTE_CRYPTO_OP_WITH_SESSION; -} +#include #ifdef __cplusplus } diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h new file mode 100644 index 0000000000..de6c701c00 --- /dev/null +++ b/lib/librte_cryptodev/rte_crypto_sym.h @@ -0,0 +1,598 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_CRYPTO_SYM_H_ +#define _RTE_CRYPTO_SYM_H_ + +/** + * @file rte_crypto_sym.h + * + * RTE Definitions for Symmetric Cryptography + * + * Defines symmetric cipher and authentication algorithms and modes, as well + * as supported symmetric crypto operation combinations. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + + +/** Symmetric Cipher Algorithms */ +enum rte_crypto_cipher_algorithm { + RTE_CRYPTO_CIPHER_NULL = 1, + /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */ + + RTE_CRYPTO_CIPHER_3DES_CBC, + /**< Triple DES algorithm in CBC mode */ + RTE_CRYPTO_CIPHER_3DES_CTR, + /**< Triple DES algorithm in CTR mode */ + RTE_CRYPTO_CIPHER_3DES_ECB, + /**< Triple DES algorithm in ECB mode */ + + RTE_CRYPTO_CIPHER_AES_CBC, + /**< AES algorithm in CBC mode */ + RTE_CRYPTO_CIPHER_AES_CCM, + /**< AES algorithm in CCM mode. 
When this cipher algorithm is used the + * *RTE_CRYPTO_AUTH_AES_CCM* element of the + * *rte_crypto_hash_algorithm* enum MUST be used to set up the related + * *rte_crypto_auth_xform* structure in the session context or in + * the op_params of the crypto operation structure in the case of a + * session-less crypto operation + */ + RTE_CRYPTO_CIPHER_AES_CTR, + /**< AES algorithm in Counter mode */ + RTE_CRYPTO_CIPHER_AES_ECB, + /**< AES algorithm in ECB mode */ + RTE_CRYPTO_CIPHER_AES_F8, + /**< AES algorithm in F8 mode */ + RTE_CRYPTO_CIPHER_AES_GCM, + /**< AES algorithm in GCM mode. When this cipher algorithm is used the + * *RTE_CRYPTO_AUTH_AES_GCM* element of the + * *rte_crypto_auth_algorithm* enum MUST be used to set up the related + * *rte_crypto_auth_setup_data* structure in the session context or in + * the op_params of the crypto operation structure in the case of a + * session-less crypto operation. + */ + RTE_CRYPTO_CIPHER_AES_XTS, + /**< AES algorithm in XTS mode */ + + RTE_CRYPTO_CIPHER_ARC4, + /**< (A)RC4 cipher algorithm */ + + RTE_CRYPTO_CIPHER_KASUMI_F8, + /**< Kasumi algorithm in F8 mode */ + + RTE_CRYPTO_CIPHER_SNOW3G_UEA2, + /**< SNOW3G algorithm in UEA2 mode */ + + RTE_CRYPTO_CIPHER_ZUC_EEA3 + /**< ZUC algorithm in EEA3 mode */ +}; + +/** Symmetric Cipher Direction */ +enum rte_crypto_cipher_operation { + RTE_CRYPTO_CIPHER_OP_ENCRYPT, + /**< Encrypt cipher operation */ + RTE_CRYPTO_CIPHER_OP_DECRYPT + /**< Decrypt cipher operation */ +}; + +/** Crypto key structure */ +struct rte_crypto_key { + uint8_t *data; /**< pointer to key data */ + phys_addr_t phys_addr; + size_t length; /**< key length in bytes */ +}; + +/** + * Symmetric Cipher Setup Data. + * + * This structure contains data relating to Cipher (Encryption and Decryption) + * use to create a session. + */ +struct rte_crypto_cipher_xform { + enum rte_crypto_cipher_operation op; + /**< This parameter determines if the cipher operation is an encrypt or + * a decrypt operation. For the RC4 algorithm and the F8/CTR modes, + * only encrypt operations are valid. + */ + enum rte_crypto_cipher_algorithm algo; + /**< Cipher algorithm */ + + struct rte_crypto_key key; + /**< Cipher key + * + * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will + * point to a concatenation of the AES encryption key followed by a + * keymask. As per RFC3711, the keymask should be padded with trailing + * bytes to match the length of the encryption key used. + * + * For AES-XTS mode of operation, two keys must be provided and + * key.data must point to the two keys concatenated together (Key1 || + * Key2). The cipher key length will contain the total size of both + * keys. + * + * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes), + * 192 bits (24 bytes) or 256 bits (32 bytes). + * + * For the CCM mode of operation, the only supported key length is 128 + * bits (16 bytes). + * + * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length + * should be set to the combined length of the encryption key and the + * keymask. Since the keymask and the encryption key are the same size, + * key.length should be set to 2 x the AES encryption key length. + * + * For the AES-XTS mode of operation: + * - Two keys must be provided and key.length refers to total length of + * the two keys. + * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes). + * - Both keys must have the same size. 
+ **/ +}; + +/** Symmetric Authentication / Hash Algorithms */ +enum rte_crypto_auth_algorithm { + RTE_CRYPTO_AUTH_NULL = 1, + /**< NULL hash algorithm. */ + + RTE_CRYPTO_AUTH_AES_CBC_MAC, + /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */ + RTE_CRYPTO_AUTH_AES_CCM, + /**< AES algorithm in CCM mode. This is an authenticated cipher. When + * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM* + * element of the *rte_crypto_cipher_algorithm* enum MUST be used to + * set up the related rte_crypto_cipher_setup_data structure in the + * session context or the corresponding parameter in the crypto + * operation data structures op_params parameter MUST be set for a + * session-less crypto operation. + */ + RTE_CRYPTO_AUTH_AES_CMAC, + /**< AES CMAC algorithm. */ + RTE_CRYPTO_AUTH_AES_GCM, + /**< AES algorithm in GCM mode. When this hash algorithm + * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the + * rte_crypto_cipher_algorithm enum MUST be used to set up the related + * rte_crypto_cipher_setup_data structure in the session context, or + * the corresponding parameter in the crypto operation data structures + * op_params parameter MUST be set for a session-less crypto operation. + */ + RTE_CRYPTO_AUTH_AES_GMAC, + /**< AES GMAC algorithm. When this hash algorithm + * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the + * rte_crypto_cipher_algorithm enum MUST be used to set up the related + * rte_crypto_cipher_setup_data structure in the session context, or + * the corresponding parameter in the crypto operation data structures + * op_params parameter MUST be set for a session-less crypto operation. + */ + RTE_CRYPTO_AUTH_AES_XCBC_MAC, + /**< AES XCBC algorithm. */ + + RTE_CRYPTO_AUTH_KASUMI_F9, + /**< Kasumi algorithm in F9 mode. */ + + RTE_CRYPTO_AUTH_MD5, + /**< MD5 algorithm */ + RTE_CRYPTO_AUTH_MD5_HMAC, + /**< HMAC using MD5 algorithm */ + + RTE_CRYPTO_AUTH_SHA1, + /**< 128 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA1_HMAC, + /**< HMAC using 128 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA224, + /**< 224 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA224_HMAC, + /**< HMAC using 224 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA256, + /**< 256 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA256_HMAC, + /**< HMAC using 256 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA384, + /**< 384 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA384_HMAC, + /**< HMAC using 384 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA512, + /**< 512 bit SHA algorithm. */ + RTE_CRYPTO_AUTH_SHA512_HMAC, + /**< HMAC using 512 bit SHA algorithm. */ + + RTE_CRYPTO_AUTH_SNOW3G_UIA2, + /**< SNOW3G algorithm in UIA2 mode. */ + + RTE_CRYPTO_AUTH_ZUC_EIA3, + /**< ZUC algorithm in EIA3 mode */ +}; + +/** Symmetric Authentication / Hash Operations */ +enum rte_crypto_auth_operation { + RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */ + RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */ +}; + +/** + * Authentication / Hash transform data. + * + * This structure contains data relating to an authentication/hash crypto + * transforms. The fields op, algo and digest_length are common to all + * authentication transforms and MUST be set. + */ +struct rte_crypto_auth_xform { + enum rte_crypto_auth_operation op; + /**< Authentication operation type */ + enum rte_crypto_auth_algorithm algo; + /**< Authentication algorithm selection */ + + struct rte_crypto_key key; + /**< Authentication key data. + * The authentication key length MUST be less than or equal to the + * block size of the algorithm. 
It is the caller's responsibility to + * ensure that the key length is compliant with the standard being used + * (for example RFC 2104, FIPS 198a). + */ + + uint32_t digest_length; + /**< Length of the digest to be returned. If the verify option is set, + * this specifies the length of the digest to be compared for the + * session. + * + * If the value is less than the maximum length allowed by the hash, + * the result shall be truncated. If the value is greater than the + * maximum length allowed by the hash then an error will be generated + * by *rte_cryptodev_sym_session_create* or by the + * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs. + */ + + uint32_t add_auth_data_length; + /**< The length of the additional authenticated data (AAD) in bytes. + * The maximum permitted value is 240 bytes, unless otherwise specified + * below. + * + * This field must be specified when the hash algorithm is one of the + * following: + * + * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the + * length of the IV (which should be 16). + * + * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is + * the length of the Additional Authenticated Data (called A, in NIST + * SP800-38D). + * + * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is + * the length of the associated data (called A, in NIST SP800-38C). + * Note that this does NOT include the length of any padding, or the + * 18 bytes reserved at the start of the above field to store the + * block B0 and the encoded length. The maximum permitted value in + * this case is 222 bytes. + * + * @note + * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation + * this field is not used and should be set to 0. Instead the length + * of the AAD data is specified in the message length to hash field of + * the rte_crypto_sym_op structure. + */ +}; + +/** Crypto transformation types */ +enum rte_crypto_sym_xform_type { + RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */ + RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */ + RTE_CRYPTO_SYM_XFORM_CIPHER /**< Cipher xform */ +}; + +/** + * Symmetric crypto transform structure. + * + * This is used to specify the crypto transforms required; multiple transforms + * can be chained together to specify a chain of transforms such as + * authentication then cipher, or cipher then authentication. Each transform + * structure can hold a single transform; the type field is used to specify + * which transform is contained within the union. + */ +struct rte_crypto_sym_xform { + struct rte_crypto_sym_xform *next; + /**< next xform in chain */ + enum rte_crypto_sym_xform_type type; /**< xform type */ + union { + struct rte_crypto_auth_xform auth; + /**< Authentication / hash xform */ + struct rte_crypto_cipher_xform cipher; + /**< Cipher xform */ + }; +}; + +/** + * Crypto operation session type. This is used to specify whether a crypto + * operation has a session structure attached for immutable parameters or if all + * operation information is included in the operation data structure. + */ +enum rte_crypto_sym_op_sess_type { + RTE_CRYPTO_SYM_OP_WITH_SESSION, /**< Session based crypto operation */ + RTE_CRYPTO_SYM_OP_SESSIONLESS /**< Session-less crypto operation */ +}; + + +/** + * Cryptographic Operation Data. + * + * This structure contains data relating to performing cryptographic processing + * on a data buffer.
This request is used with rte_crypto_sym_enqueue_burst() + * call for performing cipher, hash, or a combined hash and cipher operations. + */ +struct rte_crypto_sym_op { + enum rte_crypto_sym_op_sess_type type; + enum rte_crypto_op_status status; + + struct { + struct rte_mbuf *m; /**< Destination mbuf */ + uint8_t offset; /**< Data offset */ + } dst; + + union { + struct rte_cryptodev_sym_session *session; + /**< Handle for the initialised session context */ + struct rte_crypto_sym_xform *xform; + /**< Session-less API crypto operation parameters */ + }; + + struct { + struct { + uint32_t offset; + /**< Starting point for cipher processing, specified + * as number of bytes from start of data in the source + * buffer. The result of the cipher operation will be + * written back into the output buffer starting at + * this location. + */ + + uint32_t length; + /**< The message length, in bytes, of the source buffer + * on which the cryptographic operation will be + * computed. This must be a multiple of the block size + * if a block cipher is being used. This is also the + * same as the result length. + * + * @note + * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM, + * this value should not include the length of the + * padding or the length of the MAC; the driver will + * compute the actual number of bytes over which the + * encryption will occur, which will include these + * values. + * + * @note + * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this + * field should be set to 0. + */ + } to_cipher; /**< Data offsets and length for ciphering */ + + struct { + uint32_t offset; + /**< Starting point for hash processing, specified as + * number of bytes from start of packet in source + * buffer. + * + * @note + * For CCM and GCM modes of operation, this field is + * ignored. The field @ref additional_auth field + * should be set instead. + * + * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) + * mode of operation, this field specifies the start + * of the AAD data in the source buffer. + */ + + uint32_t length; + /**< The message length, in bytes, of the source + * buffer that the hash will be computed on. + * + * @note + * For CCM and GCM modes of operation, this field is + * ignored. The field @ref additional_auth field + * should be set instead. + * + * @note + * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode + * of operation, this field specifies the length of + * the AAD data in the source buffer. + */ + } to_hash; /**< Data offsets and length for authentication */ + } data; /**< Details of data to be operated on */ + + struct { + uint8_t *data; + /**< Initialisation Vector or Counter. + * + * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 + * mode, or for SNOW3G in UEA2 mode, this is the Initialisation + * Vector (IV) value. + * + * - For block ciphers in CTR mode, this is the counter. + * + * - For GCM mode, this is either the IV (if the length is 96 + * bits) or J0 (for other sizes), where J0 is as defined by + * NIST SP800-38D. Regardless of the IV length, a full 16 bytes + * needs to be allocated. + * + * - For CCM mode, the first byte is reserved, and the nonce + * should be written starting at &iv[1] (to allow space for the + * implementation to write in the flags in the first byte). + * Note that a full 16 bytes should be allocated, even though + * the length field will have a value less than this. + * + * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std + * 1619-2007. + * + * For optimum performance, the data pointed to SHOULD be + * 8-byte aligned. 
+ */ + phys_addr_t phys_addr; + size_t length; + /**< Length of valid IV data. + * + * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 + * mode, or for SNOW3G in UEA2 mode, this is the length of the + * IV (which must be the same as the block length of the + * cipher). + * + * - For block ciphers in CTR mode, this is the length of the + * counter (which must be the same as the block length of the + * cipher). + * + * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in + * which case data points to J0. + * + * - For CCM mode, this is the length of the nonce, which can + * be in the range 7 to 13 inclusive. + */ + } iv; /**< Initialisation vector parameters */ + + struct { + uint8_t *data; + /**< If this member of this structure is set this is a + * pointer to the location where the digest result should be + * inserted (in the case of digest generation) or where the + * purported digest exists (in the case of digest + * verification). + * + * At session creation time, the client specified the digest + * result length with the digest_length member of the @ref + * rte_crypto_auth_xform structure. For physical crypto + * devices the caller must allocate at least digest_length of + * physically contiguous memory at this location. + * + * For digest generation, the digest result will overwrite + * any data at this location. + * + * @note + * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for + * "digest result" read "authentication tag T". + * + * If this member is not set the digest result is understood + * to be in the destination buffer for digest generation, and + * in the source buffer for digest verification. The location + * of the digest result in this case is immediately following + * the region over which the digest is computed. + */ + phys_addr_t phys_addr; /**< Physical address of digest */ + uint32_t length; /**< Length of digest */ + } digest; /**< Digest parameters */ + + struct { + uint8_t *data; + /**< Pointer to Additional Authenticated Data (AAD) needed for + * authenticated cipher mechanisms (CCM and GCM), and to the IV + * for SNOW3G authentication + * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other + * authentication mechanisms this pointer is ignored. + * + * The length of the data pointed to by this field is set up + * for the session in the @ref rte_crypto_auth_xform structure + * as part of the @ref rte_cryptodev_sym_session_create function + * call. This length must not exceed 240 bytes. + * + * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the + * caller should setup this field as follows: + * + * - the nonce should be written starting at an offset of one + * byte into the array, leaving room for the implementation + * to write in the flags to the first byte. + * + * - the additional authentication data itself should be + * written starting at an offset of 18 bytes into the array, + * leaving room for the length encoding in the first two + * bytes of the second block. + * + * - the array should be big enough to hold the above fields, + * plus any padding to round this up to the nearest multiple + * of the block size (16 bytes). Padding will be added by + * the implementation. + * + * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the + * caller should setup this field as follows: + * + * - the AAD is written in starting at byte 0 + * - the array must be big enough to hold the AAD, plus any + * space to round this up to the nearest multiple of the + * block size (16 bytes). 
+ * + * @note + * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of + * operation, this field is not used and should be set to 0. + * Instead the AAD data should be placed in the source buffer. + */ + phys_addr_t phys_addr; /**< physical address */ + uint32_t length; /**< Length of additional authenticated data */ + } additional_auth; + /**< Additional authentication parameters */ + + struct rte_mempool *pool; + /**< mempool used to allocate crypto op */ + + void *user_data; + /**< opaque pointer for user data */ +}; + + +/** + * Reset the fields of a crypto operation to their default values. + * + * @param op The crypto operation to be reset. + */ +static inline void +__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op) +{ + op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS; + op->dst.m = NULL; + op->dst.offset = 0; +} + +/** Attach a session to a crypto operation */ +static inline void +rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op, + struct rte_cryptodev_sym_session *sess) +{ + op->session = sess; + op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_CRYPTO_SYM_H_ */ diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c index 2838852500..c7fef6a4d9 100644 --- a/lib/librte_cryptodev/rte_cryptodev.c +++ b/lib/librte_cryptodev/rte_cryptodev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -572,8 +572,8 @@ rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id) } static int -rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs, - unsigned obj_cache_size, int socket_id); +rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev, + unsigned nb_objs, unsigned obj_cache_size, int socket_id); int rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) @@ -604,8 +604,10 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) } /* Setup Session mempool for device */ - return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs, - config->session_mp.cache_size, config->socket_id); + return rte_cryptodev_sym_session_pool_create(dev, + config->session_mp.nb_objs, + config->session_mp.cache_size, + config->socket_id); } @@ -911,12 +913,12 @@ rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev, static void -rte_crypto_session_init(struct rte_mempool *mp, +rte_cryptodev_sym_session_init(struct rte_mempool *mp, void *opaque_arg, void *_sess, __rte_unused unsigned i) { - struct rte_cryptodev_session *sess = _sess; + struct rte_cryptodev_sym_session *sess = _sess; struct rte_cryptodev *dev = opaque_arg; memset(sess, 0, mp->elt_size); @@ -930,8 +932,8 @@ rte_crypto_session_init(struct rte_mempool *mp, } static int -rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs, - unsigned obj_cache_size, int socket_id) +rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev, + unsigned nb_objs, unsigned obj_cache_size, int socket_id) { char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN]; unsigned priv_sess_size; @@ -951,7 +953,7 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs, return -ENOMEM; } - unsigned elt_size = sizeof(struct rte_cryptodev_session) + + unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) + priv_sess_size;
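For orientation, the renamed symmetric API above is used in two steps: the application builds an rte_crypto_sym_xform chain and then turns it into a session with rte_cryptodev_sym_session_create(). The sketch below is illustrative only and not part of the patch; the device id, key buffers and the AES-128-CBC plus HMAC-SHA1 choice are placeholder assumptions.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Placeholder key material for AES-128-CBC + HMAC-SHA1 (illustrative only). */
static uint8_t example_cipher_key[16];
static uint8_t example_auth_key[20];

static struct rte_cryptodev_sym_session *
example_create_cipher_auth_session(uint8_t dev_id)
{
	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = example_auth_key,
				 .length = sizeof(example_auth_key) },
			.digest_length = 20,
		},
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,	/* cipher-then-auth chain */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = example_cipher_key,
				 .length = sizeof(example_cipher_key) },
		},
	};

	/*
	 * The session is allocated from the device session mempool set up by
	 * rte_cryptodev_sym_session_pool_create() during device configure.
	 */
	return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
}

The returned rte_cryptodev_sym_session can then be attached to each operation with rte_crypto_sym_op_attach_session().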
dev->data->session_pool = rte_mempool_lookup(mp_name); @@ -975,7 +977,8 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs, 0, /* private data size */ NULL, /* obj initialization constructor */ NULL, /* obj initialization constructor arg */ - rte_crypto_session_init, /* obj constructor */ + rte_cryptodev_sym_session_init, + /**< obj constructor*/ dev, /* obj constructor arg */ socket_id, /* socket id */ 0); /* flags */ @@ -990,11 +993,12 @@ rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs, return 0; } -struct rte_cryptodev_session * -rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform) +struct rte_cryptodev_sym_session * +rte_cryptodev_sym_session_create(uint8_t dev_id, + struct rte_crypto_sym_xform *xform) { struct rte_cryptodev *dev; - struct rte_cryptodev_session *sess; + struct rte_cryptodev_sym_session *sess; void *_sess; if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { @@ -1010,7 +1014,7 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform) return NULL; } - sess = (struct rte_cryptodev_session *)_sess; + sess = (struct rte_cryptodev_sym_session *)_sess; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL); if (dev->dev_ops->session_configure(dev, xform, sess->_private) == @@ -1026,8 +1030,9 @@ rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform) return sess; } -struct rte_cryptodev_session * -rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess) +struct rte_cryptodev_sym_session * +rte_cryptodev_sym_session_free(uint8_t dev_id, + struct rte_cryptodev_sym_session *sess) { struct rte_cryptodev *dev; diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h index 0ccd1b89fe..f4b38c104d 100644 --- a/lib/librte_cryptodev/rte_cryptodev.h +++ b/lib/librte_cryptodev/rte_cryptodev.h @@ -1,6 +1,6 @@ /*- * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -57,14 +57,14 @@ extern "C" { /**< Null crypto PMD device name */ #define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd") /**< AES-NI Multi buffer PMD device name */ -#define CRYPTODEV_NAME_QAT_PMD ("cryptodev_qat_pmd") -/**< Intel QAT PMD device name */ +#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd") +/**< Intel QAT Symmetric Crypto PMD device name */ /** Crypto device type */ enum rte_cryptodev_type { RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */ RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */ - RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */ + RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */ }; /* Logging Macros */ @@ -99,8 +99,11 @@ struct rte_cryptodev_info { unsigned max_nb_queue_pairs; /**< Maximum number of queues pairs supported by device. */ - unsigned max_nb_sessions; - /**< Maximum number of sessions supported by device. */ + + struct { + unsigned max_nb_sessions; + /**< Maximum number of sessions supported by device. 
*/ + } sym; }; #define RTE_CRYPTODEV_DETACHED (0) @@ -575,6 +578,23 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, } +/** Cryptodev symmetric crypto session */ +struct rte_cryptodev_sym_session { + struct { + uint8_t dev_id; + /**< Device Id */ + enum rte_cryptodev_type type; + /** Crypto Device type session created on */ + struct rte_mempool *mp; + /**< Mempool session allocated from */ + } __rte_aligned(8); + /**< Public symmetric session details */ + + char _private[0]; + /**< Private session material */ +}; + + /** * Initialise a session for symmetric cryptographic operations. * @@ -596,24 +616,24 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, * @return * Pointer to the created session or NULL */ -extern struct rte_cryptodev_session * -rte_cryptodev_session_create(uint8_t dev_id, - struct rte_crypto_xform *xform); +extern struct rte_cryptodev_sym_session * +rte_cryptodev_sym_session_create(uint8_t dev_id, + struct rte_crypto_sym_xform *xform); /** * Free the memory associated with a previously allocated session. * * @param dev_id The device identifier. * @param session Session pointer previously allocated by - * *rte_cryptodev_session_create*. + * *rte_cryptodev_sym_session_create*. * * @return * NULL on successful freeing of session. * Session pointer on failure to free session. */ -extern struct rte_cryptodev_session * -rte_cryptodev_session_free(uint8_t dev_id, - struct rte_cryptodev_session *session); +extern struct rte_cryptodev_sym_session * +rte_cryptodev_sym_session_free(uint8_t dev_id, + struct rte_cryptodev_sym_session *session); #ifdef __cplusplus diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h index a16d109fdb..7d049ea38b 100644 --- a/lib/librte_cryptodev/rte_cryptodev_pmd.h +++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h @@ -1,6 +1,6 @@ /*- * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -358,7 +358,7 @@ typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev); * - On success returns a pointer to a rte_mempool * - On failure returns a NULL pointer */ -typedef int (*cryptodev_create_session_pool_t)( +typedef int (*cryptodev_sym_create_session_pool_t)( struct rte_cryptodev *dev, unsigned nb_objs, unsigned obj_cache_size, int socket_id); @@ -372,7 +372,7 @@ typedef int (*cryptodev_create_session_pool_t)( * - On success returns the size of the session structure for device * - On failure returns 0 */ -typedef unsigned (*cryptodev_get_session_private_size_t)( +typedef unsigned (*cryptodev_sym_get_session_private_size_t)( struct rte_cryptodev *dev); /** @@ -386,7 +386,7 @@ typedef unsigned (*cryptodev_get_session_private_size_t)( * - Returns private session structure on success. * - Returns NULL on failure. */ -typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool, +typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool, void *session_private); /** @@ -400,14 +400,14 @@ typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool, * - Returns private session structure on success. * - Returns NULL on failure. 
*/ -typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev, - struct rte_crypto_xform *xform, void *session_private); +typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, void *session_private); /** * Free Crypto session. * @param session Cryptodev session structure to free */ -typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev, +typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev, void *session_private); @@ -436,13 +436,13 @@ struct rte_cryptodev_ops { cryptodev_queue_pair_count_t queue_pair_count; /**< Get count of the queue pairs. */ - cryptodev_get_session_private_size_t session_get_size; + cryptodev_sym_get_session_private_size_t session_get_size; /**< Return private session. */ - cryptodev_initialize_session_t session_initialize; + cryptodev_sym_initialize_session_t session_initialize; /**< Initialization function for private session data */ - cryptodev_configure_session_t session_configure; + cryptodev_sym_configure_session_t session_configure; /**< Configure a Crypto session. */ - cryptodev_free_session_t session_clear; + cryptodev_sym_free_session_t session_clear; /**< Clear a Crypto sessions private data. */ }; diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map index ff8e93d7a5..a46af6f555 100644 --- a/lib/librte_cryptodev/rte_cryptodev_version.map +++ b/lib/librte_cryptodev/rte_cryptodev_version.map @@ -16,8 +16,8 @@ DPDK_2.2 { rte_cryptodev_pmd_driver_register; rte_cryptodev_pmd_release_device; rte_cryptodev_pmd_virtual_dev_init; - rte_cryptodev_session_create; - rte_cryptodev_session_free; + rte_cryptodev_sym_session_create; + rte_cryptodev_sym_session_free; rte_cryptodev_socket_id; rte_cryptodev_start; rte_cryptodev_stats_get; @@ -29,4 +29,4 @@ DPDK_2.2 { rte_cryptodev_queue_pair_stop; local: *; -}; \ No newline at end of file +}; diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h index 77993b642e..5ce6058071 100644 --- a/lib/librte_mbuf_offload/rte_mbuf_offload.h +++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -67,7 +67,7 @@ extern "C" { enum rte_mbuf_ol_op_type { RTE_PKTMBUF_OL_NOT_SPECIFIED = 0, /**< Off-load not specified */ - RTE_PKTMBUF_OL_CRYPTO + RTE_PKTMBUF_OL_CRYPTO_SYM /**< Crypto offload operation */ }; @@ -84,7 +84,7 @@ struct rte_mbuf_offload { enum rte_mbuf_ol_op_type type; /**< offload type */ union { - struct rte_crypto_op crypto; /**< Crypto operation */ + struct rte_crypto_sym_op crypto; /**< Crypto operation */ } op; }; @@ -194,8 +194,8 @@ __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol, ol->type = type; switch (type) { - case RTE_PKTMBUF_OL_CRYPTO: - __rte_crypto_op_reset(&ol->op.crypto); break; + case RTE_PKTMBUF_OL_CRYPTO_SYM: + __rte_crypto_sym_op_reset(&ol->op.crypto); break; default: break; } @@ -278,24 +278,24 @@ __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol, * - On success returns pointer to first crypto xform in crypto operations chain * - On failure returns NULL */ -static inline struct rte_crypto_xform * -rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol, +static inline struct rte_crypto_sym_xform * +rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol, unsigned nb_xforms) { - struct rte_crypto_xform *xform; + struct rte_crypto_sym_xform *xform; void *priv_data; uint16_t size; - size = sizeof(struct rte_crypto_xform) * nb_xforms; + size = sizeof(struct rte_crypto_sym_xform) * nb_xforms; priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size); if (priv_data == NULL) return NULL; - ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data; + ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data; do { - xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED; + xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED; xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL; } while (xform);
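The session-less path goes through the renamed mbuf offload helpers instead. A rough sketch, assuming an rte_mbuf_offload mempool whose private data area is large enough for two transforms; the pool and function names here are illustrative and not part of the patch.

#include <rte_crypto_sym.h>
#include <rte_mbuf_offload.h>

/* ol_pool is assumed to be an rte_mbuf_offload mempool created with room for
 * two rte_crypto_sym_xform structures of private data per offload. */
static struct rte_crypto_sym_op *
example_prepare_sessionless_op(struct rte_mempool *ol_pool)
{
	struct rte_mbuf_offload *ol;
	struct rte_crypto_sym_xform *xform;

	ol = rte_pktmbuf_offload_alloc(ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
	if (ol == NULL)
		return NULL;

	/* Reserve a chain of two xforms in the offload private data; each one
	 * comes back as RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and must be filled
	 * in by the caller. */
	xform = rte_pktmbuf_offload_alloc_crypto_sym_xforms(ol, 2);
	if (xform == NULL)
		return NULL;

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	/* ... cipher and auth parameters would be filled in here ... */

	return &ol->op.crypto;
}

Attaching a session instead of a transform chain is the one-liner shown earlier: rte_crypto_sym_op_attach_session(&ol->op.crypto, sess).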