crypto/scheduler: add multicore scheduling mode

Multi-core scheduling mode is a mode in which the scheduler distributes
crypto operations on a round-robin basis among several cores assigned
as workers.
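
For example (the core ids below are illustrative; the full syntax is
documented in the scheduler guide update in this patch), the worker
cores are supplied through the new corelist or coremask vdev arguments:

  --vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2,mode=multi-core,corelist=23;24"

The equivalent coremask for cores 23 and 24 would be coremask=0x1800000.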

Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Kirill Rybalchenko 2017-07-05 17:14:38 +01:00 committed by Pablo de Lara
parent a3277ad47f
commit 4c07e0552f
8 changed files with 539 additions and 3 deletions


@@ -170,3 +170,28 @@ operation:
crypto operation burst to the primary slave. When one or more crypto
operations fail to be enqueued, then they will be enqueued to the secondary
slave.
* **CDEV_SCHED_MODE_MULTICORE:**
*Initialization mode parameter*: **multi-core**
Multi-core mode, which distributes the workload across several (up to eight)
worker cores. Enqueued bursts are distributed among the worker cores in a
round-robin manner. If the scheduler cannot enqueue an entire burst to the same
worker, it enqueues the remaining operations to the next available worker.
For traffic consisting purely of small packets (64 bytes), the multi-core mode
is not an optimal solution, as it does not give a significant per-core
performance improvement. For mixed traffic (IMIX) the optimal number of worker
cores is around 2-3. For large packets (1.5 Kbytes) the scheduler shows linear
performance scaling up to eight cores.
Each worker uses its own slave cryptodev. Only software cryptodevs are
supported, and only cryptodevs of the same type should be used concurrently.
The multi-core mode uses one extra parameter:
* corelist: Semicolon-separated list of logical cores to be used as workers.
The number of worker cores should be equal to the number of slave cryptodevs.
Example:
... --vdev "crypto_aesni_mb1,name=aesni_mb_1" --vdev "crypto_aesni_mb_pmd2,name=aesni_mb_2" \
--vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2,mode=multi-core,corelist=23;24" ...


@@ -133,6 +133,11 @@ New Features
* 192-bit key.
* **Updated the Cryptodev Scheduler PMD.**
Added a multicore-based distribution mode, which distributes the enqueued
crypto operations among several slaves running on different logical cores.
Resolved Issues
---------------


@@ -56,5 +56,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_multicore.c
include $(RTE_SDK)/mk/rte.lib.mk


@@ -351,6 +351,13 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -1;
}
break;
case CDEV_SCHED_MODE_MULTICORE:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
multicore_scheduler) < 0) {
CS_LOG_ERR("Failed to load scheduler");
return -1;
}
break;
default:
CS_LOG_ERR("Not yet supported");
return -ENOTSUP;


@@ -58,12 +58,17 @@ extern "C" {
#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
#endif
/** Maximum number of multi-core worker cores */
#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
/** Round-robin scheduling mode string */
#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
/** Packet-size based distribution scheduling mode string */
#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
/** Fail-over scheduling mode string */
#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
/** Multi-core scheduling mode string */
#define SCHEDULER_MODE_NAME_MULTI_CORE multi-core
/**
* Crypto scheduler PMD operation modes
@@ -78,6 +83,8 @@ enum rte_cryptodev_scheduler_mode {
CDEV_SCHED_MODE_PKT_SIZE_DISTR,
/** Fail-over mode */
CDEV_SCHED_MODE_FAILOVER,
/** Multi-core mode */
CDEV_SCHED_MODE_MULTICORE,
CDEV_SCHED_MODE_COUNT /**< number of modes */
};
@@ -295,6 +302,8 @@ extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
/** Fail-over mode scheduler */
extern struct rte_cryptodev_scheduler *failover_scheduler;
/** Multi-core mode scheduler */
extern struct rte_cryptodev_scheduler *multicore_scheduler;
#ifdef __cplusplus
}


@@ -0,0 +1,412 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
#define MC_SCHED_ENQ_RING_NAME_PREFIX "MCS_ENQR_"
#define MC_SCHED_DEQ_RING_NAME_PREFIX "MCS_DEQR_"
#define MC_SCHED_BUFFER_SIZE 32
/** multi-core scheduler context */
struct mc_scheduler_ctx {
uint32_t num_workers; /**< Number of workers polling */
uint32_t stop_signal;
struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
};
struct mc_scheduler_qp_ctx {
struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
uint32_t nb_slaves;
uint32_t last_enq_worker_idx;
uint32_t last_deq_worker_idx;
struct mc_scheduler_ctx *mc_private_ctx;
};
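/* Enqueue path: spread the ops burst across the per-worker enqueue rings
 * in round-robin order, resuming from the saved worker index. */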
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct mc_scheduler_qp_ctx *mc_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
uint16_t i, processed_ops = 0;
if (unlikely(nb_ops == 0))
return 0;
for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
(void *)(&ops[processed_ops]), nb_ops, NULL);
nb_ops -= nb_queue_ops;
processed_ops += nb_queue_ops;
if (++worker_idx == mc_ctx->num_workers)
worker_idx = 0;
}
mc_qp_ctx->last_enq_worker_idx = worker_idx;
return processed_ops;
}
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
nb_ops);
uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
nb_ops_to_enq);
scheduler_order_insert(order_ring, ops, nb_ops_enqd);
return nb_ops_enqd;
}
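/* Dequeue path: collect processed ops from the per-worker dequeue rings
 * in round-robin order. */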
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct mc_scheduler_qp_ctx *mc_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
uint16_t i, processed_ops = 0;
for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
(void *)(&ops[processed_ops]), nb_ops, NULL);
nb_ops -= nb_deq_ops;
processed_ops += nb_deq_ops;
if (++worker_idx == mc_ctx->num_workers)
worker_idx = 0;
}
mc_qp_ctx->last_deq_worker_idx = worker_idx;
return processed_ops;
}
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
struct rte_ring *order_ring =
((struct scheduler_qp_ctx *)qp)->order_ring;
return scheduler_order_drain(order_ring, ops, nb_ops);
}
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
__rte_unused uint8_t slave_id)
{
return 0;
}
static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
__rte_unused uint8_t slave_id)
{
return 0;
}
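/* Per-lcore worker loop: pull ops from this worker's enqueue ring, swap
 * the scheduler session for the slave's own session, enqueue the ops to
 * the slave cryptodev, and move completed ops to the dequeue ring.
 * Runs until stop_signal is set. */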
static int
mc_scheduler_worker(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
struct rte_ring *enq_ring;
struct rte_ring *deq_ring;
uint32_t core_id = rte_lcore_id();
int i, worker_idx = -1;
struct scheduler_slave *slave;
struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
struct scheduler_session *sess0, *sess1, *sess2, *sess3;
uint16_t processed_ops;
uint16_t left_op = 0;
uint16_t left_op_idx = 0;
uint16_t inflight_ops = 0;
for (i = 0; i < (int)sched_ctx->nb_wc; i++) {
if (sched_ctx->wc_pool[i] == core_id) {
worker_idx = i;
break;
}
}
if (worker_idx == -1) {
CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
return -1;
}
slave = &sched_ctx->slaves[worker_idx];
enq_ring = mc_ctx->sched_enq_ring[worker_idx];
deq_ring = mc_ctx->sched_deq_ring[worker_idx];
while (!mc_ctx->stop_signal) {
if (left_op) {
processed_ops =
rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id,
&enq_ops[left_op_idx], left_op);
left_op -= processed_ops;
left_op_idx += processed_ops;
} else {
uint16_t nb_deq_ops = rte_ring_dequeue_burst(enq_ring,
(void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL);
if (nb_deq_ops) {
uint16_t i;
for (i = 0; i < nb_deq_ops && i < 4; i++)
rte_prefetch0(enq_ops[i]->sym->session);
for (i = 0; (i < (nb_deq_ops - 8))
&& (nb_deq_ops > 8); i += 4) {
sess0 = (struct scheduler_session *)
enq_ops[i]->sym->session->_private;
sess1 = (struct scheduler_session *)
enq_ops[i+1]->sym->session->_private;
sess2 = (struct scheduler_session *)
enq_ops[i+2]->sym->session->_private;
sess3 = (struct scheduler_session *)
enq_ops[i+3]->sym->session->_private;
enq_ops[i]->sym->session =
sess0->sessions[worker_idx];
enq_ops[i + 1]->sym->session =
sess1->sessions[worker_idx];
enq_ops[i + 2]->sym->session =
sess2->sessions[worker_idx];
enq_ops[i + 3]->sym->session =
sess3->sessions[worker_idx];
rte_prefetch0(enq_ops[i + 4]->sym->session);
rte_prefetch0(enq_ops[i + 5]->sym->session);
rte_prefetch0(enq_ops[i + 6]->sym->session);
rte_prefetch0(enq_ops[i + 7]->sym->session);
}
for (; i < nb_deq_ops; i++) {
sess0 = (struct scheduler_session *)
enq_ops[i]->sym->session->_private;
enq_ops[i]->sym->session =
sess0->sessions[worker_idx];
}
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
slave->qp_id, enq_ops, nb_deq_ops);
if (unlikely(processed_ops < nb_deq_ops)) {
left_op = nb_deq_ops - processed_ops;
left_op_idx = processed_ops;
}
inflight_ops += processed_ops;
}
}
if (inflight_ops > 0) {
processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
if (processed_ops) {
uint16_t nb_enq_ops = rte_ring_enqueue_burst(deq_ring,
(void *)deq_ops, processed_ops, NULL);
inflight_ops -= nb_enq_ops;
}
}
rte_pause();
}
return 0;
}
static int
scheduler_start(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
uint16_t i;
mc_ctx->stop_signal = 0;
for (i = 0; i < sched_ctx->nb_wc; i++)
rte_eal_remote_launch(
(lcore_function_t *)mc_scheduler_worker, dev,
sched_ctx->wc_pool[i]);
if (sched_ctx->reordering_enabled) {
dev->enqueue_burst = &schedule_enqueue_ordering;
dev->dequeue_burst = &schedule_dequeue_ordering;
} else {
dev->enqueue_burst = &schedule_enqueue;
dev->dequeue_burst = &schedule_dequeue;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
struct mc_scheduler_qp_ctx *mc_qp_ctx =
qp_ctx->private_qp_ctx;
uint32_t j;
memset(mc_qp_ctx->slaves, 0,
RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
sizeof(struct scheduler_slave));
for (j = 0; j < sched_ctx->nb_slaves; j++) {
mc_qp_ctx->slaves[j].dev_id =
sched_ctx->slaves[j].dev_id;
mc_qp_ctx->slaves[j].qp_id = i;
}
mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
mc_qp_ctx->last_enq_worker_idx = 0;
mc_qp_ctx->last_deq_worker_idx = 0;
}
return 0;
}
static int
scheduler_stop(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
mc_ctx->stop_signal = 1;
for (uint16_t i = 0; i < sched_ctx->nb_wc; i++)
rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
return 0;
}
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
struct mc_scheduler_qp_ctx *mc_qp_ctx;
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
rte_socket_id());
if (!mc_qp_ctx) {
CS_LOG_ERR("failed allocate memory for private queue pair");
return -ENOMEM;
}
mc_qp_ctx->mc_private_ctx = mc_ctx;
qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;
return 0;
}
static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct mc_scheduler_ctx *mc_ctx;
if (sched_ctx->private_ctx)
rte_free(sched_ctx->private_ctx);
mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
rte_socket_id());
if (!mc_ctx) {
CS_LOG_ERR("failed allocate memory");
return -ENOMEM;
}
mc_ctx->num_workers = sched_ctx->nb_wc;
for (uint16_t i = 0; i < sched_ctx->nb_wc; i++) {
char r_name[16];
snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
if (!mc_ctx->sched_enq_ring[i]) {
CS_LOG_ERR("Cannot create ring for worker %u", i);
return -1;
}
snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
if (!mc_ctx->sched_deq_ring[i]) {
CS_LOG_ERR("Cannot create ring for worker %u", i);
return -1;
}
}
sched_ctx->private_ctx = (void *)mc_ctx;
return 0;
}
struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
slave_attach,
slave_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
scheduler_create_private_ctx,
NULL, /* option_set */
NULL /* option_get */
};
struct rte_cryptodev_scheduler mc_scheduler = {
.name = "multicore-scheduler",
.description = "scheduler which will run burst across multiple cpu cores",
.mode = CDEV_SCHED_MODE_MULTICORE,
.ops = &scheduler_mc_ops
};
struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;


@@ -49,6 +49,7 @@ struct scheduler_init_params {
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
uint64_t wcmask;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -60,6 +61,8 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
@@ -68,7 +71,9 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_ORDERING,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- RTE_CRYPTODEV_VDEV_SOCKET_ID
+ RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ RTE_CRYPTODEV_VDEV_COREMASK,
+ RTE_CRYPTODEV_VDEV_CORELIST
};
struct scheduler_parse_map {
@@ -82,7 +87,9 @@ const struct scheduler_parse_map scheduler_mode_map[] = {
{RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
CDEV_SCHED_MODE_PKT_SIZE_DISTR},
{RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
- CDEV_SCHED_MODE_FAILOVER}
+ CDEV_SCHED_MODE_FAILOVER},
+ {RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
+ CDEV_SCHED_MODE_MULTICORE}
};
const struct scheduler_parse_map scheduler_ordering_map[] = {
@@ -122,6 +129,21 @@ cryptodev_scheduler_create(const char *name,
sched_ctx->max_nb_queue_pairs =
init_params->def_p.max_nb_queue_pairs;
if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
uint16_t i;
sched_ctx->nb_wc = 0;
for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
if (init_params->wcmask & (1ULL << i)) {
sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
RTE_LOG(INFO, PMD,
" Worker core[%u]=%u added\n",
sched_ctx->nb_wc-1, i);
}
}
}
if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
init_params->mode < CDEV_SCHED_MODE_COUNT) {
ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
@@ -240,6 +262,43 @@ parse_integer_arg(const char *key __rte_unused,
return 0;
}
/** Parse coremask argument (hexadecimal bitmask of worker lcores) */
static int
parse_coremask_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
struct scheduler_init_params *params = extra_args;
params->wcmask = strtoull(value, NULL, 16);
return 0;
}
/** Parse corelist argument (delimited list of worker lcore ids) */
static int
parse_corelist_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
struct scheduler_init_params *params = extra_args;
params->wcmask = 0ULL;
const char *token = value;
while (isdigit(token[0])) {
char *rval;
unsigned int core = strtoul(token, &rval, 10);
params->wcmask |= 1ULL << core;
token = (const char *)rval;
if (token[0] == '\0')
break;
token++;
}
return 0;
}
/** Parse name */
static int
parse_name_arg(const char *key __rte_unused,
@@ -359,6 +418,18 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
&parse_coremask_arg,
params);
if (ret < 0)
goto free_kvlist;
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
&parse_corelist_arg,
params);
if (ret < 0)
goto free_kvlist;
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
&parse_name_arg,
&params->def_p);
@@ -420,6 +491,9 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
if (init_params.def_p.name[0] != '\0')
RTE_LOG(INFO, PMD, " User defined name = %s\n",
init_params.def_p.name);
if (init_params.wcmask != 0)
RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
init_params.wcmask);
return cryptodev_scheduler_create(name,
vdev,


@@ -89,6 +89,8 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
int nb_init_slaves;
@@ -144,7 +146,8 @@ scheduler_order_drain(struct rte_ring *order_ring,
while (nb_ops_to_deq < nb_objs) {
SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED ||
+ op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)
break;
nb_ops_to_deq++;
}