sched: update subport rate dynamically

Add support to update subport rate dynamically.

Signed-off-by: Savinay Dharmappa <savinay.dharmappa@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Savinay Dharmappa authored on 2020-10-09 13:39:14 +01:00; committed by Thomas Monjalon
parent 5f757d8fcc
commit ac6fcb841b
8 changed files with 212 additions and 245 deletions
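
The new fourth argument of rte_sched_subport_config() selects an entry in the port-level subport bandwidth profile table; every call site touched in this commit passes 0, the default profile, at initial configuration time. A minimal sketch of that call pattern follows; port_params, subport_params and n_subports are hypothetical placeholders, not code from this commit.

#include <rte_sched.h>

/* Hypothetical configuration tables, not part of this commit. */
extern struct rte_sched_port_params port_params;
extern struct rte_sched_subport_params subport_params[];

static struct rte_sched_port *
sched_port_setup(uint32_t n_subports)
{
	struct rte_sched_port *port;
	uint32_t i;

	port = rte_sched_port_config(&port_params);
	if (port == NULL)
		return NULL;

	for (i = 0; i < n_subports; i++) {
		/* Profile id 0 selects the default subport bandwidth
		 * profile, matching the updated call sites in this commit. */
		int err = rte_sched_subport_config(port, i,
				&subport_params[i], 0);
		if (err != 0) {
			rte_sched_port_free(port);
			return NULL;
		}
	}
	return port;
}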


@@ -138,7 +138,7 @@ test_sched(void)
port = rte_sched_port_config(&port_param);
TEST_ASSERT_NOT_NULL(port, "Error config sched port\n");
err = rte_sched_subport_config(port, SUBPORT, subport_param);
err = rte_sched_subport_config(port, SUBPORT, subport_param, 0);
TEST_ASSERT_SUCCESS(err, "Error config sched, err=%d\n", err);
for (pipe = 0; pipe < subport_param[0].n_pipes_per_subport_enabled; pipe++) {


@@ -208,12 +208,6 @@ Deprecation Notices
in "rte_sched.h". These changes are aligned to improvements suggested in the
RFC https://mails.dpdk.org/archives/dev/2018-November/120035.html.
* sched: To allow dynamic configuration of the subport bandwidth profile,
changes will be made to data structures ``rte_sched_subport_params``,
``rte_sched_port_params`` and new data structure, API functions will be
defined in ``rte_sched.h``. These changes are aligned as suggested in the
RFC https://mails.dpdk.org/archives/dev/2020-July/175161.html
* metrics: The function ``rte_metrics_init`` will have a non-void return
in order to notify errors instead of calling ``rte_exit``.


@@ -191,6 +191,13 @@ New Features
* Added new ``RTE_ACL_CLASSIFY_AVX512X32`` vector implementation,
which can process up to 32 flows in parallel. Requires AVX512 support.
* **Added support to update subport bandwidth dynamically.**
* Added new API ``rte_sched_port_subport_profile_add`` to add new
subport bandwidth profile to subport profile table at runtime.
* Added support to update subport rate dynamically.
Removed Items
-------------
@@ -316,6 +323,9 @@ API Changes
This enum value was not used inside DPDK, while it prevented to add new
classify algorithms without causing an ABI breakage.
* sched: Added ``subport_profile_id`` as argument
to function ``rte_sched_subport_config``.
ABI Changes
-----------
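
The New Features entry above names rte_sched_port_subport_profile_add(); only its name and first parameter appear in this diff, so the prototype and the struct rte_sched_subport_profile_params fields used below (tb_rate, tb_size, tc_rate[] and tc_period, i.e. the rate fields being dropped from rte_sched_subport_params elsewhere in this commit) are assumptions. A sketch of adding a bandwidth profile at runtime:

#include <stdint.h>
#include <rte_sched.h>

/* Assumed prototype and parameter layout; neither is shown in this diff. */
static int
add_subport_rate_profile(struct rte_sched_port *port, uint64_t tb_rate,
		uint32_t *profile_id)
{
	struct rte_sched_subport_profile_params prof = {
		.tb_rate = tb_rate,	/* token bucket rate, bytes/second */
		.tb_size = 1000000,	/* token bucket size, bytes */
		.tc_period = 10,	/* traffic class period, milliseconds */
	};
	uint32_t i;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		prof.tc_rate[i] = tb_rate;

	/* Assumed to append the profile to the port's subport profile table
	 * and return its id through *profile_id. */
	return rte_sched_port_subport_profile_add(port, &prof, profile_id);
}

The returned id can then be passed as the new subport_profile_id argument of rte_sched_subport_config() to apply the rate, as sketched after the rte_sched.h hunks below.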


@@ -92,7 +92,7 @@ softnic_tmgr_port_create(struct pmd_internals *p,
status = rte_sched_subport_config(sched,
subport_id,
&t->subport_params[subport_id]);
&t->subport_params[subport_id], 0);
if (status) {
rte_sched_port_free(sched);
return NULL;
@@ -1141,7 +1141,7 @@ update_subport_tc_rate(struct rte_eth_dev *dev,
/* Update the subport configuration. */
if (rte_sched_subport_config(SCHED(p),
subport_id, &subport_params))
subport_id, &subport_params, 0))
return -1;
/* Commit changes. */
@@ -2912,7 +2912,7 @@ update_subport_rate(struct rte_eth_dev *dev,
/* Update the subport configuration. */
if (rte_sched_subport_config(SCHED(p), subport_id,
&subport_params))
&subport_params, 0))
return -1;
/* Commit changes. */


@@ -119,7 +119,8 @@ tmgr_port_create(const char *name, struct tmgr_port_params *params)
status = rte_sched_subport_config(
s,
i,
&subport_profile[0]);
&subport_profile[0],
0);
if (status) {
rte_sched_port_free(s);
@@ -180,7 +181,8 @@ tmgr_subport_config(const char *port_name,
status = rte_sched_subport_config(
port->s,
subport_id,
&subport_profile[subport_profile_id]);
&subport_profile[subport_profile_id],
0);
return status;
}


@@ -314,7 +314,8 @@ app_init_sched_port(uint32_t portid, uint32_t socketid)
}
for (subport = 0; subport < port_params.n_subports_per_port; subport ++) {
err = rte_sched_subport_config(port, subport, &subport_params[subport]);
err = rte_sched_subport_config(port, subport,
&subport_params[subport], 0);
if (err) {
rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n",
subport, err);


@@ -123,6 +123,7 @@ struct rte_sched_grinder {
uint32_t productive;
uint32_t pindex;
struct rte_sched_subport *subport;
struct rte_sched_subport_profile *subport_params;
struct rte_sched_pipe *pipe;
struct rte_sched_pipe_profile *pipe_params;
@@ -151,16 +152,11 @@ struct rte_sched_grinder {
struct rte_sched_subport {
/* Token bucket (TB) */
uint64_t tb_time; /* time of last update */
uint64_t tb_period;
uint64_t tb_credits_per_period;
uint64_t tb_size;
uint64_t tb_credits;
/* Traffic classes (TCs) */
uint64_t tc_time; /* time of next update */
uint64_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
uint64_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
uint64_t tc_period;
/* TC oversubscription */
uint64_t tc_ov_wm;
@@ -174,6 +170,8 @@ struct rte_sched_subport {
/* Statistics */
struct rte_sched_subport_stats stats __rte_cache_aligned;
/* subport profile */
uint32_t profile;
/* Subport pipes */
uint32_t n_pipes_per_subport_enabled;
uint32_t n_pipe_profiles;
@@ -834,18 +832,6 @@ rte_sched_subport_check_params(struct rte_sched_subport_params *params,
return -EINVAL;
}
if (params->tb_rate == 0 || params->tb_rate > rate) {
RTE_LOG(ERR, SCHED,
"%s: Incorrect value for tb rate\n", __func__);
return -EINVAL;
}
if (params->tb_size == 0) {
RTE_LOG(ERR, SCHED,
"%s: Incorrect value for tb size\n", __func__);
return -EINVAL;
}
/* qsize: if non-zero, power of 2,
* no bigger than 32K (due to 16-bit read/write pointers)
*/
@@ -859,29 +845,8 @@ rte_sched_subport_check_params(struct rte_sched_subport_params *params,
}
}
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
uint64_t tc_rate = params->tc_rate[i];
uint16_t qsize = params->qsize[i];
if ((qsize == 0 && tc_rate != 0) ||
(qsize != 0 && tc_rate == 0) ||
(tc_rate > params->tb_rate)) {
RTE_LOG(ERR, SCHED,
"%s: Incorrect value for tc rate\n", __func__);
return -EINVAL;
}
}
if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
RTE_LOG(ERR, SCHED,
"%s: Incorrect qsize or tc rate(best effort)\n", __func__);
return -EINVAL;
}
if (params->tc_period == 0) {
RTE_LOG(ERR, SCHED,
"%s: Incorrect value for tc period\n", __func__);
if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
RTE_LOG(ERR, SCHED, "%s: Incorrect qsize\n", __func__);
return -EINVAL;
}
@@ -1098,48 +1063,6 @@ rte_sched_port_free(struct rte_sched_port *port)
rte_free(port);
}
static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
struct rte_sched_subport *s = port->subports[i];
RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
" Token bucket: period = %"PRIu64", credits per period = %"PRIu64
", size = %"PRIu64"\n"
" Traffic classes: period = %"PRIu64"\n"
" credits per period = [%"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64", %"PRIu64
", %"PRIu64", %"PRIu64", %"PRIu64"]\n"
" Best effort traffic class oversubscription: wm min = %"PRIu64
", wm max = %"PRIu64"\n",
i,
/* Token bucket */
s->tb_period,
s->tb_credits_per_period,
s->tb_size,
/* Traffic classes */
s->tc_period,
s->tc_credits_per_period[0],
s->tc_credits_per_period[1],
s->tc_credits_per_period[2],
s->tc_credits_per_period[3],
s->tc_credits_per_period[4],
s->tc_credits_per_period[5],
s->tc_credits_per_period[6],
s->tc_credits_per_period[7],
s->tc_credits_per_period[8],
s->tc_credits_per_period[9],
s->tc_credits_per_period[10],
s->tc_credits_per_period[11],
s->tc_credits_per_period[12],
/* Best effort traffic class oversubscription */
s->tc_ov_wm_min,
s->tc_ov_wm_max);
}
static void
rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
{
@@ -1158,10 +1081,12 @@ rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
int
rte_sched_subport_config(struct rte_sched_port *port,
uint32_t subport_id,
struct rte_sched_subport_params *params)
struct rte_sched_subport_params *params,
uint32_t subport_profile_id)
{
struct rte_sched_subport *s = NULL;
uint32_t n_subports = subport_id;
struct rte_sched_subport_profile *profile;
uint32_t n_subport_pipe_queues, i;
uint32_t size0, size1, bmp_mem_size;
int status;
@@ -1181,6 +1106,20 @@ rte_sched_subport_config(struct rte_sched_port *port,
return -EINVAL;
}
if (subport_profile_id >= port->n_max_subport_profiles) {
RTE_LOG(ERR, SCHED, "%s: "
"Number of subport profile exceeds the max limit\n",
__func__);
rte_sched_free_memory(port, n_subports);
return -EINVAL;
}
/** Memory is allocated only on first invocation of the api for a
* given subport. Subsequent invocation on same subport will just
* update subport bandwidth parameter.
**/
if (port->subports[subport_id] == NULL) {
status = rte_sched_subport_check_params(params,
port->n_pipes_per_subport,
port->rate);
@@ -1211,36 +1150,12 @@ rte_sched_subport_config(struct rte_sched_port *port,
n_subports++;
subport_profile_id = 0;
/* Port */
port->subports[subport_id] = s;
/* Token Bucket (TB) */
if (params->tb_rate == port->rate) {
s->tb_credits_per_period = 1;
s->tb_period = 1;
} else {
double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
rte_approx_64(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
}
s->tb_size = params->tb_size;
s->tb_time = port->time;
s->tb_credits = s->tb_size / 2;
/* Traffic Classes (TCs) */
s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
if (params->qsize[i])
s->tc_credits_per_period[i]
= rte_sched_time_ms_to_bytes(params->tc_period,
params->tc_rate[i]);
}
s->tc_time = port->time + s->tc_period;
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
if (params->qsize[i])
s->tc_credits[i] = s->tc_credits_per_period[i];
/* compile time checks */
RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
@@ -1248,7 +1163,8 @@ rte_sched_subport_config(struct rte_sched_port *port,
(RTE_SCHED_PORT_N_GRINDERS - 1));
/* User parameters */
s->n_pipes_per_subport_enabled = params->n_pipes_per_subport_enabled;
s->n_pipes_per_subport_enabled =
params->n_pipes_per_subport_enabled;
memcpy(s->qsize, params->qsize, sizeof(params->qsize));
s->n_pipe_profiles = params->n_pipe_profiles;
s->n_max_pipe_profiles = params->n_max_pipe_profiles;
@@ -1272,7 +1188,8 @@ rte_sched_subport_config(struct rte_sched_port *port,
rte_sched_free_memory(port, n_subports);
RTE_LOG(NOTICE, SCHED,
"%s: RED configuration init fails\n", __func__);
"%s: RED configuration init fails\n",
__func__);
return -EINVAL;
}
}
@@ -1302,18 +1219,20 @@ rte_sched_subport_config(struct rte_sched_port *port,
s->pipe_profiles = (struct rte_sched_pipe_profile *)
(s->memory + rte_sched_subport_get_array_base(params,
e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
s->bmp_array = s->memory + rte_sched_subport_get_array_base(params,
e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
s->bmp_array = s->memory + rte_sched_subport_get_array_base(
params, e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
s->queue_array = (struct rte_mbuf **)
(s->memory + rte_sched_subport_get_array_base(params,
e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
/* Pipe profile table */
rte_sched_subport_config_pipe_profile_table(s, params, port->rate);
rte_sched_subport_config_pipe_profile_table(s, params,
port->rate);
/* Bitmap */
n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
bmp_mem_size = rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
bmp_mem_size = rte_bitmap_get_memory_footprint(
n_subport_pipe_queues);
s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
bmp_mem_size);
if (s->bmp == NULL) {
@@ -1330,16 +1249,40 @@ rte_sched_subport_config(struct rte_sched_port *port,
#ifdef RTE_SCHED_SUBPORT_TC_OV
/* TC oversubscription */
s->tc_ov_wm_min = port->mtu;
s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
s->pipe_tc_be_rate_max);
s->tc_ov_wm = s->tc_ov_wm_max;
s->tc_ov_period_id = 0;
s->tc_ov = 0;
s->tc_ov_n = 0;
s->tc_ov_rate = 0;
#endif
}
rte_sched_port_log_subport_config(port, subport_id);
{
/* update subport parameters from subport profile table*/
profile = port->subport_profiles + subport_profile_id;
s = port->subports[subport_id];
s->tb_credits = profile->tb_size / 2;
s->tc_time = port->time + profile->tc_period;
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
if (s->qsize[i])
s->tc_credits[i] =
profile->tc_credits_per_period[i];
else
profile->tc_credits_per_period[i] = 0;
#ifdef RTE_SCHED_SUBPORT_TC_OV
s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period,
s->pipe_tc_be_rate_max);
#endif
s->profile = subport_profile_id;
}
rte_sched_port_log_subport_profile(port, subport_profile_id);
return 0;
}
@@ -1351,6 +1294,7 @@ rte_sched_pipe_config(struct rte_sched_port *port,
int32_t pipe_profile)
{
struct rte_sched_subport *s;
struct rte_sched_subport_profile *sp;
struct rte_sched_pipe *p;
struct rte_sched_pipe_profile *params;
uint32_t n_subports = subport_id + 1;
@@ -1391,14 +1335,15 @@ rte_sched_pipe_config(struct rte_sched_port *port,
return -EINVAL;
}
sp = port->subport_profiles + s->profile;
/* Handle the case when pipe already has a valid configuration */
p = s->pipe + pipe_id;
if (p->tb_time) {
params = s->pipe_profiles + p->profile;
double subport_tc_be_rate =
(double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) s->tc_period;
(double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) sp->tc_period;
double pipe_tc_be_rate =
(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) params->tc_period;
@@ -1440,8 +1385,8 @@ rte_sched_pipe_config(struct rte_sched_port *port,
{
/* Subport best effort tc oversubscription */
double subport_tc_be_rate =
(double) s->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) s->tc_period;
(double)sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) sp->tc_period;
double pipe_tc_be_rate =
(double) params->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE]
/ (double) params->tc_period;
@@ -2229,14 +2174,15 @@ grinder_credits_update(struct rte_sched_port *port,
struct rte_sched_grinder *grinder = subport->grinder + pos;
struct rte_sched_pipe *pipe = grinder->pipe;
struct rte_sched_pipe_profile *params = grinder->pipe_params;
struct rte_sched_subport_profile *sp = grinder->subport_params;
uint64_t n_periods;
uint32_t i;
/* Subport TB */
n_periods = (port->time - subport->tb_time) / subport->tb_period;
subport->tb_credits += n_periods * subport->tb_credits_per_period;
subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size);
subport->tb_time += n_periods * subport->tb_period;
n_periods = (port->time - subport->tb_time) / sp->tb_period;
subport->tb_credits += n_periods * sp->tb_credits_per_period;
subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
subport->tb_time += n_periods * sp->tb_period;
/* Pipe TB */
n_periods = (port->time - pipe->tb_time) / params->tb_period;
@@ -2247,9 +2193,9 @@ grinder_credits_update(struct rte_sched_port *port,
/* Subport TCs */
if (unlikely(port->time >= subport->tc_time)) {
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
subport->tc_credits[i] = subport->tc_credits_per_period[i];
subport->tc_credits[i] = sp->tc_credits_per_period[i];
subport->tc_time = port->time + subport->tc_period;
subport->tc_time = port->time + sp->tc_period;
}
/* Pipe TCs */
@@ -2265,8 +2211,10 @@ grinder_credits_update(struct rte_sched_port *port,
static inline uint64_t
grinder_tc_ov_credits_update(struct rte_sched_port *port,
struct rte_sched_subport *subport)
struct rte_sched_subport *subport, uint32_t pos)
{
struct rte_sched_grinder *grinder = subport->grinder + pos;
struct rte_sched_subport_profile *sp = grinder->subport_params;
uint64_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
uint64_t tc_consumption = 0, tc_ov_consumption_max;
uint64_t tc_ov_wm = subport->tc_ov_wm;
@@ -2276,17 +2224,17 @@ grinder_tc_ov_credits_update(struct rte_sched_port *port,
return subport->tc_ov_wm_max;
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASS_BE; i++) {
tc_ov_consumption[i] =
subport->tc_credits_per_period[i] - subport->tc_credits[i];
tc_ov_consumption[i] = sp->tc_credits_per_period[i]
- subport->tc_credits[i];
tc_consumption += tc_ov_consumption[i];
}
tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] =
subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
subport->tc_credits[RTE_SCHED_TRAFFIC_CLASS_BE];
tc_ov_consumption_max =
subport->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
sp->tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASS_BE] -
tc_consumption;
if (tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASS_BE] >
@@ -2312,14 +2260,15 @@ grinder_credits_update(struct rte_sched_port *port,
struct rte_sched_grinder *grinder = subport->grinder + pos;
struct rte_sched_pipe *pipe = grinder->pipe;
struct rte_sched_pipe_profile *params = grinder->pipe_params;
struct rte_sched_subport_profile *sp = grinder->subport_params;
uint64_t n_periods;
uint32_t i;
/* Subport TB */
n_periods = (port->time - subport->tb_time) / subport->tb_period;
subport->tb_credits += n_periods * subport->tb_credits_per_period;
subport->tb_credits = RTE_MIN(subport->tb_credits, subport->tb_size);
subport->tb_time += n_periods * subport->tb_period;
n_periods = (port->time - subport->tb_time) / sp->tb_period;
subport->tb_credits += n_periods * sp->tb_credits_per_period;
subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
subport->tb_time += n_periods * sp->tb_period;
/* Pipe TB */
n_periods = (port->time - pipe->tb_time) / params->tb_period;
@@ -2329,12 +2278,13 @@ grinder_credits_update(struct rte_sched_port *port,
/* Subport TCs */
if (unlikely(port->time >= subport->tc_time)) {
subport->tc_ov_wm = grinder_tc_ov_credits_update(port, subport);
subport->tc_ov_wm =
grinder_tc_ov_credits_update(port, subport, pos);
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
subport->tc_credits[i] = subport->tc_credits_per_period[i];
subport->tc_credits[i] = sp->tc_credits_per_period[i];
subport->tc_time = port->time + subport->tc_period;
subport->tc_time = port->time + sp->tc_period;
subport->tc_ov_period_id++;
}
@@ -2857,6 +2807,9 @@ grinder_handle(struct rte_sched_port *port,
struct rte_sched_pipe *pipe = grinder->pipe;
grinder->pipe_params = subport->pipe_profiles + pipe->profile;
grinder->subport_params = port->subport_profiles +
subport->profile;
grinder_prefetch_tc_queue_arrays(subport, pos);
grinder_credits_update(port, subport, pos);
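
To make the rte_sched.c changes above easier to follow: grinder_credits_update() now reads tb_period, tb_credits_per_period, tb_size and the per-TC credit rates from the shared subport profile attached to the grinder, while the running state (tb_time, tb_credits, tc_credits) stays in the subport. The standalone sketch below restates only the token-bucket refill arithmetic with simplified types; it is illustrative, not the library code.

#include <stdint.h>

/* Simplified stand-ins for the profile rate fields and the subport's
 * running token-bucket state. */
struct tb_profile {
	uint64_t tb_period;             /* period length, in port time units */
	uint64_t tb_credits_per_period; /* credits added per period */
	uint64_t tb_size;               /* bucket capacity */
};

struct tb_state {
	uint64_t tb_time;    /* time of last refill */
	uint64_t tb_credits; /* currently available credits */
};

static void
subport_tb_refill(uint64_t now, const struct tb_profile *sp,
		struct tb_state *s)
{
	/* Whole periods elapsed since the last refill. */
	uint64_t n_periods = (now - s->tb_time) / sp->tb_period;

	/* Add the earned credits, capped at the bucket size. */
	s->tb_credits += n_periods * sp->tb_credits_per_period;
	if (s->tb_credits > sp->tb_size)
		s->tb_credits = sp->tb_size;

	/* Advance the bucket timestamp only by the periods accounted for. */
	s->tb_time += n_periods * sp->tb_period;
}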


@@ -361,20 +361,27 @@ rte_sched_port_subport_profile_add(struct rte_sched_port *port,
/**
* Hierarchical scheduler subport configuration
*
* Note that this function is safe to use at runtime
* to configure subport bandwidth profile.
* @param port
* Handle to port scheduler instance
* @param subport_id
* Subport ID
* @param params
* Subport configuration parameters
* Subport configuration parameters. Must be non-NULL
* for first invocation (i.e initialization) for a given
* subport. Ignored (recommended value is NULL) for all
* subsequent invocation on the same subport.
* @param subport_profile_id
* ID of subport bandwidth profile
* @return
* 0 upon success, error code otherwise
*/
int
rte_sched_subport_config(struct rte_sched_port *port,
uint32_t subport_id,
struct rte_sched_subport_params *params);
struct rte_sched_subport_params *params,
uint32_t subport_profile_id);
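
Per the contract documented above, params is needed only on the first call for a given subport; afterwards a rate change reduces to re-invoking the function with a different profile id. A minimal sketch, assuming new_profile_id was obtained earlier from rte_sched_port_subport_profile_add():

#include <stddef.h>
#include <stdint.h>
#include <rte_sched.h>

/* Switch an already configured subport to another bandwidth profile at
 * runtime; params is NULL because the subport was configured earlier. */
static int
subport_set_profile(struct rte_sched_port *port, uint32_t subport_id,
		uint32_t new_profile_id)
{
	return rte_sched_subport_config(port, subport_id, NULL,
			new_profile_id);
}
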
/**
* Hierarchical scheduler pipe configuration