event/dlb: add infos get and configure

Add support for configuring the DLB hardware.
In particular, this patch configures the DLB
hardware's scheduling domain, such that it is provisioned with
the requested number of ports and queues, provided sufficient
resources are available. Individual queues and ports are
configured later in port setup and eventdev start.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Reviewed-by: Gage Eads <gage.eads@intel.com>
6 changed files with 4568 additions and 99 deletions

doc/guides/eventdevs/dlb.rst

@@ -34,3 +34,51 @@ detailed understanding of the hardware, but these details are important when
writing high-performance code. This section describes the places where the
eventdev API and DLB misalign.

Scheduling Domain Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

There are 32 scheduling domains in the DLB. When one is configured, it
allocates load-balanced and directed queues, ports, credits, and other
hardware resources. Some resource allocations are user-controlled -- the
number of queues, for example -- and others, like credit pools (one directed
and one load-balanced pool per scheduling domain), are not.

The DLB is a closed system eventdev, and as such the ``nb_events_limit``
device setup argument and the per-port ``new_event_threshold`` argument apply
as defined in the eventdev header file. The limit is applied to all enqueues,
regardless of whether they will consume a directed or load-balanced credit.
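
As a hedged illustration, these limits map onto the generic eventdev API as
follows (the device ID, counts, and threshold are arbitrary, and the queue and
port counts in ``dev_conf`` would still need to be filled in):

.. code-block:: c

   struct rte_event_dev_config dev_conf = {0};
   struct rte_event_port_conf port_conf;

   /* Total in-flight events for the whole device (closed system). */
   dev_conf.nb_events_limit = 4096;
   /* ... set queue/port counts and depths, then: */
   rte_event_dev_configure(dev_id, &dev_conf);

   /* Back-pressure RTE_EVENT_OP_NEW enqueues on this port once the
    * device holds this many in-flight events.
    */
   rte_event_port_default_conf_get(dev_id, 0, &port_conf);
   port_conf.new_event_threshold = 256;
   rte_event_port_setup(dev_id, 0, &port_conf);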

Reconfiguration
~~~~~~~~~~~~~~~

The Eventdev API allows one to reconfigure a device, its ports, and its queues
by first stopping the device, calling the configuration function(s), then
restarting the device. The DLB does not support configuring an individual queue
or port without first reconfiguring the entire device, however, so there are
certain reconfiguration sequences that are valid in the eventdev API but not
supported by the PMD.
Specifically, the PMD supports the following configuration sequence (a code
sketch follows the list):

1. Configure and start the device
2. Stop the device
3. (Optional) Reconfigure the device
4. (Optional) If step 3 is run:
a. Setup queue(s). The reconfigured queue(s) lose their previous port links.
b. Setup port(s). The reconfigured port(s) lose their previous queue links.
5. (Optional, only if steps 4a and 4b are run) Link port(s) to queue(s)
6. Restart the device. If the device is reconfigured in step 3 but one or more
of its ports or queues are not, the PMD will apply their previous
configuration (including port->queue links) at this time.
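
A minimal sketch of the supported sequence through the generic eventdev API
(IDs and configuration structures are illustrative):

.. code-block:: c

   rte_event_dev_stop(dev_id);                               /* step 2 */
   rte_event_dev_configure(dev_id, &dev_conf);               /* step 3 */
   rte_event_queue_setup(dev_id, queue_id, &queue_conf);     /* step 4a */
   rte_event_port_setup(dev_id, port_id, &port_conf);        /* step 4b */
   rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1); /* step 5 */
   rte_event_dev_start(dev_id);                              /* step 6 */
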
The PMD does not support the following configuration sequences:
1. Configure and start the device
2. Stop the device
3. Setup queue or setup port
4. Start the device
This sequence is not supported because the event device must be reconfigured
before its ports or queues can be.

drivers/event/dlb/dlb.c

@@ -139,6 +139,19 @@ dlb_hw_query_resources(struct dlb_eventdev *dlb)
        return 0;
}

static void
dlb_free_qe_mem(struct dlb_port *qm_port)
{
        if (qm_port == NULL)
                return;

        rte_free(qm_port->qe4);
        qm_port->qe4 = NULL;

        rte_free(qm_port->consume_qe);
        qm_port->consume_qe = NULL;
}

/* Wrapper for string to int conversion. Substituted for atoi(...), which is
* unsafe.
*/
@@ -231,6 +244,388 @@ set_num_dir_credits(const char *key __rte_unused,
                            DLB_MAX_NUM_DIR_CREDITS);
                return -EINVAL;
        }

        return 0;
}
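
For context, the override parsed above arrives as a device argument at probe
time; with the PF PCI device it would be supplied along the lines of
"-a ea:00.0,num_dir_credits=1024" (the PCI address and value here are purely
illustrative).
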
/* VDEV-only notes:
* This function first unmaps all memory mappings and closes the
* domain's file descriptor, which causes the driver to reset the
* scheduling domain. Once that completes (when close() returns), we
* can safely free the dynamically allocated memory used by the
* scheduling domain.
*
* PF-only notes:
* We will maintain a use count and use that to determine when
* a reset is required. In PF mode, we never mmap, or munmap
* device memory, and we own the entire physical PCI device.
*/
static void
dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        enum dlb_configuration_state config_state;
        int i, j;

        /* Close and reset the domain */
        dlb_iface_domain_close(dlb);

        /* Free all dynamically allocated port memory */
        for (i = 0; i < dlb->num_ports; i++)
                dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);

        /* If reconfiguring, mark the device's queues and ports as "previously
         * configured." If the user does not reconfigure them, the PMD will
         * reapply their previous configuration when the device is started.
         */
        config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;

        for (i = 0; i < dlb->num_ports; i++) {
                dlb->ev_ports[i].qm_port.config_state = config_state;
                /* Reset setup_done so ports can be reconfigured */
                dlb->ev_ports[i].setup_done = false;
                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
                        dlb->ev_ports[i].link[j].mapped = false;
        }

        for (i = 0; i < dlb->num_queues; i++)
                dlb->ev_queues[i].qm_queue.config_state = config_state;

        for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
                dlb->ev_queues[i].setup_done = false;

        dlb->num_ports = 0;
        dlb->num_ldb_ports = 0;
        dlb->num_dir_ports = 0;
        dlb->num_queues = 0;
        dlb->num_ldb_queues = 0;
        dlb->num_dir_queues = 0;
        dlb->configured = false;
}
static int
dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
{
        struct dlb_create_ldb_pool_args cfg;
        struct dlb_cmd_response response;
        int ret;

        if (handle == NULL)
                return -EINVAL;

        if (!handle->cfg.resources.num_ldb_credits) {
                handle->cfg.ldb_credit_pool_id = 0;
                handle->cfg.num_ldb_credits = 0;
                return 0;
        }

        cfg.response = (uintptr_t)&response;
        cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;

        ret = dlb_iface_ldb_credit_pool_create(handle, &cfg);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);
        }

        handle->cfg.ldb_credit_pool_id = response.id;
        handle->cfg.num_ldb_credits = cfg.num_ldb_credits;

        return ret;
}
static int
dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
{
        struct dlb_create_dir_pool_args cfg;
        struct dlb_cmd_response response;
        int ret;

        if (handle == NULL)
                return -EINVAL;

        if (!handle->cfg.resources.num_dir_credits) {
                handle->cfg.dir_credit_pool_id = 0;
                handle->cfg.num_dir_credits = 0;
                return 0;
        }

        cfg.response = (uintptr_t)&response;
        cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;

        ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
        if (ret < 0)
                DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
                            ret, dlb_error_strings[response.status]);

        handle->cfg.dir_credit_pool_id = response.id;
        handle->cfg.num_dir_credits = cfg.num_dir_credits;

        return ret;
}
static int
dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
                           struct dlb_eventdev *dlb,
                           const struct dlb_hw_rsrcs *resources_asked)
{
        int ret = 0;
        struct dlb_create_sched_domain_args *config_params;
        struct dlb_cmd_response response;

        if (resources_asked == NULL) {
                DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
                ret = -EINVAL;
                goto error_exit;
        }

        /* Map generic qm resources to dlb resources */
        config_params = &handle->cfg.resources;

        config_params->response = (uintptr_t)&response;

        /* DIR ports and queues */
        config_params->num_dir_ports = resources_asked->num_dir_ports;
        config_params->num_dir_credits = resources_asked->num_dir_credits;

        /* LDB ports and queues */
        config_params->num_ldb_queues = resources_asked->num_ldb_queues;
        config_params->num_ldb_ports = resources_asked->num_ldb_ports;
        config_params->num_ldb_credits = resources_asked->num_ldb_credits;

        config_params->num_atomic_inflights =
                dlb->num_atm_inflights_per_queue *
                config_params->num_ldb_queues;

        config_params->num_hist_list_entries = config_params->num_ldb_ports *
                DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;

        /* dlb limited to 1 credit pool per queue type */
        config_params->num_ldb_credit_pools = 1;
        config_params->num_dir_credit_pools = 1;

        DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_credit_pools=%d, dir_credit_pools=%d\n",
                    config_params->num_ldb_queues,
                    config_params->num_ldb_ports,
                    config_params->num_dir_ports,
                    config_params->num_atomic_inflights,
                    config_params->num_hist_list_entries,
                    config_params->num_ldb_credits,
                    config_params->num_dir_credits,
                    config_params->num_ldb_credit_pools,
                    config_params->num_dir_credit_pools);

        /* Configure the QM */
        ret = dlb_iface_sched_domain_create(handle, config_params);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
                            handle->device_id,
                            ret,
                            dlb_error_strings[response.status]);
                goto error_exit;
        }

        handle->domain_id = response.id;
        handle->domain_id_valid = 1;

        config_params->response = 0;

        ret = dlb_ldb_credit_pool_create(handle);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
                goto error_exit2;
        }

        ret = dlb_dir_credit_pool_create(handle);
        if (ret < 0) {
                DLB_LOG_ERR("dlb: create dir credit pool failed\n");
                goto error_exit2;
        }

        handle->cfg.configured = true;

        return 0;

error_exit2:
        dlb_iface_domain_close(dlb);

error_exit:
        return ret;
}
/* End HW specific */
static void
dlb_eventdev_info_get(struct rte_eventdev *dev,
                      struct rte_event_dev_info *dev_info)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        int ret;

        ret = dlb_hw_query_resources(dlb);
        if (ret) {
                const struct rte_eventdev_data *data = dev->data;

                DLB_LOG_ERR("get resources err=%d, devid=%d\n",
                            ret, data->dev_id);
                /* fn is void, so fall through and return values set up in
                 * probe
                 */
        }

        /* Add num resources currently owned by this domain.
         * These would become available if the scheduling domain were reset due
         * to the application recalling eventdev_configure to *reconfigure* the
         * domain.
         */
        evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
        evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
        evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;

        /* In DLB A-stepping hardware, applications are limited to 128
         * configured ports (load-balanced or directed). The reported number of
         * available ports must reflect this.
         */
        if (dlb->revision < DLB_REV_B0) {
                int used_ports;

                used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
                        dlb->hw_rsrc_query_results.num_ldb_ports -
                        dlb->hw_rsrc_query_results.num_dir_ports;

                evdev_dlb_default_info.max_event_ports =
                        RTE_MIN(evdev_dlb_default_info.max_event_ports,
                                128 - used_ports);
        }

        evdev_dlb_default_info.max_event_queues =
                RTE_MIN(evdev_dlb_default_info.max_event_queues,
                        RTE_EVENT_MAX_QUEUES_PER_DEV);

        evdev_dlb_default_info.max_num_events =
                RTE_MIN(evdev_dlb_default_info.max_num_events,
                        dlb->max_num_events_override);

        *dev_info = evdev_dlb_default_info;
}
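
For reference, this entry point is reached through the public API; a minimal
caller-side sketch (dev_id assumed valid):

        struct rte_event_dev_info info;

        if (rte_event_dev_info_get(dev_id, &info) == 0)
                printf("max ports=%d max queues=%d max inflight events=%d\n",
                       info.max_event_ports, info.max_event_queues,
                       info.max_num_events);
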
/* Note: 1 QM instance per QM device, QM instance/device == event device */
static int
dlb_eventdev_configure(const struct rte_eventdev *dev)
{
        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
        struct dlb_hw_dev *handle = &dlb->qm_instance;
        struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
        const struct rte_eventdev_data *data = dev->data;
        const struct rte_event_dev_config *config = &data->dev_conf;
        int ret;

        /* If this eventdev is already configured, we must release the current
         * scheduling domain before attempting to configure a new one.
         */
        if (dlb->configured) {
                dlb_hw_reset_sched_domain(dev, true);

                ret = dlb_hw_query_resources(dlb);
                if (ret) {
                        DLB_LOG_ERR("get resources err=%d, devid=%d\n",
                                    ret, data->dev_id);
                        return ret;
                }
        }

        if (config->nb_event_queues > rsrcs->num_queues) {
                DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
                            config->nb_event_queues,
                            rsrcs->num_queues);
                return -EINVAL;
        }
        if (config->nb_event_ports > (rsrcs->num_ldb_ports
                        + rsrcs->num_dir_ports)) {
                DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
                            config->nb_event_ports,
                            (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
                return -EINVAL;
        }
        if (config->nb_events_limit > rsrcs->nb_events_limit) {
                DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
                            config->nb_events_limit,
                            rsrcs->nb_events_limit);
                return -EINVAL;
        }

        if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dlb->global_dequeue_wait = false;
        else {
                uint32_t timeout32;

                dlb->global_dequeue_wait = true;

                timeout32 = config->dequeue_timeout_ns;

                dlb->global_dequeue_wait_ticks =
                        timeout32 * (rte_get_timer_hz() / 1E9);
        }

        /* Does this platform support umonitor/umwait? */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
                if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
                    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
                        DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE, must be 0 or 1.\n",
                                    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
                        return -EINVAL;
                }
                dlb->umwait_allowed = true;
        }

        rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
        rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
        /* 1 dir queue per dir port */
        rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;

        /* Scale down nb_events_limit by 4 for directed credits, since there
         * are 4x as many load-balanced credits.
         */
        rsrcs->num_ldb_credits = 0;
        rsrcs->num_dir_credits = 0;

        if (rsrcs->num_ldb_queues)
                rsrcs->num_ldb_credits = config->nb_events_limit;
        if (rsrcs->num_dir_ports)
                rsrcs->num_dir_credits = config->nb_events_limit / 4;
        if (dlb->num_dir_credits_override != -1)
                rsrcs->num_dir_credits = dlb->num_dir_credits_override;

        if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
                DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
                return -ENODEV;
        }

        dlb->new_event_limit = config->nb_events_limit;
        __atomic_store_n(&dlb->inflights, 0, __ATOMIC_SEQ_CST);

        /* Save number of ports/queues for this event dev */
        dlb->num_ports = config->nb_event_ports;
        dlb->num_queues = config->nb_event_queues;
        dlb->num_dir_ports = rsrcs->num_dir_ports;
        dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
        dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
        dlb->num_dir_queues = dlb->num_dir_ports;
        dlb->num_ldb_credits = rsrcs->num_ldb_credits;
        dlb->num_dir_credits = rsrcs->num_dir_credits;

        dlb->configured = true;

        return 0;
}
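
A hedged caller-side sketch of a configuration that exercises this path (the
directed/load-balanced split and the global dequeue timeout), with all counts
chosen purely for illustration:

        struct rte_event_dev_config conf = {0};

        conf.nb_event_queues = 4;                  /* 3 load-balanced + 1 directed */
        conf.nb_event_ports = 4;                   /* 3 load-balanced + 1 directed */
        conf.nb_single_link_event_port_queues = 1; /* directed port/queue pairs */
        conf.nb_events_limit = 4096;               /* closed-system credit limit */
        conf.nb_event_queue_flows = 1024;
        conf.nb_event_port_dequeue_depth = 32;
        conf.nb_event_port_enqueue_depth = 32;
        conf.dequeue_timeout_ns = 1000;            /* global wait; the per-dequeue
                                                    * timeout flag is not set */

        if (rte_event_dev_configure(dev_id, &conf) < 0)
                rte_panic("eventdev configure failed\n");

With these values, the scaling above would request 4096 load-balanced credits
and 4096 / 4 = 1024 directed credits (absent a num_dir_credits override).
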
@@ -309,6 +704,8 @@ void
dlb_entry_points_init(struct rte_eventdev *dev)
{
        static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
                .dev_infos_get = dlb_eventdev_info_get,
                .dev_configure = dlb_eventdev_configure,
                .dump = dlb_eventdev_dump,
                .xstats_get = dlb_eventdev_xstats_get,
                .xstats_get_names = dlb_eventdev_xstats_get_names,

drivers/event/dlb/dlb_iface.c

@@ -16,12 +16,23 @@ void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);

int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);

void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);

int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
                                    uint8_t *revision);

int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
                                   struct dlb_get_num_resources_args *rsrcs);

int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
                                     struct dlb_create_sched_domain_args *args);

int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
                                        struct dlb_create_ldb_pool_args *cfg);

int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
                                        struct dlb_create_dir_pool_args *cfg);

int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
                                  enum dlb_cq_poll_modes *mode);

drivers/event/dlb/dlb_iface.h

@@ -15,12 +15,23 @@ extern void (*dlb_iface_low_level_io_init)(struct dlb_eventdev *dlb);

extern int (*dlb_iface_open)(struct dlb_hw_dev *handle, const char *name);

extern void (*dlb_iface_domain_close)(struct dlb_eventdev *dlb);

extern int (*dlb_iface_get_device_version)(struct dlb_hw_dev *handle,
                                           uint8_t *revision);

extern int (*dlb_iface_get_num_resources)(struct dlb_hw_dev *handle,
                                          struct dlb_get_num_resources_args *rsrcs);

extern int (*dlb_iface_sched_domain_create)(struct dlb_hw_dev *handle,
                                            struct dlb_create_sched_domain_args *args);

extern int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
                                               struct dlb_create_ldb_pool_args *cfg);

extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
                                               struct dlb_create_dir_pool_args *cfg);

extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
                                         enum dlb_cq_poll_modes *mode);

drivers/event/dlb/pf/base/dlb_resource.c (diff suppressed because it is too large)

drivers/event/dlb/pf/dlb_pf.c

@@ -78,6 +78,17 @@ dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
        return 0;
}

static void
dlb_pf_domain_close(struct dlb_eventdev *dlb)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
        int ret;

        ret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);
        if (ret)
                DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
}

static int
dlb_pf_get_device_version(struct dlb_hw_dev *handle,
                          uint8_t *revision)
@@ -100,6 +111,79 @@ dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
        return 0;
}

static int
dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
                           struct dlb_create_sched_domain_args *arg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        if (dlb_dev->domain_reset_failed) {
                response.status = DLB_ST_DOMAIN_RESET_FAILED;
                ret = -EINVAL;
                goto done;
        }

        ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);
        if (ret)
                goto done;

done:
        *(struct dlb_cmd_response *)arg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

static int
dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
                              struct dlb_create_ldb_pool_args *cfg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
                                     handle->domain_id,
                                     cfg,
                                     &response);

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

static int
dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
                              struct dlb_create_dir_pool_args *cfg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
                                     handle->domain_id,
                                     cfg,
                                     &response);

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

static int
dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
                        enum dlb_cq_poll_modes *mode)
@@ -119,8 +203,12 @@ dlb_pf_iface_fn_ptrs_init(void)
{
        dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
        dlb_iface_open = dlb_pf_open;
        dlb_iface_domain_close = dlb_pf_domain_close;
        dlb_iface_get_device_version = dlb_pf_get_device_version;
        dlb_iface_get_num_resources = dlb_pf_get_num_resources;
        dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
        dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
        dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
        dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
}
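
These global hooks decouple the eventdev layer from the hardware access
method (the PF backend here; the "VDEV-only notes" earlier anticipate
another). A purely hypothetical sketch of how a second backend would plug in
(all dlb_vdev_* names are invented for illustration):

        static void
        dlb_vdev_iface_fn_ptrs_init(void)
        {
                /* Hypothetical: repoint the same hooks at vdev implementations. */
                dlb_iface_open = dlb_vdev_open;
                dlb_iface_domain_close = dlb_vdev_domain_close;
                dlb_iface_sched_domain_create = dlb_vdev_sched_domain_create;
                /* ...and so on for the remaining hooks. */
        }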