event/dlb2: add eventdev start

Add support for the eventdev start entry point. Initialization of
some resources is deferred until eventdev start, since the number of
linked queues is needed to determine whether a port is a load-balanced
(LDB) or a directed (DIR) resource. If this is a device restart, the
previous configuration is reapplied.
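
For context, the application-side call sequence that reaches this
entry point looks roughly as follows. This is a minimal sketch, not
part of the patch: dev_id, the single queue/port, the NULL (default)
queue/port configurations, and the omitted error handling are all
illustrative.

    uint8_t dev_id = 0;                      /* placeholder device ID */
    struct rte_event_dev_info info;
    struct rte_event_dev_config config = {0};
    int ret;

    rte_event_dev_info_get(dev_id, &info);
    config.nb_event_queues = 1;
    config.nb_event_ports = 1;
    config.nb_events_limit = info.max_num_events;
    config.nb_event_queue_flows = info.max_event_queue_flows;
    config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
    config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

    ret = rte_event_dev_configure(dev_id, &config);
    ret = rte_event_queue_setup(dev_id, 0, NULL);  /* default queue conf */
    ret = rte_event_port_setup(dev_id, 0, NULL);   /* default port conf */
    /* The link is recorded now; dev_start applies it (this patch). */
    ret = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
    ret = rte_event_dev_start(dev_id);             /* -> dlb2_eventdev_start */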

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Reviewed-by: Gage Eads <gage.eads@intel.com>

@@ -1957,6 +1957,138 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
return cfg.response.id;
}

static int
dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
{
struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
int ret, i;
/* If an event queue or port was previously configured, but hasn't been
* reconfigured, reapply its original configuration.
*/
for (i = 0; i < dlb2->num_queues; i++) {
struct dlb2_eventdev_queue *ev_queue;
ev_queue = &dlb2->ev_queues[i];
if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
continue;
ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
if (ret < 0) {
DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d", i);
return ret;
}
}
for (i = 0; i < dlb2->num_ports; i++) {
struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
continue;
ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
if (ret < 0) {
DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d",
i);
return ret;
}
}
return 0;
}

static int
dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
{
struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
int i;
/* Perform requested port->queue links */
for (i = 0; i < dlb2->num_ports; i++) {
struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
int j;
for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
struct dlb2_eventdev_queue *ev_queue;
uint8_t prio, queue_id;
if (!ev_port->link[j].valid)
continue;
prio = ev_port->link[j].priority;
queue_id = ev_port->link[j].queue_id;
if (dlb2_validate_port_link(ev_port, queue_id, true, j))
return -EINVAL;
ev_queue = &dlb2->ev_queues[queue_id];
if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
return -EINVAL;
}
}
return 0;
}

static int
dlb2_eventdev_start(struct rte_eventdev *dev)
{
struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
struct dlb2_hw_dev *handle = &dlb2->qm_instance;
struct dlb2_start_domain_args cfg;
int ret, i;
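/* Only a stopped device may be started; hold the resource lock while
 * checking and updating run_state.
 */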
rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
DLB2_LOG_ERR("bad state %d for dev_start\n",
(int)dlb2->run_state);
rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
return -EINVAL;
}
dlb2->run_state = DLB2_RUN_STATE_STARTING;
rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
/* If the device was configured more than once, some event ports and/or
* queues may need to be reconfigured.
*/
ret = dlb2_eventdev_reapply_configuration(dev);
if (ret)
return ret;
/* The DLB2 PMD delays port links until the device is started. */
ret = dlb2_eventdev_apply_port_links(dev);
if (ret)
return ret;
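/* Refuse to start unless every port is set up and every queue has at
 * least one link.
 */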
for (i = 0; i < dlb2->num_ports; i++) {
if (!dlb2->ev_ports[i].setup_done) {
DLB2_LOG_ERR("dlb2: port %d not setup", i);
return -ESTALE;
}
}
for (i = 0; i < dlb2->num_queues; i++) {
if (dlb2->ev_queues[i].num_links == 0) {
DLB2_LOG_ERR("dlb2: queue %d is not linked", i);
return -ENOLINK;
}
}
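/* cfg is output-only here: the call below fills in cfg.response. */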
ret = dlb2_iface_sched_domain_start(handle, &cfg);
if (ret < 0) {
DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
ret, dlb2_error_strings[cfg.response.status]);
return ret;
}
dlb2->run_state = DLB2_RUN_STATE_STARTED;
DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
return 0;
}

static void
dlb2_entry_points_init(struct rte_eventdev *dev)
{
@@ -1964,6 +2096,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
.dev_infos_get = dlb2_eventdev_info_get,
.dev_configure = dlb2_eventdev_configure,
.dev_start = dlb2_eventdev_start,
.queue_def_conf = dlb2_eventdev_queue_default_conf_get,
.queue_setup = dlb2_eventdev_queue_setup,
.port_def_conf = dlb2_eventdev_port_default_conf_get,


@@ -63,3 +63,6 @@ int (*dlb2_iface_unmap_qid)(struct dlb2_hw_dev *handle,
int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,
struct dlb2_pending_port_unmaps_args *args);

int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,
struct dlb2_start_domain_args *cfg);


@@ -62,4 +62,7 @@ extern int (*dlb2_iface_unmap_qid)(struct dlb2_hw_dev *handle,
extern int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,
struct dlb2_pending_port_unmaps_args *args);

extern int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,
struct dlb2_start_domain_args *cfg);
#endif /* _DLB2_IFACE_H_ */


@@ -5811,3 +5811,126 @@ int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
return 0;
}

static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
u32 domain_id,
struct dlb2_cmd_response *resp,
bool vdev_req,
unsigned int vdev_id)
{
struct dlb2_hw_domain *domain;
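/* The domain must exist, be configured, and not already be started. */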
domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
if (domain == NULL) {
resp->status = DLB2_ST_INVALID_DOMAIN_ID;
return -EINVAL;
}
if (!domain->configured) {
resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
return -EINVAL;
}
if (domain->started) {
resp->status = DLB2_ST_DOMAIN_STARTED;
return -EINVAL;
}
return 0;
}

static void dlb2_log_start_domain(struct dlb2_hw *hw,
u32 domain_id,
bool vdev_req,
unsigned int vdev_id)
{
DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
if (vdev_req)
DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
}

/**
* dlb2_hw_start_domain() - Lock the domain configuration
* @hw: Contains the current state of the DLB2 hardware.
* @domain_id: Domain ID
* @arg: User-provided arguments (unused, here for ioctl callback template).
* @resp: Response to user.
* @vdev_req: Request came from a virtual device.
* @vdev_id: If vdev_req is true, this contains the virtual device's ID.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
int
dlb2_hw_start_domain(struct dlb2_hw *hw,
u32 domain_id,
__attribute__((unused)) struct dlb2_start_domain_args *arg,
struct dlb2_cmd_response *resp,
bool vdev_req,
unsigned int vdev_id)
{
struct dlb2_list_entry *iter;
struct dlb2_dir_pq_pair *dir_queue;
struct dlb2_ldb_queue *ldb_queue;
struct dlb2_hw_domain *domain;
int ret;
RTE_SET_USED(arg);
RTE_SET_USED(iter);
dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
ret = dlb2_verify_start_domain_args(hw,
domain_id,
resp,
vdev_req,
vdev_id);
if (ret)
return ret;
domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
if (domain == NULL) {
DLB2_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
/*
* Enable load-balanced and directed queue write permissions for the
* queues this domain owns. Without this, the DLB2 will drop all
* incoming traffic to those queues.
*/
DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
union dlb2_sys_ldb_vasqid_v r0 = { {0} };
unsigned int offs;
r0.field.vasqid_v = 1;
offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
ldb_queue->id.phys_id;
DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
}
DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
union dlb2_sys_dir_vasqid_v r0 = { {0} };
unsigned int offs;
r0.field.vasqid_v = 1;
offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS +
dir_queue->id.phys_id;
DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
}
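/* Flush the CSR writes so they take effect before the domain is marked
 * started.
 */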
dlb2_flush_csr(hw);
domain->started = true;
resp->status = 0;
return 0;
}


@@ -661,3 +661,13 @@ dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
return dlb2_hw_create_dir_queue(hw, id, args, resp, NOT_VF_REQ,
PF_ID_ZERO);
}

int
dlb2_pf_start_domain(struct dlb2_hw *hw,
u32 id,
struct dlb2_start_domain_args *args,
struct dlb2_cmd_response *resp)
{
return dlb2_hw_start_domain(hw, id, args, resp, NOT_VF_REQ,
PF_ID_ZERO);
}


@@ -503,6 +503,30 @@ dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
return ret;
}

static int
dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
struct dlb2_start_domain_args *cfg)
{
struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
struct dlb2_cmd_response response = {0};
int ret;
DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
ret = dlb2_pf_start_domain(&dlb2_dev->hw,
handle->domain_id,
cfg,
&response);
cfg->response = response;
DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
__func__, ret);
return ret;
}

static void
dlb2_pf_iface_fn_ptrs_init(void)
{
@@ -520,6 +544,7 @@ dlb2_pf_iface_fn_ptrs_init(void)
dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
dlb2_iface_map_qid = dlb2_pf_map_qid;
dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;