event/dlb2: add probe-time hardware init
This commit adds probe-time low-level hardware initialization. It also adds
probe-time init for both primary and secondary DPDK processes.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Reviewed-by: Gage Eads <gage.eads@intel.com>
parent 17f56f6d56
commit e7c9971a85
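For orientation, the primary and secondary probe entry points added in this patch are normally selected by the EAL process role. The sketch below is illustrative only (the wrapper name is hypothetical and the real wiring lives in the PF-specific PCI probe code); it assumes the probe declarations and struct dlb2_devargs come from dlb2_priv.h:

#include <rte_eal.h>

#include "dlb2_priv.h"

/* Illustrative only: dispatch probe work based on the process role. */
static int
dlb2_probe_dispatch(struct rte_eventdev *dev, const char *name,
                    struct dlb2_devargs *args)
{
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                return dlb2_primary_eventdev_probe(dev, name, args);

        /* Secondary processes attach to hardware the primary initialized */
        return dlb2_secondary_eventdev_probe(dev, name);
}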
@@ -31,6 +31,7 @@
#include <rte_string_fns.h>

#include "dlb2_priv.h"
#include "dlb2_iface.h"
#include "dlb2_inline_fns.h"

/*
@@ -40,10 +41,108 @@
#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
#endif
static struct rte_event_dev_info evdev_dlb2_default_info = {
        .driver_name = "", /* probe will set */
        .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
        .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
        .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
#else
        .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
#endif
        .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
        .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
        .max_event_priority_levels = DLB2_QID_PRIORITIES,
        .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
        .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
        .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
        .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
        .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
        .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
        .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
                          RTE_EVENT_DEV_CAP_EVENT_QOS |
                          RTE_EVENT_DEV_CAP_BURST_MODE |
                          RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                          RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
                          RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
};

struct process_local_port_data
dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];

/* override defaults with value(s) provided on command line */
static void
dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
                                 int *qid_depth_thresholds)
{
        int q;

        for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
                if (qid_depth_thresholds[q] != 0)
                        dlb2->ev_queues[q].depth_threshold =
                                qid_depth_thresholds[q];
        }
}

static int
dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
{
        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
        struct dlb2_hw_resource_info *dlb2_info = &handle->info;
        int ret;

        /* Query driver resources provisioned for this device */

        ret = dlb2_iface_get_num_resources(handle,
                                           &dlb2->hw_rsrc_query_results);
        if (ret) {
                DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
                return ret;
        }

        /* Complete filling in device resource info returned to evdev app,
         * overriding any default values.
         * The capabilities (CAPs) were set at compile time.
         */

        evdev_dlb2_default_info.max_event_queues =
                dlb2->hw_rsrc_query_results.num_ldb_queues;

        evdev_dlb2_default_info.max_event_ports =
                dlb2->hw_rsrc_query_results.num_ldb_ports;

        evdev_dlb2_default_info.max_num_events =
                dlb2->hw_rsrc_query_results.num_ldb_credits;

        /* Save off values used when creating the scheduling domain. */

        handle->info.num_sched_domains =
                dlb2->hw_rsrc_query_results.num_sched_domains;

        handle->info.hw_rsrc_max.nb_events_limit =
                dlb2->hw_rsrc_query_results.num_ldb_credits;

        handle->info.hw_rsrc_max.num_queues =
                dlb2->hw_rsrc_query_results.num_ldb_queues +
                dlb2->hw_rsrc_query_results.num_dir_ports;

        handle->info.hw_rsrc_max.num_ldb_queues =
                dlb2->hw_rsrc_query_results.num_ldb_queues;

        handle->info.hw_rsrc_max.num_ldb_ports =
                dlb2->hw_rsrc_query_results.num_ldb_ports;

        handle->info.hw_rsrc_max.num_dir_ports =
                dlb2->hw_rsrc_query_results.num_dir_ports;

        handle->info.hw_rsrc_max.reorder_window_size =
                dlb2->hw_rsrc_query_results.num_hist_list_entries;

        rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));

        return 0;
}

#define DLB2_BASE_10 10

static int
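The limits that dlb2_hw_query_resources() writes into evdev_dlb2_default_info above are what an application later sees through the standard eventdev info query. A minimal usage sketch (device ID 0 is assumed purely for illustration):

#include <stdio.h>
#include <rte_eventdev.h>

/* Illustrative only: read back the per-device limits the PMD reported
 * at probe time (max queues/ports and the event credit limit).
 */
static void
print_dlb2_limits(void)
{
        struct rte_event_dev_info info;

        if (rte_event_dev_info_get(0, &info) == 0)
                printf("queues=%d ports=%d events=%d\n",
                       info.max_event_queues, info.max_event_ports,
                       info.max_num_events);
}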
@@ -235,14 +334,71 @@ set_qid_depth_thresh(const char *key __rte_unused,
        return 0;
}

static void
dlb2_entry_points_init(struct rte_eventdev *dev)
{
        RTE_SET_USED(dev);

        /* Eventdev PMD entry points */
}

int
dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
                            const char *name,
                            struct dlb2_devargs *dlb2_args)
{
-       RTE_SET_USED(dev);
-       RTE_SET_USED(name);
-       RTE_SET_USED(dlb2_args);
        struct dlb2_eventdev *dlb2;
        int err;

        dlb2 = dev->data->dev_private;

        dlb2->event_dev = dev; /* backlink */

        evdev_dlb2_default_info.driver_name = name;

        dlb2->max_num_events_override = dlb2_args->max_num_events;
        dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
        dlb2->qm_instance.cos_id = dlb2_args->cos_id;

        err = dlb2_iface_open(&dlb2->qm_instance, name);
        if (err < 0) {
                DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
                             err);
                return err;
        }

        err = dlb2_iface_get_device_version(&dlb2->qm_instance,
                                            &dlb2->revision);
        if (err < 0) {
                DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
                             err);
                return err;
        }

        err = dlb2_hw_query_resources(dlb2);
        if (err) {
                DLB2_LOG_ERR("get resources err=%d for %s\n",
                             err, name);
                return err;
        }

        dlb2_iface_hardware_init(&dlb2->qm_instance);

        err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
        if (err < 0) {
                DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
                             err);
                return err;
        }

        rte_spinlock_init(&dlb2->qm_instance.resource_lock);

        dlb2_iface_low_level_io_init();

        dlb2_entry_points_init(dev);

        dlb2_init_queue_depth_thresholds(dlb2,
                                         dlb2_args->qid_depth_thresholds.val);

        return 0;
}
@@ -251,8 +407,30 @@ int
dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
                              const char *name)
{
-       RTE_SET_USED(dev);
-       RTE_SET_USED(name);
        struct dlb2_eventdev *dlb2;
        int err;

        dlb2 = dev->data->dev_private;

        evdev_dlb2_default_info.driver_name = name;

        err = dlb2_iface_open(&dlb2->qm_instance, name);
        if (err < 0) {
                DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
                             err);
                return err;
        }

        err = dlb2_hw_query_resources(dlb2);
        if (err) {
                DLB2_LOG_ERR("get resources err=%d for %s\n",
                             err, name);
                return err;
        }

        dlb2_iface_low_level_io_init();

        dlb2_entry_points_init(dev);

        return 0;
}
@@ -10,7 +10,8 @@ endif
sources = files('dlb2.c',
                'dlb2_iface.c',
                'pf/dlb2_main.c',
-               'pf/dlb2_pf.c'
+               'pf/dlb2_pf.c',
+               'pf/base/dlb2_resource.c'
)

headers = files()
drivers/event/dlb2/pf/base/dlb2_resource.c (new file, 274 lines)
@@ -0,0 +1,274 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include "dlb2_user.h"

#include "dlb2_hw_types.h"
#include "dlb2_mbox.h"
#include "dlb2_osdep.h"
#include "dlb2_osdep_bitmap.h"
#include "dlb2_osdep_types.h"
#include "dlb2_regs.h"
#include "dlb2_resource.h"

static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
        int i;

        dlb2_list_init_head(&domain->used_ldb_queues);
        dlb2_list_init_head(&domain->used_dir_pq_pairs);
        dlb2_list_init_head(&domain->avail_ldb_queues);
        dlb2_list_init_head(&domain->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->used_ldb_ports[i]);
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&domain->avail_ldb_ports[i]);
}

static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
{
        int i;

        dlb2_list_init_head(&rsrc->avail_domains);
        dlb2_list_init_head(&rsrc->used_domains);
        dlb2_list_init_head(&rsrc->avail_ldb_queues);
        dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
}

void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
        union dlb2_chp_cfg_chp_csr_ctrl r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

        r0.field.cfg_64bytes_qe_dir_cq_mode = 1;

        DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}

int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
                              struct dlb2_get_num_resources_args *arg,
                              bool vdev_req,
                              unsigned int vdev_id)
{
        struct dlb2_function_resources *rsrcs;
        struct dlb2_bitmap *map;
        int i;

        if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
                return -EINVAL;

        if (vdev_req)
                rsrcs = &hw->vdev[vdev_id];
        else
                rsrcs = &hw->pf;

        arg->num_sched_domains = rsrcs->num_avail_domains;

        arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;

        arg->num_ldb_ports = 0;
        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];

        arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
        arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
        arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
        arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];

        arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;

        arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;

        map = rsrcs->avail_hist_list_entries;

        arg->num_hist_list_entries = dlb2_bitmap_count(map);

        arg->max_contiguous_hist_list_entries =
                dlb2_bitmap_longest_set_range(map);

        arg->num_ldb_credits = rsrcs->num_avail_qed_entries;

        arg->num_dir_credits = rsrcs->num_avail_dqed_entries;

        return 0;
}

void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
{
        union dlb2_chp_cfg_chp_csr_ctrl r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);

        r0.field.cfg_64bytes_qe_ldb_cq_mode = 1;

        DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, r0.val);
}

void dlb2_resource_free(struct dlb2_hw *hw)
{
        int i;

        if (hw->pf.avail_hist_list_entries)
                dlb2_bitmap_free(hw->pf.avail_hist_list_entries);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                if (hw->vdev[i].avail_hist_list_entries)
                        dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
        }
}

int dlb2_resource_init(struct dlb2_hw *hw)
{
        struct dlb2_list_entry *list;
        unsigned int i;
        int ret;

        /*
         * For optimal load-balancing, ports that map to one or more QIDs in
         * common should not be in numerical sequence. This is application
         * dependent, but the driver interleaves port IDs as much as possible
         * to reduce the likelihood of this. This initial allocation maximizes
         * the average distance between an ID and its immediate neighbors (i.e.
         * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
         * 3, etc.).
         */
        u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
                0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,
                16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
                32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
                48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
        };

        /* Zero-out resource tracking data structures */
        memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
        memset(&hw->pf, 0, sizeof(hw->pf));

        dlb2_init_fn_rsrc_lists(&hw->pf);

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                memset(&hw->vdev[i], 0, sizeof(hw->vdev[i]));
                dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
        }

        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
                dlb2_init_domain_rsrc_lists(&hw->domains[i]);
                hw->domains[i].parent_func = &hw->pf;
        }

        /* Give all resources to the PF driver */
        hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
        for (i = 0; i < hw->pf.num_avail_domains; i++) {
                list = &hw->domains[i].func_list;

                dlb2_list_add(&hw->pf.avail_domains, list);
        }

        hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
        for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
                list = &hw->rsrcs.ldb_queues[i].func_list;

                dlb2_list_add(&hw->pf.avail_ldb_queues, list);
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->pf.num_avail_ldb_ports[i] =
                        DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;

        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                int cos_id = i >> DLB2_NUM_COS_DOMAINS;
                struct dlb2_ldb_port *port;

                port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];

                dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
                              &port->func_list);
        }

        hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS;
        for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
                list = &hw->rsrcs.dir_pq_pairs[i].func_list;

                dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
        }

        hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
        hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS;
        hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;

        ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
                                DLB2_MAX_NUM_HIST_LIST_ENTRIES);
        if (ret)
                goto unwind;

        ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
        if (ret)
                goto unwind;

        for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
                ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
                                        DLB2_MAX_NUM_HIST_LIST_ENTRIES);
                if (ret)
                        goto unwind;

                ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
                if (ret)
                        goto unwind;
        }

        /* Initialize the hardware resource IDs */
        for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
                hw->domains[i].id.phys_id = i;
                hw->domains[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
                hw->rsrcs.ldb_queues[i].id.phys_id = i;
                hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
                hw->rsrcs.ldb_ports[i].id.phys_id = i;
                hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) {
                hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
                hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
        }

        for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
                hw->rsrcs.sn_groups[i].id = i;
                /* Default mode (0) is 64 sequence numbers per queue */
                hw->rsrcs.sn_groups[i].mode = 0;
                hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
                hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
        }

        for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
                hw->cos_reservation[i] = 100 / DLB2_NUM_COS_DOMAINS;

        return 0;

unwind:
        dlb2_resource_free(hw);

        return ret;
}

void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
{
        union dlb2_cfg_mstr_cfg_pm_pmcsr_disable r0;

        r0.val = DLB2_CSR_RD(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE);

        r0.field.disable = 0;

        DLB2_CSR_WR(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE, r0.val);
}
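One detail worth noting in dlb2_resource_init() above: the init_ldb_port_allocation table follows a regular stride pattern. The sketch below is an illustrative helper, not part of the patch, that reproduces the same ordering assuming 64 load-balanced ports split into groups of 16:

#include <stdint.h>

/* Illustrative only: regenerate the interleaved port-ID table used above.
 * Within each group of 16 IDs the allocation order advances by a stride of
 * 7 (modulo 16), which keeps consecutively allocated port IDs far apart.
 */
static void
fill_interleaved_port_ids(uint8_t ids[64])
{
        int i;

        for (i = 0; i < 64; i++)
                ids[i] = (i / 16) * 16 + (i * 7) % 16;
}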
@@ -19,6 +19,7 @@
#include "dlb2_main.h"
#include "../dlb2_user.h"
#include "../dlb2_priv.h"
#include "../dlb2_iface.h"
#include "../dlb2_inline_fns.h"

#define PF_ID_ZERO 0 /* PF ONLY! */
@@ -72,27 +73,6 @@
#define DLB2_PCI_ACS_UF 0x10
#define DLB2_PCI_ACS_EC 0x20

-/* Stubs: Allow building partial probe patch */
-void dlb2_resource_free(struct dlb2_hw *hw)
-{
-       RTE_SET_USED(hw);
-}
-
-int dlb2_resource_init(struct dlb2_hw *hw)
-{
-       int ret = 0;
-       RTE_SET_USED(hw);
-
-       return ret;
-}
-
-void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
-{
-       RTE_SET_USED(hw);
-}
-
-/* End stubs */
-
static int
dlb2_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
{
@@ -149,7 +129,6 @@ static int
dlb2_pf_init_driver_state(struct dlb2_dev *dlb2_dev)
{
        rte_spinlock_init(&dlb2_dev->resource_mutex);
-       rte_spinlock_init(&dlb2_dev->measurement_lock);

        return 0;
}
@@ -38,7 +38,6 @@ struct dlb2_dev {
         * hardware registers.
         */
        rte_spinlock_t resource_mutex;
-       rte_spinlock_t measurement_lock;
        bool worker_launched;
        u8 revision;
};
@@ -32,6 +32,7 @@
#include <rte_string_fns.h>

#include "../dlb2_priv.h"
#include "../dlb2_iface.h"
#include "../dlb2_inline_fns.h"
#include "dlb2_main.h"
#include "base/dlb2_hw_types.h"
@@ -40,35 +41,82 @@

static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);

-/* Stubs: Allow building partial probe patch */
-int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
-                              struct dlb2_get_num_resources_args *arg,
-                              bool vdev_req,
-                              unsigned int vdev_id)
static void
dlb2_pf_low_level_io_init(void)
{
-       RTE_SET_USED(hw);
-       RTE_SET_USED(arg);
-       RTE_SET_USED(vdev_req);
-       RTE_SET_USED(vdev_id);
        int i;
        /* Addresses will be initialized at port create */
        for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
                /* First directed ports */
                dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
                dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
                dlb2_port[i][DLB2_DIR_PORT].mmaped = true;

                /* Now load balanced ports */
                dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
                dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
                dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
        }
}

static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
        RTE_SET_USED(handle);
        RTE_SET_USED(name);

        return 0;
}

-void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
static int
dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
                           uint8_t *revision)
{
-       RTE_SET_USED(hw);
        struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

        *revision = dlb2_dev->revision;

        return 0;
}

-void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
static void
dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
{
-       RTE_SET_USED(hw);
        struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

        dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
        dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
}

static int
dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
                          struct dlb2_get_num_resources_args *rsrcs)
{
        struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

        return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
}

static int
dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
                         enum dlb2_cq_poll_modes *mode)
{
        RTE_SET_USED(handle);

        *mode = DLB2_CQ_POLL_MODE_SPARSE;

        return 0;
}
-/* End stubs */

static void
dlb2_pf_iface_fn_ptrs_init(void)
{
        /* flexible iface fn ptr assignments will go here */
        dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
        dlb2_iface_open = dlb2_pf_open;
        dlb2_iface_get_device_version = dlb2_pf_get_device_version;
        dlb2_iface_hardware_init = dlb2_pf_hardware_init;
        dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
        dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
}

/* PCI DEV HOOKS */
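The dlb2_pf_iface_fn_ptrs_init() assignments above target the function-pointer interface that dlb2.c calls through (dlb2_iface_open, dlb2_iface_hardware_init, and so on). A rough sketch of what the corresponding declarations in dlb2_iface.h would look like, inferred from how the pointers are used in this patch (the exact types in the real header may differ):

/* Inferred shapes of the iface hooks: the PF backend fills these in at
 * probe time and the common PMD code invokes them without knowing which
 * backend is behind them.
 */
extern void (*dlb2_iface_low_level_io_init)(void);
extern int (*dlb2_iface_open)(struct dlb2_hw_dev *handle, const char *name);
extern int (*dlb2_iface_get_device_version)(struct dlb2_hw_dev *handle,
                                            uint8_t *revision);
extern void (*dlb2_iface_hardware_init)(struct dlb2_hw_dev *handle);
extern int (*dlb2_iface_get_num_resources)(struct dlb2_hw_dev *handle,
                                           struct dlb2_get_num_resources_args *rsrcs);
extern int (*dlb2_iface_get_cq_poll_mode)(struct dlb2_hw_dev *handle,
                                          enum dlb2_cq_poll_modes *mode);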