event/dlb2: allow CQ depths up to 1024

Allow overriding the default CQ depth of 32 through the new max_cq_depth
devargs parameter. Since there are only 2048 DLB history list entries,
increasing the CQ depth decreases the number of available LDB ports to
2048/max_cq_depth. The resource query takes this into account and returns
the correct maximum number of LDB ports.
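
For example, with the default CQ depth of 32 the 2048-entry history list
accommodates 2048/32 = 64 LDB ports, while the maximum override of 1024
leaves only 2048/1024 = 2 LDB ports. The override is passed as a devargs
parameter; an illustrative EAL argument (the PCI address is a placeholder):

    --allow ea:00.0,max_cq_depth=128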

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Author: Timothy McDaniel, 2022-04-09 10:13:20 -05:00
Committed by: Jerin Jacob
commit 86fe66d456 (parent 80bb303d3b)
3 changed files with 62 additions and 8 deletions

--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -55,7 +55,7 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
 	.max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
 	.max_event_priority_levels = DLB2_QID_PRIORITIES,
 	.max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
-	.max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
+	.max_event_port_dequeue_depth = DLB2_DEFAULT_CQ_DEPTH,
 	.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
 	.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
 	.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
@@ -111,6 +111,7 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
 {
 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
 	struct dlb2_hw_resource_info *dlb2_info = &handle->info;
+	int num_ldb_ports;
 	int ret;
 
 	/* Query driver resources provisioned for this device */
@@ -127,11 +128,15 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
 	 * The capabilities (CAPs) were set at compile time.
 	 */
 
+	if (dlb2->max_cq_depth != DLB2_DEFAULT_CQ_DEPTH)
+		num_ldb_ports = DLB2_MAX_HL_ENTRIES / dlb2->max_cq_depth;
+	else
+		num_ldb_ports = dlb2->hw_rsrc_query_results.num_ldb_ports;
+
 	evdev_dlb2_default_info.max_event_queues =
 		dlb2->hw_rsrc_query_results.num_ldb_queues;
 
-	evdev_dlb2_default_info.max_event_ports =
-		dlb2->hw_rsrc_query_results.num_ldb_ports;
+	evdev_dlb2_default_info.max_event_ports = num_ldb_ports;
 
 	if (dlb2->version == DLB2_HW_V2_5) {
 		evdev_dlb2_default_info.max_num_events =
@@ -159,8 +164,7 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
 	handle->info.hw_rsrc_max.num_ldb_queues =
 		dlb2->hw_rsrc_query_results.num_ldb_queues;
 
-	handle->info.hw_rsrc_max.num_ldb_ports =
-		dlb2->hw_rsrc_query_results.num_ldb_ports;
+	handle->info.hw_rsrc_max.num_ldb_ports = num_ldb_ports;
 
 	handle->info.hw_rsrc_max.num_dir_ports =
 		dlb2->hw_rsrc_query_results.num_dir_ports;
@@ -212,6 +216,36 @@ set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
 	return 0;
 }
 
+static int
+set_max_cq_depth(const char *key __rte_unused,
+		 const char *value,
+		 void *opaque)
+{
+	int *max_cq_depth = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB2_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb2_string_to_int(max_cq_depth, value);
+	if (ret < 0)
+		return ret;
+
+	if (*max_cq_depth < DLB2_MIN_CQ_DEPTH_OVERRIDE ||
+	    *max_cq_depth > DLB2_MAX_CQ_DEPTH_OVERRIDE ||
+	    !rte_is_power_of_2(*max_cq_depth)) {
+		DLB2_LOG_ERR("dlb2: max_cq_depth must be between %d and %d and a power of 2\n",
+			     DLB2_MIN_CQ_DEPTH_OVERRIDE,
+			     DLB2_MAX_CQ_DEPTH_OVERRIDE);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 set_max_num_events(const char *key __rte_unused,
 		   const char *value,
@@ -4504,6 +4538,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 	dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta;
 	dlb2->default_depth_thresh = dlb2_args->default_depth_thresh;
 	dlb2->vector_opts_enabled = dlb2_args->vector_opts_enabled;
+	dlb2->max_cq_depth = dlb2_args->max_cq_depth;
 
 	err = dlb2_iface_open(&dlb2->qm_instance, name);
 	if (err < 0) {
@@ -4609,6 +4644,7 @@ dlb2_parse_params(const char *params,
 					     DLB2_HW_CREDIT_QUANTA_ARG,
 					     DLB2_DEPTH_THRESH_ARG,
 					     DLB2_VECTOR_OPTS_ENAB_ARG,
+					     DLB2_MAX_CQ_DEPTH,
 					     NULL };
 
 	if (params != NULL && params[0] != '\0') {
@@ -4744,6 +4780,17 @@ dlb2_parse_params(const char *params,
 			return ret;
 		}
 
+		ret = rte_kvargs_process(kvlist,
+					 DLB2_MAX_CQ_DEPTH,
+					 set_max_cq_depth,
+					 &dlb2_args->max_cq_depth);
+		if (ret != 0) {
+			DLB2_LOG_ERR("%s: Error parsing max cq depth parameter",
+				     name);
+			rte_kvargs_free(kvlist);
+			return ret;
+		}
+
 		rte_kvargs_free(kvlist);
 	}
 }
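
The hunk above wires the new "max_cq_depth" key through rte_kvargs. As a
standalone illustration of that mechanism (not part of the patch), the
sketch below parses a devargs string and dispatches the key to a handler
that applies the same bounds check; atoi() stands in for the driver's
internal dlb2_string_to_int() helper, and all names other than the devargs
key are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_kvargs.h>

static int
handle_max_cq_depth(const char *key __rte_unused, const char *value,
		    void *opaque)
{
	int *depth = opaque;

	*depth = atoi(value);
	/* Same constraint as set_max_cq_depth(): 32..1024, power of 2 */
	if (*depth < 32 || *depth > 1024 || !rte_is_power_of_2(*depth))
		return -EINVAL;
	return 0;
}

int
main(void)
{
	static const char * const keys[] = { "max_cq_depth", NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse("max_cq_depth=128", keys);
	int depth = 32; /* DLB2_DEFAULT_CQ_DEPTH */

	if (kvlist == NULL)
		return 1;
	if (rte_kvargs_process(kvlist, "max_cq_depth",
			       handle_max_cq_depth, &depth) != 0)
		printf("invalid max_cq_depth\n");
	else
		printf("max_cq_depth = %d\n", depth);
	rte_kvargs_free(kvlist);
	return 0;
}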

--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -28,6 +28,8 @@
 #define DLB2_SW_CREDIT_P_QUANTA_DEFAULT 256 /* Producer */
 #define DLB2_SW_CREDIT_C_QUANTA_DEFAULT 256 /* Consumer */
 #define DLB2_DEPTH_THRESH_DEFAULT 256
+#define DLB2_MIN_CQ_DEPTH_OVERRIDE 32
+#define DLB2_MAX_CQ_DEPTH_OVERRIDE 1024
 
 /* command line arg strings */
 #define NUMA_NODE_ARG "numa_node"
@@ -41,6 +43,7 @@
 #define DLB2_HW_CREDIT_QUANTA_ARG "hw_credit_quanta"
 #define DLB2_DEPTH_THRESH_ARG "default_depth_thresh"
 #define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable"
+#define DLB2_MAX_CQ_DEPTH "max_cq_depth"
 
 /* Begin HW related defines and structs */
@@ -87,11 +90,12 @@
  * depth must be a power of 2 and must also be >= HIST LIST entries.
  * As a result we just limit the maximum dequeue depth to 32.
  */
+#define DLB2_MAX_HL_ENTRIES 2048
 #define DLB2_MIN_CQ_DEPTH 1
-#define DLB2_MAX_CQ_DEPTH 32
+#define DLB2_DEFAULT_CQ_DEPTH 32
 #define DLB2_MIN_HARDWARE_CQ_DEPTH 8
 #define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
-	DLB2_MAX_CQ_DEPTH
+	DLB2_DEFAULT_CQ_DEPTH
 
 #define DLB2_HW_DEVICE_FROM_PCI_ID(_pdev) \
 	(((_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_PF) || \
@@ -572,6 +576,7 @@ struct dlb2_eventdev {
 	int max_num_events_override;
 	int num_dir_credits_override;
 	bool vector_opts_enabled;
+	int max_cq_depth;
 	volatile enum dlb2_run_state run_state;
 	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
 	union {
@@ -632,6 +637,7 @@ struct dlb2_devargs {
 	int hw_credit_quanta;
 	int default_depth_thresh;
 	bool vector_opts_enabled;
+	int max_cq_depth;
 };
 
 /* End Eventdev related defines and structs */

--- a/drivers/event/dlb2/pf/dlb2_pf.c
+++ b/drivers/event/dlb2/pf/dlb2_pf.c
@@ -619,7 +619,8 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
 		.poll_interval = DLB2_POLL_INTERVAL_DEFAULT,
 		.sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT,
 		.hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ,
-		.default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT
+		.default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT,
+		.max_cq_depth = DLB2_DEFAULT_CQ_DEPTH
 	};
 	struct dlb2_eventdev *dlb2;
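
Not part of the patch: once the PMD has been probed with a max_cq_depth
override, the reduced LDB port count surfaces through the standard eventdev
capability query. A minimal sketch, assuming EAL is already initialized and
the DLB2 device is event device ID 0:

#include <stdio.h>
#include <rte_eventdev.h>

static int
print_dlb2_limits(void)
{
	struct rte_event_dev_info info;
	int ret = rte_event_dev_info_get(0, &info);

	if (ret != 0)
		return ret;

	/* With max_cq_depth=1024 the driver reports 2048/1024 = 2 ports. */
	printf("max_event_ports: %u\n", (unsigned int)info.max_event_ports);
	return 0;
}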