event/dlb2: add eventdev probe
Add the eventdev portion of probe, and parse command line options, but do not initialize hardware. Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com> Reviewed-by: Gage Eads <gage.eads@intel.com>
This commit is contained in:
parent
7161ea01b1
commit
5433956d51
@ -2,6 +2,349 @@
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <rte_log.h>
|
||||
#include <assert.h>
|
||||
#include <errno.h>
|
||||
#include <nmmintrin.h>
|
||||
#include <pthread.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/fcntl.h>
|
||||
|
||||
#include <rte_common.h>
|
||||
#include <rte_config.h>
|
||||
#include <rte_cycles.h>
|
||||
#include <rte_debug.h>
|
||||
#include <rte_dev.h>
|
||||
#include <rte_errno.h>
|
||||
#include <rte_eventdev.h>
|
||||
#include <rte_eventdev_pmd.h>
|
||||
#include <rte_io.h>
|
||||
#include <rte_kvargs.h>
|
||||
#include <rte_log.h>
|
||||
#include <rte_malloc.h>
|
||||
#include <rte_mbuf.h>
|
||||
#include <rte_prefetch.h>
|
||||
#include <rte_ring.h>
|
||||
#include <rte_string_fns.h>
|
||||
|
||||
#include "dlb2_priv.h"
|
||||
#include "dlb2_inline_fns.h"
|
||||
|
||||
/*
|
||||
* Resources exposed to eventdev. Some values overridden at runtime using
|
||||
* values returned by the DLB kernel driver.
|
||||
*/
|
||||
#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
|
||||
#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
|
||||
#endif
|
||||
|
||||
struct process_local_port_data
|
||||
dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
|
||||
|
||||
#define DLB2_BASE_10 10

/*
 * Parse a base-10 string into an int.
 *
 * Returns 0 on success, -EINVAL for NULL arguments, malformed/empty input,
 * or a value outside the range of int, and -errno if strtol itself fails
 * (e.g. -ERANGE on long overflow).
 */
static int
dlb2_string_to_int(int *result, const char *str)
{
	char *end = NULL;
	long parsed;

	if (result == NULL || str == NULL)
		return -EINVAL;

	errno = 0;
	parsed = strtol(str, &end, DLB2_BASE_10);
	if (errno != 0)
		return -errno;

	/*
	 * long may be wider than int on some architectures; also reject
	 * input with no digits (end == str).
	 */
	if (end == str || parsed < INT_MIN || parsed > INT_MAX)
		return -EINVAL;

	*result = (int)parsed;
	return 0;
}
|
||||
|
||||
static int
|
||||
set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
|
||||
{
|
||||
int *socket_id = opaque;
|
||||
int ret;
|
||||
|
||||
ret = dlb2_string_to_int(socket_id, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (*socket_id > RTE_MAX_NUMA_NODES)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
set_max_num_events(const char *key __rte_unused,
|
||||
const char *value,
|
||||
void *opaque)
|
||||
{
|
||||
int *max_num_events = opaque;
|
||||
int ret;
|
||||
|
||||
if (value == NULL || opaque == NULL) {
|
||||
DLB2_LOG_ERR("NULL pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = dlb2_string_to_int(max_num_events, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (*max_num_events < 0 || *max_num_events >
|
||||
DLB2_MAX_NUM_LDB_CREDITS) {
|
||||
DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
|
||||
DLB2_MAX_NUM_LDB_CREDITS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
set_num_dir_credits(const char *key __rte_unused,
|
||||
const char *value,
|
||||
void *opaque)
|
||||
{
|
||||
int *num_dir_credits = opaque;
|
||||
int ret;
|
||||
|
||||
if (value == NULL || opaque == NULL) {
|
||||
DLB2_LOG_ERR("NULL pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = dlb2_string_to_int(num_dir_credits, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (*num_dir_credits < 0 ||
|
||||
*num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
|
||||
DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
|
||||
DLB2_MAX_NUM_DIR_CREDITS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
set_dev_id(const char *key __rte_unused,
|
||||
const char *value,
|
||||
void *opaque)
|
||||
{
|
||||
int *dev_id = opaque;
|
||||
int ret;
|
||||
|
||||
if (value == NULL || opaque == NULL) {
|
||||
DLB2_LOG_ERR("NULL pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = dlb2_string_to_int(dev_id, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
set_cos(const char *key __rte_unused,
|
||||
const char *value,
|
||||
void *opaque)
|
||||
{
|
||||
enum dlb2_cos *cos_id = opaque;
|
||||
int x = 0;
|
||||
int ret;
|
||||
|
||||
if (value == NULL || opaque == NULL) {
|
||||
DLB2_LOG_ERR("NULL pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = dlb2_string_to_int(&x, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
|
||||
DLB2_LOG_ERR(
|
||||
"COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
|
||||
x);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*cos_id = x;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
set_qid_depth_thresh(const char *key __rte_unused,
|
||||
const char *value,
|
||||
void *opaque)
|
||||
{
|
||||
struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
|
||||
int first, last, thresh, i;
|
||||
|
||||
if (value == NULL || opaque == NULL) {
|
||||
DLB2_LOG_ERR("NULL pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* command line override may take one of the following 3 forms:
|
||||
* qid_depth_thresh=all:<threshold_value> ... all queues
|
||||
* qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
|
||||
* qid_depth_thresh=qid:<threshold_value> ... just one queue
|
||||
*/
|
||||
if (sscanf(value, "all:%d", &thresh) == 1) {
|
||||
first = 0;
|
||||
last = DLB2_MAX_NUM_QUEUES - 1;
|
||||
} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
|
||||
/* we have everything we need */
|
||||
} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
|
||||
last = first;
|
||||
} else {
|
||||
DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
|
||||
DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
|
||||
DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
|
||||
DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = first; i <= last; i++)
|
||||
qid_thresh->val[i] = thresh; /* indexed by qid */
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Primary-process probe entry point. Currently a stub: this patch adds
 * only devarg parsing; hardware initialization arrives in a later patch.
 */
int
dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
			    const char *name,
			    struct dlb2_devargs *dlb2_args)
{
	/* Silence unused-parameter warnings until the body is filled in */
	RTE_SET_USED(dev);
	RTE_SET_USED(name);
	RTE_SET_USED(dlb2_args);

	return 0;
}
|
||||
|
||||
/*
 * Secondary-process probe entry point. Stub for now, like the primary
 * probe above; it will attach to primary-process state in a later patch.
 */
int
dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
			      const char *name)
{
	/* Silence unused-parameter warnings until the body is filled in */
	RTE_SET_USED(dev);
	RTE_SET_USED(name);

	return 0;
}
|
||||
|
||||
int
|
||||
dlb2_parse_params(const char *params,
|
||||
const char *name,
|
||||
struct dlb2_devargs *dlb2_args)
|
||||
{
|
||||
int ret = 0;
|
||||
static const char * const args[] = { NUMA_NODE_ARG,
|
||||
DLB2_MAX_NUM_EVENTS,
|
||||
DLB2_NUM_DIR_CREDITS,
|
||||
DEV_ID_ARG,
|
||||
DLB2_QID_DEPTH_THRESH_ARG,
|
||||
DLB2_COS_ARG,
|
||||
NULL };
|
||||
|
||||
if (params != NULL && params[0] != '\0') {
|
||||
struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
|
||||
|
||||
if (kvlist == NULL) {
|
||||
RTE_LOG(INFO, PMD,
|
||||
"Ignoring unsupported parameters when creating device '%s'\n",
|
||||
name);
|
||||
} else {
|
||||
int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
|
||||
set_numa_node,
|
||||
&dlb2_args->socket_id);
|
||||
if (ret != 0) {
|
||||
DLB2_LOG_ERR("%s: Error parsing numa node parameter",
|
||||
name);
|
||||
rte_kvargs_free(kvlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
|
||||
set_max_num_events,
|
||||
&dlb2_args->max_num_events);
|
||||
if (ret != 0) {
|
||||
DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
|
||||
name);
|
||||
rte_kvargs_free(kvlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = rte_kvargs_process(kvlist,
|
||||
DLB2_NUM_DIR_CREDITS,
|
||||
set_num_dir_credits,
|
||||
&dlb2_args->num_dir_credits_override);
|
||||
if (ret != 0) {
|
||||
DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
|
||||
name);
|
||||
rte_kvargs_free(kvlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
|
||||
set_dev_id,
|
||||
&dlb2_args->dev_id);
|
||||
if (ret != 0) {
|
||||
DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
|
||||
name);
|
||||
rte_kvargs_free(kvlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = rte_kvargs_process(
|
||||
kvlist,
|
||||
DLB2_QID_DEPTH_THRESH_ARG,
|
||||
set_qid_depth_thresh,
|
||||
&dlb2_args->qid_depth_thresholds);
|
||||
if (ret != 0) {
|
||||
DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
|
||||
name);
|
||||
rte_kvargs_free(kvlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
|
||||
set_cos,
|
||||
&dlb2_args->cos_id);
|
||||
if (ret != 0) {
|
||||
DLB2_LOG_ERR("%s: Error parsing cos parameter",
|
||||
name);
|
||||
rte_kvargs_free(kvlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
rte_kvargs_free(kvlist);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);
|
||||
|
@ -7,7 +7,10 @@ if not is_linux or not dpdk_conf.has('RTE_ARCH_X86_64')
|
||||
subdir_done()
|
||||
endif
|
||||
|
||||
# The single-assignment form was immediately overwritten below; keep only
# the full source list.
sources = files('dlb2.c',
		'pf/dlb2_main.c',
		'pf/dlb2_pf.c'
)
|
||||
|
||||
headers = files()
|
||||
|
||||
|
367
drivers/event/dlb2/pf/base/dlb2_hw_types.h
Normal file
367
drivers/event/dlb2/pf/base/dlb2_hw_types.h
Normal file
@ -0,0 +1,367 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __DLB2_HW_TYPES_H
|
||||
#define __DLB2_HW_TYPES_H
|
||||
|
||||
#include "dlb2_user.h"
|
||||
|
||||
#include "dlb2_osdep_list.h"
|
||||
#include "dlb2_osdep_types.h"
|
||||
|
||||
/* Device-wide resource counts and hardware limits. */
#define DLB2_MAX_NUM_VDEVS 16
#define DLB2_MAX_NUM_DOMAINS 32
#define DLB2_MAX_NUM_LDB_QUEUES 32 /* LDB == load-balanced */
#define DLB2_MAX_NUM_DIR_QUEUES 64 /* DIR == directed */
#define DLB2_MAX_NUM_LDB_PORTS 64
#define DLB2_MAX_NUM_DIR_PORTS 64
#define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024)
#define DLB2_MAX_NUM_DIR_CREDITS (2 * 1024)
#define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048
#define DLB2_MAX_NUM_AQED_ENTRIES 2048
#define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8
#define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2
#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5
#define DLB2_QID_PRIORITIES 8
#define DLB2_NUM_ARB_WEIGHTS 8
#define DLB2_MAX_WEIGHT 255
#define DLB2_NUM_COS_DOMAINS 4
#define DLB2_MAX_CQ_COMP_CHECK_LOOPS 409600
#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30))
/* NOTE(review): FPGA presumably selects an emulation build clock -- confirm */
#ifdef FPGA
#define DLB2_HZ 2000000
#else
#define DLB2_HZ 800000000
#endif
|
||||
|
||||
#define PCI_DEVICE_ID_INTEL_DLB2_PF 0x2710
#define PCI_DEVICE_ID_INTEL_DLB2_VF 0x2711

/* Interrupt related macros */
#define DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS 1
#define DLB2_PF_NUM_CQ_INTERRUPT_VECTORS     64
#define DLB2_PF_TOTAL_NUM_INTERRUPT_VECTORS \
	(DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \
	 DLB2_PF_NUM_CQ_INTERRUPT_VECTORS)
#define DLB2_PF_NUM_COMPRESSED_MODE_VECTORS \
	(DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1)
#define DLB2_PF_NUM_PACKED_MODE_VECTORS \
	DLB2_PF_TOTAL_NUM_INTERRUPT_VECTORS
#define DLB2_PF_COMPRESSED_MODE_CQ_VECTOR_ID \
	DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS

/* DLB non-CQ interrupts (alarm, mailbox, WDT) */
#define DLB2_INT_NON_CQ 0

#define DLB2_ALARM_HW_SOURCE_SYS 0
#define DLB2_ALARM_HW_SOURCE_DLB 1

#define DLB2_ALARM_HW_UNIT_CHP 4

#define DLB2_ALARM_SYS_AID_ILLEGAL_QID		3
#define DLB2_ALARM_SYS_AID_DISABLED_QID		4
#define DLB2_ALARM_SYS_AID_ILLEGAL_HCW		5
#define DLB2_ALARM_HW_CHP_AID_ILLEGAL_ENQ	1
#define DLB2_ALARM_HW_CHP_AID_EXCESS_TOKEN_POPS	2

#define DLB2_VF_NUM_NON_CQ_INTERRUPT_VECTORS 1
#define DLB2_VF_NUM_CQ_INTERRUPT_VECTORS     31
#define DLB2_VF_BASE_CQ_VECTOR_ID	     0
#define DLB2_VF_LAST_CQ_VECTOR_ID	     30
#define DLB2_VF_MBOX_VECTOR_ID		     31
#define DLB2_VF_TOTAL_NUM_INTERRUPT_VECTORS \
	(DLB2_VF_NUM_NON_CQ_INTERRUPT_VECTORS + \
	 DLB2_VF_NUM_CQ_INTERRUPT_VECTORS)

#define DLB2_VDEV_MAX_NUM_INTERRUPT_VECTORS (DLB2_MAX_NUM_LDB_PORTS + \
					     DLB2_MAX_NUM_DIR_PORTS + 1)

/*
 * Hardware-defined base addresses. Those prefixed 'DLB2_DRV' are only used by
 * the PF driver.
 */
#define DLB2_DRV_LDB_PP_BASE   0x2300000
#define DLB2_DRV_LDB_PP_STRIDE 0x1000
#define DLB2_DRV_LDB_PP_BOUND  (DLB2_DRV_LDB_PP_BASE + \
				DLB2_DRV_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_DRV_DIR_PP_BASE   0x2200000
#define DLB2_DRV_DIR_PP_STRIDE 0x1000
#define DLB2_DRV_DIR_PP_BOUND  (DLB2_DRV_DIR_PP_BASE + \
				DLB2_DRV_DIR_PP_STRIDE * DLB2_MAX_NUM_DIR_PORTS)
#define DLB2_LDB_PP_BASE       0x2100000
#define DLB2_LDB_PP_STRIDE     0x1000
#define DLB2_LDB_PP_BOUND      (DLB2_LDB_PP_BASE + \
				DLB2_LDB_PP_STRIDE * DLB2_MAX_NUM_LDB_PORTS)
#define DLB2_LDB_PP_OFFS(id)   (DLB2_LDB_PP_BASE + (id) * DLB2_PP_SIZE)
#define DLB2_DIR_PP_BASE       0x2000000
#define DLB2_DIR_PP_STRIDE     0x1000
#define DLB2_DIR_PP_BOUND      (DLB2_DIR_PP_BASE + \
				DLB2_DIR_PP_STRIDE * DLB2_MAX_NUM_DIR_PORTS)
#define DLB2_DIR_PP_OFFS(id)   (DLB2_DIR_PP_BASE + (id) * DLB2_PP_SIZE)
|
||||
|
||||
struct dlb2_resource_id {
|
||||
u32 phys_id;
|
||||
u32 virt_id;
|
||||
u8 vdev_owned;
|
||||
u8 vdev_id;
|
||||
};
|
||||
|
||||
struct dlb2_freelist {
|
||||
u32 base;
|
||||
u32 bound;
|
||||
u32 offset;
|
||||
};
|
||||
|
||||
static inline u32 dlb2_freelist_count(struct dlb2_freelist *list)
|
||||
{
|
||||
return list->bound - list->base - list->offset;
|
||||
}
|
||||
|
||||
struct dlb2_hcw {
|
||||
u64 data;
|
||||
/* Word 3 */
|
||||
u16 opaque;
|
||||
u8 qid;
|
||||
u8 sched_type:2;
|
||||
u8 priority:3;
|
||||
u8 msg_type:3;
|
||||
/* Word 4 */
|
||||
u16 lock_id;
|
||||
u8 ts_flag:1;
|
||||
u8 rsvd1:2;
|
||||
u8 no_dec:1;
|
||||
u8 cmp_id:4;
|
||||
u8 cq_token:1;
|
||||
u8 qe_comp:1;
|
||||
u8 qe_frag:1;
|
||||
u8 qe_valid:1;
|
||||
u8 int_arm:1;
|
||||
u8 error:1;
|
||||
u8 rsvd:2;
|
||||
};
|
||||
|
||||
struct dlb2_ldb_queue {
|
||||
struct dlb2_list_entry domain_list;
|
||||
struct dlb2_list_entry func_list;
|
||||
struct dlb2_resource_id id;
|
||||
struct dlb2_resource_id domain_id;
|
||||
u32 num_qid_inflights;
|
||||
u32 aqed_limit;
|
||||
u32 sn_group; /* sn == sequence number */
|
||||
u32 sn_slot;
|
||||
u32 num_mappings;
|
||||
u8 sn_cfg_valid;
|
||||
u8 num_pending_additions;
|
||||
u8 owned;
|
||||
u8 configured;
|
||||
};
|
||||
|
||||
/*
|
||||
* Directed ports and queues are paired by nature, so the driver tracks them
|
||||
* with a single data structure.
|
||||
*/
|
||||
struct dlb2_dir_pq_pair {
|
||||
struct dlb2_list_entry domain_list;
|
||||
struct dlb2_list_entry func_list;
|
||||
struct dlb2_resource_id id;
|
||||
struct dlb2_resource_id domain_id;
|
||||
u32 ref_cnt;
|
||||
u8 init_tkn_cnt;
|
||||
u8 queue_configured;
|
||||
u8 port_configured;
|
||||
u8 owned;
|
||||
u8 enabled;
|
||||
};
|
||||
|
||||
/* Lifecycle of a queue-to-port CQ slot mapping. */
enum dlb2_qid_map_state {
	/* The slot does not contain a valid queue mapping */
	DLB2_QUEUE_UNMAPPED,
	/* The slot contains a valid queue mapping */
	DLB2_QUEUE_MAPPED,
	/* The driver is mapping a queue into this slot */
	DLB2_QUEUE_MAP_IN_PROG,
	/* The driver is unmapping a queue from this slot */
	DLB2_QUEUE_UNMAP_IN_PROG,
	/*
	 * The driver is unmapping a queue from this slot, and once complete
	 * will replace it with another mapping.
	 */
	DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP,
};
|
||||
|
||||
struct dlb2_ldb_port_qid_map {
|
||||
enum dlb2_qid_map_state state;
|
||||
u16 qid;
|
||||
u16 pending_qid;
|
||||
u8 priority;
|
||||
u8 pending_priority;
|
||||
};
|
||||
|
||||
struct dlb2_ldb_port {
|
||||
struct dlb2_list_entry domain_list;
|
||||
struct dlb2_list_entry func_list;
|
||||
struct dlb2_resource_id id;
|
||||
struct dlb2_resource_id domain_id;
|
||||
/* The qid_map represents the hardware QID mapping state. */
|
||||
struct dlb2_ldb_port_qid_map qid_map[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
|
||||
u32 hist_list_entry_base;
|
||||
u32 hist_list_entry_limit;
|
||||
u32 ref_cnt;
|
||||
u8 init_tkn_cnt;
|
||||
u8 num_pending_removals;
|
||||
u8 num_mappings;
|
||||
u8 owned;
|
||||
u8 enabled;
|
||||
u8 configured;
|
||||
};
|
||||
|
||||
struct dlb2_sn_group {
|
||||
u32 mode;
|
||||
u32 sequence_numbers_per_queue;
|
||||
u32 slot_use_bitmap;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group)
|
||||
{
|
||||
u32 mask[] = {
|
||||
0x0000ffff, /* 64 SNs per queue */
|
||||
0x000000ff, /* 128 SNs per queue */
|
||||
0x0000000f, /* 256 SNs per queue */
|
||||
0x00000003, /* 512 SNs per queue */
|
||||
0x00000001}; /* 1024 SNs per queue */
|
||||
|
||||
return group->slot_use_bitmap == mask[group->mode];
|
||||
}
|
||||
|
||||
static inline int dlb2_sn_group_alloc_slot(struct dlb2_sn_group *group)
|
||||
{
|
||||
u32 bound[6] = {16, 8, 4, 2, 1};
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < bound[group->mode]; i++) {
|
||||
if (!(group->slot_use_bitmap & (1 << i))) {
|
||||
group->slot_use_bitmap |= 1 << i;
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static inline void
|
||||
dlb2_sn_group_free_slot(struct dlb2_sn_group *group, int slot)
|
||||
{
|
||||
group->slot_use_bitmap &= ~(1 << slot);
|
||||
}
|
||||
|
||||
static inline int dlb2_sn_group_used_slots(struct dlb2_sn_group *group)
|
||||
{
|
||||
int i, cnt = 0;
|
||||
|
||||
for (i = 0; i < 32; i++)
|
||||
cnt += !!(group->slot_use_bitmap & (1 << i));
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
struct dlb2_hw_domain {
|
||||
struct dlb2_function_resources *parent_func;
|
||||
struct dlb2_list_entry func_list;
|
||||
struct dlb2_list_head used_ldb_queues;
|
||||
struct dlb2_list_head used_ldb_ports[DLB2_NUM_COS_DOMAINS];
|
||||
struct dlb2_list_head used_dir_pq_pairs;
|
||||
struct dlb2_list_head avail_ldb_queues;
|
||||
struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
|
||||
struct dlb2_list_head avail_dir_pq_pairs;
|
||||
u32 total_hist_list_entries;
|
||||
u32 avail_hist_list_entries;
|
||||
u32 hist_list_entry_base;
|
||||
u32 hist_list_entry_offset;
|
||||
u32 num_ldb_credits;
|
||||
u32 num_dir_credits;
|
||||
u32 num_avail_aqed_entries;
|
||||
u32 num_used_aqed_entries;
|
||||
struct dlb2_resource_id id;
|
||||
int num_pending_removals;
|
||||
int num_pending_additions;
|
||||
u8 configured;
|
||||
u8 started;
|
||||
};
|
||||
|
||||
struct dlb2_bitmap;
|
||||
|
||||
struct dlb2_function_resources {
|
||||
struct dlb2_list_head avail_domains;
|
||||
struct dlb2_list_head used_domains;
|
||||
struct dlb2_list_head avail_ldb_queues;
|
||||
struct dlb2_list_head avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
|
||||
struct dlb2_list_head avail_dir_pq_pairs;
|
||||
struct dlb2_bitmap *avail_hist_list_entries;
|
||||
u32 num_avail_domains;
|
||||
u32 num_avail_ldb_queues;
|
||||
u32 num_avail_ldb_ports[DLB2_NUM_COS_DOMAINS];
|
||||
u32 num_avail_dir_pq_pairs;
|
||||
u32 num_avail_qed_entries;
|
||||
u32 num_avail_dqed_entries;
|
||||
u32 num_avail_aqed_entries;
|
||||
u8 locked; /* (VDEV only) */
|
||||
};
|
||||
|
||||
/*
|
||||
* After initialization, each resource in dlb2_hw_resources is located in one
|
||||
* of the following lists:
|
||||
* -- The PF's available resources list. These are unconfigured resources owned
|
||||
* by the PF and not allocated to a dlb2 scheduling domain.
|
||||
* -- A VDEV's available resources list. These are VDEV-owned unconfigured
|
||||
* resources not allocated to a dlb2 scheduling domain.
|
||||
* -- A domain's available resources list. These are domain-owned unconfigured
|
||||
* resources.
|
||||
* -- A domain's used resources list. These are domain-owned configured
|
||||
* resources.
|
||||
*
|
||||
* A resource moves to a new list when a VDEV or domain is created or destroyed,
|
||||
* or when the resource is configured.
|
||||
*/
|
||||
struct dlb2_hw_resources {
|
||||
struct dlb2_ldb_queue ldb_queues[DLB2_MAX_NUM_LDB_QUEUES];
|
||||
struct dlb2_ldb_port ldb_ports[DLB2_MAX_NUM_LDB_PORTS];
|
||||
struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS];
|
||||
struct dlb2_sn_group sn_groups[DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS];
|
||||
};
|
||||
|
||||
struct dlb2_mbox {
|
||||
u32 *mbox;
|
||||
u32 *isr_in_progress;
|
||||
};
|
||||
|
||||
struct dlb2_sw_mbox {
|
||||
struct dlb2_mbox vdev_to_pf;
|
||||
struct dlb2_mbox pf_to_vdev;
|
||||
void (*pf_to_vdev_inject)(void *arg);
|
||||
void *pf_to_vdev_inject_arg;
|
||||
};
|
||||
|
||||
struct dlb2_hw {
|
||||
/* BAR 0 address */
|
||||
void *csr_kva;
|
||||
unsigned long csr_phys_addr;
|
||||
/* BAR 2 address */
|
||||
void *func_kva;
|
||||
unsigned long func_phys_addr;
|
||||
|
||||
/* Resource tracking */
|
||||
struct dlb2_hw_resources rsrcs;
|
||||
struct dlb2_function_resources pf;
|
||||
struct dlb2_function_resources vdev[DLB2_MAX_NUM_VDEVS];
|
||||
struct dlb2_hw_domain domains[DLB2_MAX_NUM_DOMAINS];
|
||||
u8 cos_reservation[DLB2_NUM_COS_DOMAINS];
|
||||
|
||||
/* Virtualization */
|
||||
int virt_mode;
|
||||
struct dlb2_sw_mbox mbox[DLB2_MAX_NUM_VDEVS];
|
||||
unsigned int pasid[DLB2_MAX_NUM_VDEVS];
|
||||
};
|
||||
|
||||
#endif /* __DLB2_HW_TYPES_H */
|
596
drivers/event/dlb2/pf/base/dlb2_mbox.h
Normal file
596
drivers/event/dlb2/pf/base/dlb2_mbox.h
Normal file
@ -0,0 +1,596 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __DLB2_BASE_DLB2_MBOX_H
|
||||
#define __DLB2_BASE_DLB2_MBOX_H
|
||||
|
||||
#include "dlb2_osdep_types.h"
|
||||
#include "dlb2_regs.h"
|
||||
|
||||
#define DLB2_MBOX_INTERFACE_VERSION 1
|
||||
|
||||
/*
|
||||
* The PF uses its PF->VF mailbox to send responses to VF requests, as well as
|
||||
* to send requests of its own (e.g. notifying a VF of an impending FLR).
|
||||
* To avoid communication race conditions, e.g. the PF sends a response and then
|
||||
* sends a request before the VF reads the response, the PF->VF mailbox is
|
||||
* divided into two sections:
|
||||
* - Bytes 0-47: PF responses
|
||||
* - Bytes 48-63: PF requests
|
||||
*
|
||||
* Partitioning the PF->VF mailbox allows responses and requests to occupy the
|
||||
* mailbox simultaneously.
|
||||
*/
|
||||
#define DLB2_PF2VF_RESP_BYTES 48
|
||||
#define DLB2_PF2VF_RESP_BASE 0
|
||||
#define DLB2_PF2VF_RESP_BASE_WORD (DLB2_PF2VF_RESP_BASE / 4)
|
||||
|
||||
#define DLB2_PF2VF_REQ_BYTES 16
|
||||
#define DLB2_PF2VF_REQ_BASE (DLB2_PF2VF_RESP_BASE + DLB2_PF2VF_RESP_BYTES)
|
||||
#define DLB2_PF2VF_REQ_BASE_WORD (DLB2_PF2VF_REQ_BASE / 4)
|
||||
|
||||
/*
|
||||
* Similarly, the VF->PF mailbox is divided into two sections:
|
||||
* - Bytes 0-239: VF requests
|
||||
* -- (Bytes 0-3 are unused due to a hardware errata)
|
||||
* - Bytes 240-255: VF responses
|
||||
*/
|
||||
#define DLB2_VF2PF_REQ_BYTES 236
|
||||
#define DLB2_VF2PF_REQ_BASE 4
|
||||
#define DLB2_VF2PF_REQ_BASE_WORD (DLB2_VF2PF_REQ_BASE / 4)
|
||||
|
||||
#define DLB2_VF2PF_RESP_BYTES 16
|
||||
#define DLB2_VF2PF_RESP_BASE (DLB2_VF2PF_REQ_BASE + DLB2_VF2PF_REQ_BYTES)
|
||||
#define DLB2_VF2PF_RESP_BASE_WORD (DLB2_VF2PF_RESP_BASE / 4)
|
||||
|
||||
/* VF-initiated commands */
|
||||
/* Keep this enum and dlb2_mbox_cmd_type_strings below in sync. */
enum dlb2_mbox_cmd_type {
	DLB2_MBOX_CMD_REGISTER,
	DLB2_MBOX_CMD_UNREGISTER,
	DLB2_MBOX_CMD_GET_NUM_RESOURCES,
	DLB2_MBOX_CMD_CREATE_SCHED_DOMAIN,
	DLB2_MBOX_CMD_RESET_SCHED_DOMAIN,
	DLB2_MBOX_CMD_CREATE_LDB_QUEUE,
	DLB2_MBOX_CMD_CREATE_DIR_QUEUE,
	DLB2_MBOX_CMD_CREATE_LDB_PORT,
	DLB2_MBOX_CMD_CREATE_DIR_PORT,
	DLB2_MBOX_CMD_ENABLE_LDB_PORT,
	DLB2_MBOX_CMD_DISABLE_LDB_PORT,
	DLB2_MBOX_CMD_ENABLE_DIR_PORT,
	DLB2_MBOX_CMD_DISABLE_DIR_PORT,
	DLB2_MBOX_CMD_LDB_PORT_OWNED_BY_DOMAIN,
	DLB2_MBOX_CMD_DIR_PORT_OWNED_BY_DOMAIN,
	DLB2_MBOX_CMD_MAP_QID,
	DLB2_MBOX_CMD_UNMAP_QID,
	DLB2_MBOX_CMD_START_DOMAIN,
	DLB2_MBOX_CMD_ENABLE_LDB_PORT_INTR,
	DLB2_MBOX_CMD_ENABLE_DIR_PORT_INTR,
	DLB2_MBOX_CMD_ARM_CQ_INTR,
	DLB2_MBOX_CMD_GET_NUM_USED_RESOURCES,
	DLB2_MBOX_CMD_GET_SN_ALLOCATION,
	DLB2_MBOX_CMD_GET_LDB_QUEUE_DEPTH,
	DLB2_MBOX_CMD_GET_DIR_QUEUE_DEPTH,
	DLB2_MBOX_CMD_PENDING_PORT_UNMAPS,
	DLB2_MBOX_CMD_GET_COS_BW,
	DLB2_MBOX_CMD_GET_SN_OCCUPANCY,
	DLB2_MBOX_CMD_QUERY_CQ_POLL_MODE,

	/* NUM_QE_CMD_TYPES must be last */
	NUM_DLB2_MBOX_CMD_TYPES,
};

/* Printable names, indexed by enum dlb2_mbox_cmd_type. */
static const char dlb2_mbox_cmd_type_strings[][128] = {
	"DLB2_MBOX_CMD_REGISTER",
	"DLB2_MBOX_CMD_UNREGISTER",
	"DLB2_MBOX_CMD_GET_NUM_RESOURCES",
	"DLB2_MBOX_CMD_CREATE_SCHED_DOMAIN",
	"DLB2_MBOX_CMD_RESET_SCHED_DOMAIN",
	"DLB2_MBOX_CMD_CREATE_LDB_QUEUE",
	"DLB2_MBOX_CMD_CREATE_DIR_QUEUE",
	"DLB2_MBOX_CMD_CREATE_LDB_PORT",
	"DLB2_MBOX_CMD_CREATE_DIR_PORT",
	"DLB2_MBOX_CMD_ENABLE_LDB_PORT",
	"DLB2_MBOX_CMD_DISABLE_LDB_PORT",
	"DLB2_MBOX_CMD_ENABLE_DIR_PORT",
	"DLB2_MBOX_CMD_DISABLE_DIR_PORT",
	"DLB2_MBOX_CMD_LDB_PORT_OWNED_BY_DOMAIN",
	"DLB2_MBOX_CMD_DIR_PORT_OWNED_BY_DOMAIN",
	"DLB2_MBOX_CMD_MAP_QID",
	"DLB2_MBOX_CMD_UNMAP_QID",
	"DLB2_MBOX_CMD_START_DOMAIN",
	"DLB2_MBOX_CMD_ENABLE_LDB_PORT_INTR",
	"DLB2_MBOX_CMD_ENABLE_DIR_PORT_INTR",
	"DLB2_MBOX_CMD_ARM_CQ_INTR",
	"DLB2_MBOX_CMD_GET_NUM_USED_RESOURCES",
	"DLB2_MBOX_CMD_GET_SN_ALLOCATION",
	"DLB2_MBOX_CMD_GET_LDB_QUEUE_DEPTH",
	"DLB2_MBOX_CMD_GET_DIR_QUEUE_DEPTH",
	"DLB2_MBOX_CMD_PENDING_PORT_UNMAPS",
	"DLB2_MBOX_CMD_GET_COS_BW",
	"DLB2_MBOX_CMD_GET_SN_OCCUPANCY",
	"DLB2_MBOX_CMD_QUERY_CQ_POLL_MODE",
};
|
||||
|
||||
/* PF-initiated commands */
|
||||
/* PF-initiated commands; keep in sync with the string table below. */
enum dlb2_mbox_vf_cmd_type {
	DLB2_MBOX_VF_CMD_DOMAIN_ALERT,
	DLB2_MBOX_VF_CMD_NOTIFICATION,
	DLB2_MBOX_VF_CMD_IN_USE,

	/* NUM_DLB2_MBOX_VF_CMD_TYPES must be last */
	NUM_DLB2_MBOX_VF_CMD_TYPES,
};

/* Printable names, indexed by enum dlb2_mbox_vf_cmd_type. */
static const char dlb2_mbox_vf_cmd_type_strings[][128] = {
	"DLB2_MBOX_VF_CMD_DOMAIN_ALERT",
	"DLB2_MBOX_VF_CMD_NOTIFICATION",
	"DLB2_MBOX_VF_CMD_IN_USE",
};
|
||||
|
||||
/* Extract the command type / printable name from a request header. */
#define DLB2_MBOX_CMD_TYPE(hdr) \
	(((struct dlb2_mbox_req_hdr *)hdr)->type)
#define DLB2_MBOX_CMD_STRING(hdr) \
	dlb2_mbox_cmd_type_strings[DLB2_MBOX_CMD_TYPE(hdr)]

/* Response status codes; keep in sync with the string table below. */
enum dlb2_mbox_status_type {
	DLB2_MBOX_ST_SUCCESS,
	DLB2_MBOX_ST_INVALID_CMD_TYPE,
	DLB2_MBOX_ST_VERSION_MISMATCH,
	DLB2_MBOX_ST_INVALID_OWNER_VF,
};

/* Printable names, indexed by enum dlb2_mbox_status_type. */
static const char dlb2_mbox_status_type_strings[][128] = {
	"DLB2_MBOX_ST_SUCCESS",
	"DLB2_MBOX_ST_INVALID_CMD_TYPE",
	"DLB2_MBOX_ST_VERSION_MISMATCH",
	"DLB2_MBOX_ST_INVALID_OWNER_VF",
};

/* Extract the status / printable name from a response header. */
#define DLB2_MBOX_ST_TYPE(hdr) \
	(((struct dlb2_mbox_resp_hdr *)hdr)->status)
#define DLB2_MBOX_ST_STRING(hdr) \
	dlb2_mbox_status_type_strings[DLB2_MBOX_ST_TYPE(hdr)]
|
||||
|
||||
/* This structure is always the first field in a request structure */
|
||||
struct dlb2_mbox_req_hdr {
|
||||
u32 type;
|
||||
};
|
||||
|
||||
/* This structure is always the first field in a response structure */
|
||||
struct dlb2_mbox_resp_hdr {
|
||||
u32 status;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_register_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u16 min_interface_version;
|
||||
u16 max_interface_version;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_register_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 interface_version;
|
||||
u8 pf_id;
|
||||
u8 vf_id;
|
||||
u8 is_auxiliary_vf;
|
||||
u8 primary_vf_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_unregister_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_unregister_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_num_resources_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_num_resources_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u16 num_sched_domains;
|
||||
u16 num_ldb_queues;
|
||||
u16 num_ldb_ports;
|
||||
u16 num_cos_ldb_ports[4];
|
||||
u16 num_dir_ports;
|
||||
u32 num_atomic_inflights;
|
||||
u32 num_hist_list_entries;
|
||||
u32 max_contiguous_hist_list_entries;
|
||||
u16 num_ldb_credits;
|
||||
u16 num_dir_credits;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_sched_domain_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 num_ldb_queues;
|
||||
u32 num_ldb_ports;
|
||||
u32 num_cos_ldb_ports[4];
|
||||
u32 num_dir_ports;
|
||||
u32 num_atomic_inflights;
|
||||
u32 num_hist_list_entries;
|
||||
u32 num_ldb_credits;
|
||||
u32 num_dir_credits;
|
||||
u8 cos_strict;
|
||||
u8 padding0[3];
|
||||
u32 padding1;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_sched_domain_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_reset_sched_domain_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_reset_sched_domain_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_ldb_queue_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 num_sequence_numbers;
|
||||
u32 num_qid_inflights;
|
||||
u32 num_atomic_inflights;
|
||||
u32 lock_id_comp_level;
|
||||
u32 depth_threshold;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_ldb_queue_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_dir_queue_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 depth_threshold;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_dir_queue_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_ldb_port_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u16 cq_depth;
|
||||
u16 cq_history_list_size;
|
||||
u8 cos_id;
|
||||
u8 cos_strict;
|
||||
u16 padding1;
|
||||
u64 cq_base_address;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_ldb_port_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_dir_port_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u64 cq_base_address;
|
||||
u16 cq_depth;
|
||||
u16 padding0;
|
||||
s32 queue_id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_create_dir_port_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_ldb_port_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_ldb_port_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_disable_ldb_port_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_disable_ldb_port_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_dir_port_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_dir_port_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_disable_dir_port_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_disable_dir_port_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_ldb_port_owned_by_domain_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_ldb_port_owned_by_domain_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
s32 owned;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_dir_port_owned_by_domain_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_dir_port_owned_by_domain_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
s32 owned;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_map_qid_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 qid;
|
||||
u32 priority;
|
||||
u32 padding0;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_map_qid_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_unmap_qid_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 qid;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_unmap_qid_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_start_domain_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_start_domain_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_ldb_port_intr_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u16 port_id;
|
||||
u16 thresh;
|
||||
u16 vector;
|
||||
u16 owner_vf;
|
||||
u16 reserved[2];
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_ldb_port_intr_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_dir_port_intr_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u16 port_id;
|
||||
u16 thresh;
|
||||
u16 vector;
|
||||
u16 owner_vf;
|
||||
u16 reserved[2];
|
||||
};
|
||||
|
||||
struct dlb2_mbox_enable_dir_port_intr_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_arm_cq_intr_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 is_ldb;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_arm_cq_intr_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 padding0;
|
||||
};
|
||||
|
||||
/*
|
||||
* The alert_id and aux_alert_data follows the format of the alerts defined in
|
||||
* dlb2_types.h. The alert id contains an enum dlb2_domain_alert_id value, and
|
||||
* the aux_alert_data value varies depending on the alert.
|
||||
*/
|
||||
struct dlb2_mbox_vf_alert_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 alert_id;
|
||||
u32 aux_alert_data;
|
||||
};
|
||||
|
||||
enum dlb2_mbox_vf_notification_type {
|
||||
DLB2_MBOX_VF_NOTIFICATION_PRE_RESET,
|
||||
DLB2_MBOX_VF_NOTIFICATION_POST_RESET,
|
||||
|
||||
/* NUM_DLB2_MBOX_VF_NOTIFICATION_TYPES must be last */
|
||||
NUM_DLB2_MBOX_VF_NOTIFICATION_TYPES,
|
||||
};
|
||||
|
||||
struct dlb2_mbox_vf_notification_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 notification;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_vf_in_use_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_vf_in_use_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 in_use;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_sn_allocation_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 group_id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_sn_allocation_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 num;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_ldb_queue_depth_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 queue_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_ldb_queue_depth_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 depth;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_dir_queue_depth_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 queue_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_dir_queue_depth_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 depth;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_pending_port_unmaps_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 domain_id;
|
||||
u32 port_id;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_pending_port_unmaps_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 num;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_cos_bw_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 cos_id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_cos_bw_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 num;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_sn_occupancy_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 group_id;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_get_sn_occupancy_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 num;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_query_cq_poll_mode_cmd_req {
|
||||
struct dlb2_mbox_req_hdr hdr;
|
||||
u32 padding;
|
||||
};
|
||||
|
||||
struct dlb2_mbox_query_cq_poll_mode_cmd_resp {
|
||||
struct dlb2_mbox_resp_hdr hdr;
|
||||
u32 error_code;
|
||||
u32 status;
|
||||
u32 mode;
|
||||
};
|
||||
|
||||
#endif /* __DLB2_BASE_DLB2_MBOX_H */
|
230
drivers/event/dlb2/pf/base/dlb2_osdep.h
Normal file
230
drivers/event/dlb2/pf/base/dlb2_osdep.h
Normal file
@ -0,0 +1,230 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __DLB2_OSDEP_H
|
||||
#define __DLB2_OSDEP_H
|
||||
|
||||
#include <string.h>
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
#include <pthread.h>
|
||||
|
||||
#include <rte_string_fns.h>
|
||||
#include <rte_cycles.h>
|
||||
#include <rte_io.h>
|
||||
#include <rte_log.h>
|
||||
#include <rte_spinlock.h>
|
||||
#include "../dlb2_main.h"
|
||||
#include "dlb2_resource.h"
|
||||
#include "../../dlb2_log.h"
|
||||
#include "../../dlb2_user.h"
|
||||
|
||||
|
||||
#define DLB2_PCI_REG_READ(addr) rte_read32((void *)addr)
|
||||
#define DLB2_PCI_REG_WRITE(reg, value) rte_write32(value, (void *)reg)
|
||||
|
||||
/* Read/write register 'reg' in the CSR BAR space */
|
||||
#define DLB2_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
|
||||
#define DLB2_CSR_RD(hw, reg) \
|
||||
DLB2_PCI_REG_READ(DLB2_CSR_REG_ADDR((hw), (reg)))
|
||||
#define DLB2_CSR_WR(hw, reg, value) \
|
||||
DLB2_PCI_REG_WRITE(DLB2_CSR_REG_ADDR((hw), (reg)), (value))
|
||||
|
||||
/* Read/write register 'reg' in the func BAR space */
|
||||
#define DLB2_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
|
||||
#define DLB2_FUNC_RD(hw, reg) \
|
||||
DLB2_PCI_REG_READ(DLB2_FUNC_REG_ADDR((hw), (reg)))
|
||||
#define DLB2_FUNC_WR(hw, reg, value) \
|
||||
DLB2_PCI_REG_WRITE(DLB2_FUNC_REG_ADDR((hw), (reg)), (value))
|
||||
|
||||
/* Map to PMDs logging interface */
|
||||
#define DLB2_ERR(dev, fmt, args...) \
|
||||
DLB2_LOG_ERR(fmt, ## args)
|
||||
|
||||
#define DLB2_INFO(dev, fmt, args...) \
|
||||
DLB2_LOG_INFO(fmt, ## args)
|
||||
|
||||
#define DLB2_DEBUG(dev, fmt, args...) \
|
||||
DLB2_LOG_DBG(fmt, ## args)
|
||||
|
||||
/**
|
||||
* os_udelay() - busy-wait for a number of microseconds
|
||||
* @usecs: delay duration.
|
||||
*/
|
||||
static inline void os_udelay(int usecs)
|
||||
{
|
||||
rte_delay_us(usecs);
|
||||
}
|
||||
|
||||
/**
|
||||
* os_msleep() - sleep for a number of milliseconds
|
||||
* @usecs: delay duration.
|
||||
*/
|
||||
static inline void os_msleep(int msecs)
|
||||
{
|
||||
rte_delay_ms(msecs);
|
||||
}
|
||||
|
||||
#define DLB2_PP_BASE(__is_ldb) \
|
||||
((__is_ldb) ? DLB2_LDB_PP_BASE : DLB2_DIR_PP_BASE)
|
||||
|
||||
/**
|
||||
* os_map_producer_port() - map a producer port into the caller's address space
|
||||
* @hw: dlb2_hw handle for a particular device.
|
||||
* @port_id: port ID
|
||||
* @is_ldb: true for load-balanced port, false for a directed port
|
||||
*
|
||||
* This function maps the requested producer port memory into the caller's
|
||||
* address space.
|
||||
*
|
||||
* Return:
|
||||
* Returns the base address at which the PP memory was mapped, else NULL.
|
||||
*/
|
||||
static inline void *os_map_producer_port(struct dlb2_hw *hw,
|
||||
u8 port_id,
|
||||
bool is_ldb)
|
||||
{
|
||||
uint64_t addr;
|
||||
uint64_t pp_dma_base;
|
||||
|
||||
pp_dma_base = (uintptr_t)hw->func_kva + DLB2_PP_BASE(is_ldb);
|
||||
addr = (pp_dma_base + (PAGE_SIZE * port_id));
|
||||
|
||||
return (void *)(uintptr_t)addr;
|
||||
}
|
||||
|
||||
/**
|
||||
* os_unmap_producer_port() - unmap a producer port
|
||||
* @addr: mapped producer port address
|
||||
*
|
||||
* This function undoes os_map_producer_port() by unmapping the producer port
|
||||
* memory from the caller's address space.
|
||||
*
|
||||
* Return:
|
||||
* Returns the base address at which the PP memory was mapped, else NULL.
|
||||
*/
|
||||
static inline void os_unmap_producer_port(struct dlb2_hw *hw, void *addr)
|
||||
{
|
||||
RTE_SET_USED(hw);
|
||||
RTE_SET_USED(addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* os_fence_hcw() - fence an HCW to ensure it arrives at the device
|
||||
* @hw: dlb2_hw handle for a particular device.
|
||||
* @pp_addr: producer port address
|
||||
*/
|
||||
static inline void os_fence_hcw(struct dlb2_hw *hw, u64 *pp_addr)
|
||||
{
|
||||
RTE_SET_USED(hw);
|
||||
|
||||
/* To ensure outstanding HCWs reach the device, read the PP address. IA
|
||||
* memory ordering prevents reads from passing older writes, and the
|
||||
* mfence also ensures this.
|
||||
*/
|
||||
rte_mb();
|
||||
|
||||
*(volatile u64 *)pp_addr;
|
||||
}
|
||||
|
||||
/**
|
||||
* DLB2_HW_ERR() - log an error message
|
||||
* @dlb2: dlb2_hw handle for a particular device.
|
||||
* @...: variable string args.
|
||||
*/
|
||||
#define DLB2_HW_ERR(dlb2, ...) do { \
|
||||
RTE_SET_USED(dlb2); \
|
||||
DLB2_ERR(dlb2, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* DLB2_HW_DBG() - log an info message
|
||||
* @dlb2: dlb2_hw handle for a particular device.
|
||||
* @...: variable string args.
|
||||
*/
|
||||
#define DLB2_HW_DBG(dlb2, ...) do { \
|
||||
RTE_SET_USED(dlb2); \
|
||||
DLB2_DEBUG(dlb2, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
/* The callback runs until it completes all outstanding QID->CQ
|
||||
* map and unmap requests. To prevent deadlock, this function gives other
|
||||
* threads a chance to grab the resource mutex and configure hardware.
|
||||
*/
|
||||
static void *dlb2_complete_queue_map_unmap(void *__args)
|
||||
{
|
||||
struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)__args;
|
||||
int ret;
|
||||
|
||||
while (1) {
|
||||
rte_spinlock_lock(&dlb2_dev->resource_mutex);
|
||||
|
||||
ret = dlb2_finish_unmap_qid_procedures(&dlb2_dev->hw);
|
||||
ret += dlb2_finish_map_qid_procedures(&dlb2_dev->hw);
|
||||
|
||||
if (ret != 0) {
|
||||
rte_spinlock_unlock(&dlb2_dev->resource_mutex);
|
||||
/* Relinquish the CPU so the application can process
|
||||
* its CQs, so this function doesn't deadlock.
|
||||
*/
|
||||
sched_yield();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
dlb2_dev->worker_launched = false;
|
||||
|
||||
rte_spinlock_unlock(&dlb2_dev->resource_mutex);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* os_schedule_work() - launch a thread to process pending map and unmap work
|
||||
* @hw: dlb2_hw handle for a particular device.
|
||||
*
|
||||
* This function launches a kernel thread that will run until all pending
|
||||
* map and unmap procedures are complete.
|
||||
*/
|
||||
static inline void os_schedule_work(struct dlb2_hw *hw)
|
||||
{
|
||||
struct dlb2_dev *dlb2_dev;
|
||||
pthread_t complete_queue_map_unmap_thread;
|
||||
int ret;
|
||||
|
||||
dlb2_dev = container_of(hw, struct dlb2_dev, hw);
|
||||
|
||||
ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
|
||||
"dlb_queue_unmap_waiter",
|
||||
NULL,
|
||||
dlb2_complete_queue_map_unmap,
|
||||
dlb2_dev);
|
||||
if (ret)
|
||||
DLB2_ERR(dlb2_dev,
|
||||
"Could not create queue complete map/unmap thread, err=%d\n",
|
||||
ret);
|
||||
else
|
||||
dlb2_dev->worker_launched = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* os_worker_active() - query whether the map/unmap worker thread is active
|
||||
* @hw: dlb2_hw handle for a particular device.
|
||||
*
|
||||
* This function returns a boolean indicating whether a thread (launched by
|
||||
* os_schedule_work()) is active. This function is used to determine
|
||||
* whether or not to launch a worker thread.
|
||||
*/
|
||||
static inline bool os_worker_active(struct dlb2_hw *hw)
|
||||
{
|
||||
struct dlb2_dev *dlb2_dev;
|
||||
|
||||
dlb2_dev = container_of(hw, struct dlb2_dev, hw);
|
||||
|
||||
return dlb2_dev->worker_launched;
|
||||
}
|
||||
|
||||
#endif /* __DLB2_OSDEP_H */
|
440
drivers/event/dlb2/pf/base/dlb2_osdep_bitmap.h
Normal file
440
drivers/event/dlb2/pf/base/dlb2_osdep_bitmap.h
Normal file
@ -0,0 +1,440 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __DLB2_OSDEP_BITMAP_H
|
||||
#define __DLB2_OSDEP_BITMAP_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
#include <rte_bitmap.h>
|
||||
#include <rte_string_fns.h>
|
||||
#include <rte_malloc.h>
|
||||
#include <rte_errno.h>
|
||||
#include "../dlb2_main.h"
|
||||
|
||||
/*************************/
|
||||
/*** Bitmap operations ***/
|
||||
/*************************/
|
||||
struct dlb2_bitmap {
|
||||
struct rte_bitmap *map;
|
||||
unsigned int len;
|
||||
};
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_alloc() - alloc a bitmap data structure
|
||||
* @bitmap: pointer to dlb2_bitmap structure pointer.
|
||||
* @len: number of entries in the bitmap.
|
||||
*
|
||||
* This function allocates a bitmap and initializes it with length @len. All
|
||||
* entries are initially zero.
|
||||
*
|
||||
* Return:
|
||||
* Returns 0 upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or len is 0.
|
||||
* ENOMEM - could not allocate memory for the bitmap data structure.
|
||||
*/
|
||||
static inline int dlb2_bitmap_alloc(struct dlb2_bitmap **bitmap,
|
||||
unsigned int len)
|
||||
{
|
||||
struct dlb2_bitmap *bm;
|
||||
void *mem;
|
||||
uint32_t alloc_size;
|
||||
uint32_t nbits = (uint32_t)len;
|
||||
|
||||
if (bitmap == NULL || nbits == 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* Allocate DLB2 bitmap control struct */
|
||||
bm = rte_malloc("DLB2_PF",
|
||||
sizeof(struct dlb2_bitmap),
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
|
||||
if (bm == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Allocate bitmap memory */
|
||||
alloc_size = rte_bitmap_get_memory_footprint(nbits);
|
||||
mem = rte_malloc("DLB2_PF_BITMAP", alloc_size, RTE_CACHE_LINE_SIZE);
|
||||
if (mem == NULL) {
|
||||
rte_free(bm);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bm->map = rte_bitmap_init(len, mem, alloc_size);
|
||||
if (bm->map == NULL) {
|
||||
rte_free(mem);
|
||||
rte_free(bm);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bm->len = len;
|
||||
|
||||
*bitmap = bm;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_free() - free a previously allocated bitmap data structure
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
*
|
||||
* This function frees a bitmap that was allocated with dlb2_bitmap_alloc().
|
||||
*/
|
||||
static inline void dlb2_bitmap_free(struct dlb2_bitmap *bitmap)
|
||||
{
|
||||
if (bitmap == NULL)
|
||||
return;
|
||||
|
||||
rte_free(bitmap->map);
|
||||
rte_free(bitmap);
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_fill() - fill a bitmap with all 1s
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
*
|
||||
* This function sets all bitmap values to 1.
|
||||
*
|
||||
* Return:
|
||||
* Returns 0 upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized.
|
||||
*/
|
||||
static inline int dlb2_bitmap_fill(struct dlb2_bitmap *bitmap)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (bitmap == NULL || bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i != bitmap->len; i++)
|
||||
rte_bitmap_set(bitmap->map, i);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_fill() - fill a bitmap with all 0s
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
*
|
||||
* This function sets all bitmap values to 0.
|
||||
*
|
||||
* Return:
|
||||
* Returns 0 upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized.
|
||||
*/
|
||||
static inline int dlb2_bitmap_zero(struct dlb2_bitmap *bitmap)
|
||||
{
|
||||
if (bitmap == NULL || bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
rte_bitmap_reset(bitmap->map);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_set() - set a bitmap entry
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
* @bit: bit index.
|
||||
*
|
||||
* Return:
|
||||
* Returns 0 upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
|
||||
* bitmap length.
|
||||
*/
|
||||
static inline int dlb2_bitmap_set(struct dlb2_bitmap *bitmap,
|
||||
unsigned int bit)
|
||||
{
|
||||
if (bitmap == NULL || bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->len <= bit)
|
||||
return -EINVAL;
|
||||
|
||||
rte_bitmap_set(bitmap->map, bit);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_set_range() - set a range of bitmap entries
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
* @bit: starting bit index.
|
||||
* @len: length of the range.
|
||||
*
|
||||
* Return:
|
||||
* Returns 0 upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
|
||||
* length.
|
||||
*/
|
||||
static inline int dlb2_bitmap_set_range(struct dlb2_bitmap *bitmap,
|
||||
unsigned int bit,
|
||||
unsigned int len)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (bitmap == NULL || bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->len <= bit)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i != len; i++)
|
||||
rte_bitmap_set(bitmap->map, bit + i);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_clear() - clear a bitmap entry
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
* @bit: bit index.
|
||||
*
|
||||
* Return:
|
||||
* Returns 0 upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
|
||||
* bitmap length.
|
||||
*/
|
||||
static inline int dlb2_bitmap_clear(struct dlb2_bitmap *bitmap,
|
||||
unsigned int bit)
|
||||
{
|
||||
if (bitmap == NULL || bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->len <= bit)
|
||||
return -EINVAL;
|
||||
|
||||
rte_bitmap_clear(bitmap->map, bit);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_clear_range() - clear a range of bitmap entries
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
* @bit: starting bit index.
|
||||
* @len: length of the range.
|
||||
*
|
||||
* Return:
|
||||
* Returns 0 upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
|
||||
* length.
|
||||
*/
|
||||
static inline int dlb2_bitmap_clear_range(struct dlb2_bitmap *bitmap,
|
||||
unsigned int bit,
|
||||
unsigned int len)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (bitmap == NULL || bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->len <= bit)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i != len; i++)
|
||||
rte_bitmap_clear(bitmap->map, bit + i);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_find_set_bit_range() - find an range of set bits
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
* @len: length of the range.
|
||||
*
|
||||
* This function looks for a range of set bits of length @len.
|
||||
*
|
||||
* Return:
|
||||
* Returns the base bit index upon success, < 0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* ENOENT - unable to find a length *len* range of set bits.
|
||||
* EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
|
||||
*/
|
||||
static inline int dlb2_bitmap_find_set_bit_range(struct dlb2_bitmap *bitmap,
|
||||
unsigned int len)
|
||||
{
|
||||
unsigned int i, j = 0;
|
||||
|
||||
if (bitmap == NULL || bitmap->map == NULL || len == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->len < len)
|
||||
return -ENOENT;
|
||||
|
||||
for (i = 0; i != bitmap->len; i++) {
|
||||
if (rte_bitmap_get(bitmap->map, i)) {
|
||||
if (++j == len)
|
||||
return i - j + 1;
|
||||
} else {
|
||||
j = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* No set bit range of length len? */
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_find_set_bit() - find an range of set bits
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
*
|
||||
* This function looks for a single set bit.
|
||||
*
|
||||
* Return:
|
||||
* Returns the base bit index upon success, -1 if not found, <-1 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
|
||||
*/
|
||||
static inline int dlb2_bitmap_find_set_bit(struct dlb2_bitmap *bitmap)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (bitmap == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i != bitmap->len; i++) {
|
||||
if (rte_bitmap_get(bitmap->map, i))
|
||||
return i;
|
||||
}
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_count() - returns the number of set bits
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
*
|
||||
* This function looks for a single set bit.
|
||||
*
|
||||
* Return:
|
||||
* Returns the number of set bits upon success, <0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized.
|
||||
*/
|
||||
static inline int dlb2_bitmap_count(struct dlb2_bitmap *bitmap)
|
||||
{
|
||||
int weight = 0;
|
||||
unsigned int i;
|
||||
|
||||
if (bitmap == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i != bitmap->len; i++) {
|
||||
if (rte_bitmap_get(bitmap->map, i))
|
||||
weight++;
|
||||
}
|
||||
return weight;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_longest_set_range() - returns longest contiguous range of set
|
||||
* bits
|
||||
* @bitmap: pointer to dlb2_bitmap structure.
|
||||
*
|
||||
* Return:
|
||||
* Returns the bitmap's longest contiguous range of set bits upon success,
|
||||
* <0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - bitmap is NULL or is uninitialized.
|
||||
*/
|
||||
static inline int dlb2_bitmap_longest_set_range(struct dlb2_bitmap *bitmap)
|
||||
{
|
||||
int max_len = 0, len = 0;
|
||||
unsigned int i;
|
||||
|
||||
if (bitmap == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i != bitmap->len; i++) {
|
||||
if (rte_bitmap_get(bitmap->map, i)) {
|
||||
len++;
|
||||
} else {
|
||||
if (len > max_len)
|
||||
max_len = len;
|
||||
len = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (len > max_len)
|
||||
max_len = len;
|
||||
|
||||
return max_len;
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_bitmap_or() - store the logical 'or' of two bitmaps into a third
|
||||
* @dest: pointer to dlb2_bitmap structure, which will contain the results of
|
||||
* the 'or' of src1 and src2.
|
||||
* @src1: pointer to dlb2_bitmap structure, will be 'or'ed with src2.
|
||||
* @src2: pointer to dlb2_bitmap structure, will be 'or'ed with src1.
|
||||
*
|
||||
* This function 'or's two bitmaps together and stores the result in a third
|
||||
* bitmap. The source and destination bitmaps can be the same.
|
||||
*
|
||||
* Return:
|
||||
* Returns the number of set bits upon success, <0 otherwise.
|
||||
*
|
||||
* Errors:
|
||||
* EINVAL - One of the bitmaps is NULL or is uninitialized.
|
||||
*/
|
||||
static inline int dlb2_bitmap_or(struct dlb2_bitmap *dest,
|
||||
struct dlb2_bitmap *src1,
|
||||
struct dlb2_bitmap *src2)
|
||||
{
|
||||
unsigned int i, min;
|
||||
int numset = 0;
|
||||
|
||||
if (dest == NULL || dest->map == NULL ||
|
||||
src1 == NULL || src1->map == NULL ||
|
||||
src2 == NULL || src2->map == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
min = dest->len;
|
||||
min = (min > src1->len) ? src1->len : min;
|
||||
min = (min > src2->len) ? src2->len : min;
|
||||
|
||||
for (i = 0; i != min; i++) {
|
||||
if (rte_bitmap_get(src1->map, i) ||
|
||||
rte_bitmap_get(src2->map, i)) {
|
||||
rte_bitmap_set(dest->map, i);
|
||||
numset++;
|
||||
} else {
|
||||
rte_bitmap_clear(dest->map, i);
|
||||
}
|
||||
}
|
||||
|
||||
return numset;
|
||||
}
|
||||
|
||||
#endif /* __DLB2_OSDEP_BITMAP_H */
|
131
drivers/event/dlb2/pf/base/dlb2_osdep_list.h
Normal file
131
drivers/event/dlb2/pf/base/dlb2_osdep_list.h
Normal file
@ -0,0 +1,131 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __DLB2_OSDEP_LIST_H
|
||||
#define __DLB2_OSDEP_LIST_H
|
||||
|
||||
#include <rte_tailq.h>
|
||||
|
||||
struct dlb2_list_entry {
|
||||
TAILQ_ENTRY(dlb2_list_entry) node;
|
||||
};
|
||||
|
||||
/* Dummy - just a struct definition */
|
||||
TAILQ_HEAD(dlb2_list_head, dlb2_list_entry);
|
||||
|
||||
/* =================
|
||||
* TAILQ Supplements
|
||||
* =================
|
||||
*/
|
||||
|
||||
#ifndef TAILQ_FOREACH_ENTRY
|
||||
#define TAILQ_FOREACH_ENTRY(ptr, head, name, iter) \
|
||||
for ((iter) = TAILQ_FIRST(&head); \
|
||||
(iter) \
|
||||
&& (ptr = container_of(iter, typeof(*(ptr)), name)); \
|
||||
(iter) = TAILQ_NEXT((iter), node))
|
||||
#endif
|
||||
|
||||
#ifndef TAILQ_FOREACH_ENTRY_SAFE
|
||||
#define TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, iter, tvar) \
|
||||
for ((iter) = TAILQ_FIRST(&head); \
|
||||
(iter) && \
|
||||
(ptr = container_of(iter, typeof(*(ptr)), name)) &&\
|
||||
((tvar) = TAILQ_NEXT((iter), node), 1); \
|
||||
(iter) = (tvar))
|
||||
#endif
|
||||
|
||||
/***********************/
|
||||
/*** List operations ***/
|
||||
/***********************/
|
||||
|
||||
/**
|
||||
* dlb2_list_init_head() - initialize the head of a list
|
||||
* @head: list head
|
||||
*/
|
||||
static inline void dlb2_list_init_head(struct dlb2_list_head *head)
|
||||
{
|
||||
TAILQ_INIT(head);
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_list_add() - add an entry to a list
|
||||
* @head: list head
|
||||
* @entry: new list entry
|
||||
*/
|
||||
static inline void
|
||||
dlb2_list_add(struct dlb2_list_head *head, struct dlb2_list_entry *entry)
|
||||
{
|
||||
TAILQ_INSERT_TAIL(head, entry, node);
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_list_del() - delete an entry from a list
|
||||
* @entry: list entry
|
||||
* @head: list head
|
||||
*/
|
||||
static inline void dlb2_list_del(struct dlb2_list_head *head,
|
||||
struct dlb2_list_entry *entry)
|
||||
{
|
||||
TAILQ_REMOVE(head, entry, node);
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_list_empty() - check if a list is empty
|
||||
* @head: list head
|
||||
*
|
||||
* Return:
|
||||
* Returns 1 if empty, 0 if not.
|
||||
*/
|
||||
static inline int dlb2_list_empty(struct dlb2_list_head *head)
|
||||
{
|
||||
return TAILQ_EMPTY(head);
|
||||
}
|
||||
|
||||
/**
|
||||
* dlb2_list_splice() - splice a list
|
||||
* @src_head: list to be added
|
||||
* @ head: where src_head will be inserted
|
||||
*/
|
||||
static inline void dlb2_list_splice(struct dlb2_list_head *src_head,
|
||||
struct dlb2_list_head *head)
|
||||
{
|
||||
TAILQ_CONCAT(head, src_head, node);
|
||||
}
|
||||
|
||||
/**
|
||||
* DLB2_LIST_HEAD() - retrieve the head of the list
|
||||
* @head: list head
|
||||
* @type: type of the list variable
|
||||
* @name: name of the list field within the containing struct
|
||||
*/
|
||||
#define DLB2_LIST_HEAD(head, type, name) \
|
||||
(TAILQ_FIRST(&head) ? \
|
||||
container_of(TAILQ_FIRST(&head), type, name) : \
|
||||
NULL)
|
||||
|
||||
/**
|
||||
* DLB2_LIST_FOR_EACH() - iterate over a list
|
||||
* @head: list head
|
||||
* @ptr: pointer to struct containing a struct list
|
||||
* @name: name of the list field within the containing struct
|
||||
* @iter: iterator variable
|
||||
*/
|
||||
#define DLB2_LIST_FOR_EACH(head, ptr, name, tmp_iter) \
|
||||
TAILQ_FOREACH_ENTRY(ptr, head, name, tmp_iter)
|
||||
|
||||
/**
|
||||
* DLB2_LIST_FOR_EACH_SAFE() - iterate over a list. This loop works even if
|
||||
* an element is removed from the list while processing it.
|
||||
* @ptr: pointer to struct containing a struct list
|
||||
* @ptr_tmp: pointer to struct containing a struct list (temporary)
|
||||
* @head: list head
|
||||
* @name: name of the list field within the containing struct
|
||||
* @iter: iterator variable
|
||||
* @iter_tmp: iterator variable (temporary)
|
||||
*/
|
||||
#define DLB2_LIST_FOR_EACH_SAFE(head, ptr, ptr_tmp, name, tmp_iter, saf_itr) \
|
||||
TAILQ_FOREACH_ENTRY_SAFE(ptr, head, name, tmp_iter, saf_itr)
|
||||
|
||||
#endif /* __DLB2_OSDEP_LIST_H */
|
31
drivers/event/dlb2/pf/base/dlb2_osdep_types.h
Normal file
31
drivers/event/dlb2/pf/base/dlb2_osdep_types.h
Normal file
@ -0,0 +1,31 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __DLB2_OSDEP_TYPES_H
#define __DLB2_OSDEP_TYPES_H

#include <linux/types.h>

#include <inttypes.h>
#include <ctype.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

/* Types for user mode PF PMD */
/* Linux-kernel-style fixed-width aliases, so the shared base/ code can be
 * built unchanged in user space.
 */
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;

/* Kernel I/O-memory annotation; expands to nothing in user space. */
#define __iomem

/* END types for user mode PF PMD */

#endif /* __DLB2_OSDEP_TYPES_H */
|
2527
drivers/event/dlb2/pf/base/dlb2_regs.h
Normal file
2527
drivers/event/dlb2/pf/base/dlb2_regs.h
Normal file
File diff suppressed because it is too large
Load Diff
1913
drivers/event/dlb2/pf/base/dlb2_resource.h
Normal file
1913
drivers/event/dlb2/pf/base/dlb2_resource.h
Normal file
File diff suppressed because it is too large
Load Diff
621
drivers/event/dlb2/pf/dlb2_main.c
Normal file
621
drivers/event/dlb2/pf/dlb2_main.c
Normal file
@ -0,0 +1,621 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
#include <assert.h>
|
||||
#include <unistd.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <rte_malloc.h>
|
||||
#include <rte_errno.h>
|
||||
|
||||
#include "base/dlb2_resource.h"
|
||||
#include "base/dlb2_osdep.h"
|
||||
#include "base/dlb2_regs.h"
|
||||
#include "dlb2_main.h"
|
||||
#include "../dlb2_user.h"
|
||||
#include "../dlb2_priv.h"
|
||||
#include "../dlb2_inline_fns.h"
|
||||
|
||||
/* These values are valid for the PF PMD only: the PF owns the device
 * outright, so there is no owning VF and no virtual-device requests.
 */
#define PF_ID_ZERO 0	/* PF ONLY! */
#define NO_OWNER_VF 0	/* PF ONLY! */
#define NOT_VF_REQ false /* PF ONLY! */

/* PCI configuration-space layout helpers (mirroring <linux/pci_regs.h>,
 * duplicated here because the PMD accesses config space directly through
 * rte_pci_read_config()/rte_pci_write_config()).
 */
#define DLB2_PCI_CFG_SPACE_SIZE 256
#define DLB2_PCI_CAP_POINTER 0x34
#define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
#define DLB2_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
#define DLB2_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)
#define DLB2_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)
#define DLB2_PCI_EXT_CAP_ID_ERR 1
#define DLB2_PCI_ERR_UNCOR_MASK 8
#define DLB2_PCI_ERR_UNC_UNSUP  0x00100000

/* Offsets within the PCI Express capability structure. */
#define DLB2_PCI_EXP_DEVCTL 8
#define DLB2_PCI_LNKCTL 16
#define DLB2_PCI_SLTCTL 24
#define DLB2_PCI_RTCTL 28
#define DLB2_PCI_EXP_DEVCTL2 40
#define DLB2_PCI_LNKCTL2 48
#define DLB2_PCI_SLTCTL2 56
#define DLB2_PCI_CMD 4
#define DLB2_PCI_X_CMD 2
#define DLB2_PCI_EXP_DEVSTA 10
#define DLB2_PCI_EXP_DEVSTA_TRPND 0x20
#define DLB2_PCI_EXP_DEVCTL_BCR_FLR 0x8000

/* Capability and extended-capability IDs used by dlb2_pf_reset(). */
#define DLB2_PCI_CAP_ID_EXP       0x10
#define DLB2_PCI_CAP_ID_MSIX      0x11
#define DLB2_PCI_EXT_CAP_ID_PAS   0x1B
#define DLB2_PCI_EXT_CAP_ID_PRI   0x13
#define DLB2_PCI_EXT_CAP_ID_ACS   0xD

/* PRI / MSI-X / AER / ACS register offsets and flag bits. */
#define DLB2_PCI_PRI_CTRL_ENABLE         0x1
#define DLB2_PCI_PRI_ALLOC_REQ           0xC
#define DLB2_PCI_PRI_CTRL                0x4
#define DLB2_PCI_MSIX_FLAGS              0x2
#define DLB2_PCI_MSIX_FLAGS_ENABLE       0x8000
#define DLB2_PCI_MSIX_FLAGS_MASKALL      0x4000
#define DLB2_PCI_ERR_ROOT_STATUS         0x30
#define DLB2_PCI_ERR_COR_STATUS          0x10
#define DLB2_PCI_ERR_UNCOR_STATUS        0x4
#define DLB2_PCI_COMMAND_INTX_DISABLE    0x400
#define DLB2_PCI_ACS_CAP                 0x4
#define DLB2_PCI_ACS_CTRL                0x6
#define DLB2_PCI_ACS_SV                  0x1
#define DLB2_PCI_ACS_RR                  0x4
#define DLB2_PCI_ACS_CR                  0x8
#define DLB2_PCI_ACS_UF                  0x10
#define DLB2_PCI_ACS_EC                  0x20
|
||||
|
||||
/* Stubs: Allow building partial probe patch */
|
||||
/* Stub: hardware resource teardown arrives in a later patch of this series. */
void dlb2_resource_free(struct dlb2_hw *hw)
{
	RTE_SET_USED(hw);
}
|
||||
|
||||
/* Stub: real hardware resource initialization arrives in a later patch of
 * this series. Always reports success so the partial probe path can run.
 */
int dlb2_resource_init(struct dlb2_hw *hw)
{
	RTE_SET_USED(hw);

	return 0;
}
|
||||
|
||||
/* Stub: clearing the PMCSR disable bit arrives in a later patch of this
 * series; for now probe proceeds without touching power-management CSRs.
 */
void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
{
	RTE_SET_USED(hw);
}
|
||||
|
||||
/* End stubs */
|
||||
|
||||
static int
|
||||
dlb2_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
|
||||
{
|
||||
uint32_t hdr;
|
||||
size_t sz;
|
||||
int pos;
|
||||
|
||||
pos = DLB2_PCI_CFG_SPACE_SIZE;
|
||||
sz = sizeof(hdr);
|
||||
|
||||
while (pos > 0xFF) {
|
||||
if (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)
|
||||
return -1;
|
||||
|
||||
if (DLB2_PCI_EXT_CAP_ID(hdr) == id)
|
||||
return pos;
|
||||
|
||||
pos = DLB2_PCI_EXT_CAP_NEXT(hdr);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
|
||||
{
|
||||
uint8_t pos;
|
||||
int ret;
|
||||
uint16_t hdr;
|
||||
|
||||
ret = rte_pci_read_config(pdev, &pos, 1, DLB2_PCI_CAP_POINTER);
|
||||
pos &= 0xFC;
|
||||
|
||||
if (ret != 1)
|
||||
return -1;
|
||||
|
||||
while (pos > 0x3F) {
|
||||
ret = rte_pci_read_config(pdev, &hdr, 2, pos);
|
||||
if (ret != 2)
|
||||
return -1;
|
||||
|
||||
if (DLB2_PCI_CAP_ID(hdr) == id)
|
||||
return pos;
|
||||
|
||||
if (DLB2_PCI_CAP_ID(hdr) == 0xFF)
|
||||
return -1;
|
||||
|
||||
pos = DLB2_PCI_CAP_NEXT(hdr);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int
|
||||
dlb2_pf_init_driver_state(struct dlb2_dev *dlb2_dev)
|
||||
{
|
||||
rte_spinlock_init(&dlb2_dev->resource_mutex);
|
||||
rte_spinlock_init(&dlb2_dev->measurement_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Enable device power management by clearing the PMCSR disable bit.
 * Must run before any other MMIO access (see the comment in dlb2_probe()).
 * dlb2_clr_pmcsr_disable() is currently a stub in this patch.
 */
static void dlb2_pf_enable_pm(struct dlb2_dev *dlb2_dev)
{
	dlb2_clr_pmcsr_disable(&dlb2_dev->hw);
}
|
||||
|
||||
#define DLB2_READY_RETRY_LIMIT 1000
|
||||
static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev)
|
||||
{
|
||||
u32 retries = 0;
|
||||
|
||||
/* Allow at least 1s for the device to become active after power-on */
|
||||
for (retries = 0; retries < DLB2_READY_RETRY_LIMIT; retries++) {
|
||||
union dlb2_cfg_mstr_cfg_diagnostic_idle_status idle;
|
||||
union dlb2_cfg_mstr_cfg_pm_status pm_st;
|
||||
u32 addr;
|
||||
|
||||
addr = DLB2_CFG_MSTR_CFG_PM_STATUS;
|
||||
pm_st.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
|
||||
addr = DLB2_CFG_MSTR_CFG_DIAGNOSTIC_IDLE_STATUS;
|
||||
idle.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
|
||||
if (pm_st.field.pmsm == 1 && idle.field.dlb_func_idle == 1)
|
||||
break;
|
||||
|
||||
rte_delay_ms(1);
|
||||
};
|
||||
|
||||
if (retries == DLB2_READY_RETRY_LIMIT) {
|
||||
DLB2_LOG_ERR("[%s()] wait for device ready timed out\n",
|
||||
__func__);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct dlb2_dev *
|
||||
dlb2_probe(struct rte_pci_device *pdev)
|
||||
{
|
||||
struct dlb2_dev *dlb2_dev;
|
||||
int ret = 0;
|
||||
|
||||
DLB2_INFO(dlb2_dev, "probe\n");
|
||||
|
||||
dlb2_dev = rte_malloc("DLB2_PF", sizeof(struct dlb2_dev),
|
||||
RTE_CACHE_LINE_SIZE);
|
||||
|
||||
if (dlb2_dev == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto dlb2_dev_malloc_fail;
|
||||
}
|
||||
|
||||
/* PCI Bus driver has already mapped bar space into process.
|
||||
* Save off our IO register and FUNC addresses.
|
||||
*/
|
||||
|
||||
/* BAR 0 */
|
||||
if (pdev->mem_resource[0].addr == NULL) {
|
||||
DLB2_ERR(dlb2_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
|
||||
ret = -EINVAL;
|
||||
goto pci_mmap_bad_addr;
|
||||
}
|
||||
dlb2_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
|
||||
dlb2_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
|
||||
|
||||
DLB2_INFO(dlb2_dev, "DLB2 FUNC VA=%p, PA=%p, len=%p\n",
|
||||
(void *)dlb2_dev->hw.func_kva,
|
||||
(void *)dlb2_dev->hw.func_phys_addr,
|
||||
(void *)(pdev->mem_resource[0].len));
|
||||
|
||||
/* BAR 2 */
|
||||
if (pdev->mem_resource[2].addr == NULL) {
|
||||
DLB2_ERR(dlb2_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
|
||||
ret = -EINVAL;
|
||||
goto pci_mmap_bad_addr;
|
||||
}
|
||||
dlb2_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
|
||||
dlb2_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
|
||||
|
||||
DLB2_INFO(dlb2_dev, "DLB2 CSR VA=%p, PA=%p, len=%p\n",
|
||||
(void *)dlb2_dev->hw.csr_kva,
|
||||
(void *)dlb2_dev->hw.csr_phys_addr,
|
||||
(void *)(pdev->mem_resource[2].len));
|
||||
|
||||
dlb2_dev->pdev = pdev;
|
||||
|
||||
/* PM enable must be done before any other MMIO accesses, and this
|
||||
* setting is persistent across device reset.
|
||||
*/
|
||||
dlb2_pf_enable_pm(dlb2_dev);
|
||||
|
||||
ret = dlb2_pf_wait_for_device_ready(dlb2_dev);
|
||||
if (ret)
|
||||
goto wait_for_device_ready_fail;
|
||||
|
||||
ret = dlb2_pf_reset(dlb2_dev);
|
||||
if (ret)
|
||||
goto dlb2_reset_fail;
|
||||
|
||||
ret = dlb2_pf_init_driver_state(dlb2_dev);
|
||||
if (ret)
|
||||
goto init_driver_state_fail;
|
||||
|
||||
ret = dlb2_resource_init(&dlb2_dev->hw);
|
||||
if (ret)
|
||||
goto resource_init_fail;
|
||||
|
||||
return dlb2_dev;
|
||||
|
||||
resource_init_fail:
|
||||
dlb2_resource_free(&dlb2_dev->hw);
|
||||
init_driver_state_fail:
|
||||
dlb2_reset_fail:
|
||||
pci_mmap_bad_addr:
|
||||
wait_for_device_ready_fail:
|
||||
rte_free(dlb2_dev);
|
||||
dlb2_dev_malloc_fail:
|
||||
rte_errno = ret;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int
|
||||
dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
|
||||
{
|
||||
int ret = 0;
|
||||
int i = 0;
|
||||
uint32_t dword[16];
|
||||
uint16_t cmd;
|
||||
off_t off;
|
||||
|
||||
uint16_t dev_ctl_word;
|
||||
uint16_t dev_ctl2_word;
|
||||
uint16_t lnk_word;
|
||||
uint16_t lnk_word2;
|
||||
uint16_t slt_word;
|
||||
uint16_t slt_word2;
|
||||
uint16_t rt_ctl_word;
|
||||
uint32_t pri_reqs_dword;
|
||||
uint16_t pri_ctrl_word;
|
||||
|
||||
int pcie_cap_offset;
|
||||
int pri_cap_offset;
|
||||
int msix_cap_offset;
|
||||
int err_cap_offset;
|
||||
int acs_cap_offset;
|
||||
int wait_count;
|
||||
|
||||
uint16_t devsta_busy_word;
|
||||
uint16_t devctl_word;
|
||||
|
||||
struct rte_pci_device *pdev = dlb2_dev->pdev;
|
||||
|
||||
/* Save PCI config state */
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
|
||||
return ret;
|
||||
}
|
||||
|
||||
pcie_cap_offset = dlb2_pci_find_capability(pdev, DLB2_PCI_CAP_ID_EXP);
|
||||
|
||||
if (pcie_cap_offset < 0) {
|
||||
DLB2_LOG_ERR("[%s()] failed to find the pcie capability\n",
|
||||
__func__);
|
||||
return pcie_cap_offset;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL;
|
||||
if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
|
||||
dev_ctl_word = 0;
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_LNKCTL;
|
||||
if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
|
||||
lnk_word = 0;
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_SLTCTL;
|
||||
if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
|
||||
slt_word = 0;
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_RTCTL;
|
||||
if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
|
||||
rt_ctl_word = 0;
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
|
||||
if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
|
||||
dev_ctl2_word = 0;
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
|
||||
if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
|
||||
lnk_word2 = 0;
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
|
||||
if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
|
||||
slt_word2 = 0;
|
||||
|
||||
off = DLB2_PCI_EXT_CAP_ID_PRI;
|
||||
pri_cap_offset = dlb2_pci_find_ext_capability(pdev, off);
|
||||
|
||||
if (pri_cap_offset >= 0) {
|
||||
off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
|
||||
if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
|
||||
pri_reqs_dword = 0;
|
||||
}
|
||||
|
||||
/* clear the PCI command register before issuing the FLR */
|
||||
|
||||
off = DLB2_PCI_CMD;
|
||||
cmd = 0;
|
||||
if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* issue the FLR */
|
||||
for (wait_count = 0; wait_count < 4; wait_count++) {
|
||||
int sleep_time;
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_EXP_DEVSTA;
|
||||
ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to read the pci device status\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!(devsta_busy_word & DLB2_PCI_EXP_DEVSTA_TRPND))
|
||||
break;
|
||||
|
||||
sleep_time = (1 << (wait_count)) * 100;
|
||||
rte_delay_ms(sleep_time);
|
||||
}
|
||||
|
||||
if (wait_count == 4) {
|
||||
DLB2_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
|
||||
__func__);
|
||||
return -1;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL;
|
||||
ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to read the pcie device control\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
devctl_word |= DLB2_PCI_EXP_DEVCTL_BCR_FLR;
|
||||
|
||||
ret = rte_pci_write_config(pdev, &devctl_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie device control\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
rte_delay_ms(100);
|
||||
|
||||
/* Restore PCI config state */
|
||||
|
||||
if (pcie_cap_offset >= 0) {
|
||||
off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL;
|
||||
ret = rte_pci_write_config(pdev, &dev_ctl_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_LNKCTL;
|
||||
ret = rte_pci_write_config(pdev, &lnk_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_SLTCTL;
|
||||
ret = rte_pci_write_config(pdev, &slt_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_RTCTL;
|
||||
ret = rte_pci_write_config(pdev, &rt_ctl_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
|
||||
ret = rte_pci_write_config(pdev, &dev_ctl2_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
|
||||
ret = rte_pci_write_config(pdev, &lnk_word2, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
|
||||
ret = rte_pci_write_config(pdev, &slt_word2, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (pri_cap_offset >= 0) {
|
||||
pri_ctrl_word = DLB2_PCI_PRI_CTRL_ENABLE;
|
||||
|
||||
off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
|
||||
ret = rte_pci_write_config(pdev, &pri_reqs_dword, 4, off);
|
||||
if (ret != 4) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = pri_cap_offset + DLB2_PCI_PRI_CTRL;
|
||||
ret = rte_pci_write_config(pdev, &pri_ctrl_word, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
off = DLB2_PCI_EXT_CAP_ID_ERR;
|
||||
err_cap_offset = dlb2_pci_find_ext_capability(pdev, off);
|
||||
|
||||
if (err_cap_offset >= 0) {
|
||||
uint32_t tmp;
|
||||
|
||||
off = err_cap_offset + DLB2_PCI_ERR_ROOT_STATUS;
|
||||
if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
|
||||
tmp = 0;
|
||||
|
||||
ret = rte_pci_write_config(pdev, &tmp, 4, off);
|
||||
if (ret != 4) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = err_cap_offset + DLB2_PCI_ERR_COR_STATUS;
|
||||
if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
|
||||
tmp = 0;
|
||||
|
||||
ret = rte_pci_write_config(pdev, &tmp, 4, off);
|
||||
if (ret != 4) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = err_cap_offset + DLB2_PCI_ERR_UNCOR_STATUS;
|
||||
if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
|
||||
tmp = 0;
|
||||
|
||||
ret = rte_pci_write_config(pdev, &tmp, 4, off);
|
||||
if (ret != 4) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 16; i > 0; i--) {
|
||||
off = (i - 1) * 4;
|
||||
ret = rte_pci_write_config(pdev, &dword[i - 1], 4, off);
|
||||
if (ret != 4) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
off = DLB2_PCI_CMD;
|
||||
if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
|
||||
cmd &= ~DLB2_PCI_COMMAND_INTX_DISABLE;
|
||||
if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
msix_cap_offset = dlb2_pci_find_capability(pdev,
|
||||
DLB2_PCI_CAP_ID_MSIX);
|
||||
if (msix_cap_offset >= 0) {
|
||||
off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
|
||||
if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
|
||||
cmd |= DLB2_PCI_MSIX_FLAGS_ENABLE;
|
||||
cmd |= DLB2_PCI_MSIX_FLAGS_MASKALL;
|
||||
if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
|
||||
if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
|
||||
cmd &= ~DLB2_PCI_MSIX_FLAGS_MASKALL;
|
||||
if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
|
||||
__func__);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
off = DLB2_PCI_EXT_CAP_ID_ACS;
|
||||
acs_cap_offset = dlb2_pci_find_ext_capability(pdev, off);
|
||||
|
||||
if (acs_cap_offset >= 0) {
|
||||
uint16_t acs_cap, acs_ctrl, acs_mask;
|
||||
off = acs_cap_offset + DLB2_PCI_ACS_CAP;
|
||||
if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
|
||||
acs_cap = 0;
|
||||
|
||||
off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
|
||||
if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
|
||||
acs_ctrl = 0;
|
||||
|
||||
acs_mask = DLB2_PCI_ACS_SV | DLB2_PCI_ACS_RR;
|
||||
acs_mask |= (DLB2_PCI_ACS_CR | DLB2_PCI_ACS_UF);
|
||||
acs_ctrl |= (acs_cap & acs_mask);
|
||||
|
||||
ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
|
||||
if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
|
||||
acs_ctrl = 0;
|
||||
|
||||
acs_mask = DLB2_PCI_ACS_RR | DLB2_PCI_ACS_CR;
|
||||
acs_mask |= DLB2_PCI_ACS_EC;
|
||||
acs_ctrl &= ~acs_mask;
|
||||
|
||||
off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
|
||||
ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
|
||||
if (ret != 2) {
|
||||
DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
|
||||
__func__, (int)off);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
98
drivers/event/dlb2/pf/dlb2_main.h
Normal file
98
drivers/event/dlb2/pf/dlb2_main.h
Normal file
@ -0,0 +1,98 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __DLB2_MAIN_H
|
||||
#define __DLB2_MAIN_H
|
||||
|
||||
#include <rte_debug.h>
|
||||
#include <rte_log.h>
|
||||
#include <rte_spinlock.h>
|
||||
#include <rte_pci.h>
|
||||
#include <rte_bus_pci.h>
|
||||
|
||||
#ifndef PAGE_SIZE
|
||||
#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
|
||||
#endif
|
||||
|
||||
#include "base/dlb2_hw_types.h"
|
||||
#include "../dlb2_user.h"
|
||||
|
||||
#define DLB2_DEFAULT_UNREGISTER_TIMEOUT_S 5
|
||||
|
||||
struct dlb2_dev;
|
||||
|
||||
/* Per-port memory bookkeeping: the CQ base handed to a port and whether the
 * entry is in use.
 */
struct dlb2_port_memory {
	struct dlb2_list_head list; /* linkage for the owning list */
	void *cq_base;              /* base address of the port's CQ memory */
	bool valid;                 /* true while this entry is in use */
};
|
||||
|
||||
/* Per-device (PF) driver state, allocated by dlb2_probe(). */
struct dlb2_dev {
	struct rte_pci_device *pdev; /* underlying PCI device */
	struct dlb2_hw hw;           /* shared base-driver hardware state */
	/* struct list_head list; */
	struct device *dlb2_device;
	bool domain_reset_failed;
	/* The resource mutex serializes access to driver data structures and
	 * hardware registers.
	 */
	rte_spinlock_t resource_mutex;
	rte_spinlock_t measurement_lock;
	bool worker_launched;
	u8 revision; /* PCI revision ID of the device */
};
|
||||
|
||||
/* Device attach/reset entry points (implemented in pf/dlb2_main.c). */
struct dlb2_dev *dlb2_probe(struct rte_pci_device *pdev);

int dlb2_pf_reset(struct dlb2_dev *dlb2_dev);

/* PF hardware-interface entry points: thin wrappers over the shared base
 * driver, invoked through the flexible iface function-pointer layer.
 */
int dlb2_pf_create_sched_domain(struct dlb2_hw *hw,
				struct dlb2_create_sched_domain_args *args,
				struct dlb2_cmd_response *resp);
int dlb2_pf_create_ldb_queue(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_create_ldb_queue_args *args,
			     struct dlb2_cmd_response *resp);
int dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_create_dir_queue_args *args,
			     struct dlb2_cmd_response *resp);
int dlb2_pf_create_ldb_port(struct dlb2_hw *hw,
			    u32 domain_id,
			    struct dlb2_create_ldb_port_args *args,
			    uintptr_t cq_dma_base,
			    struct dlb2_cmd_response *resp);
int dlb2_pf_create_dir_port(struct dlb2_hw *hw,
			    u32 domain_id,
			    struct dlb2_create_dir_port_args *args,
			    uintptr_t cq_dma_base,
			    struct dlb2_cmd_response *resp);
int dlb2_pf_start_domain(struct dlb2_hw *hw,
			 u32 domain_id,
			 struct dlb2_start_domain_args *args,
			 struct dlb2_cmd_response *resp);
int dlb2_pf_enable_ldb_port(struct dlb2_hw *hw,
			    u32 domain_id,
			    struct dlb2_enable_ldb_port_args *args,
			    struct dlb2_cmd_response *resp);
int dlb2_pf_disable_ldb_port(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_disable_ldb_port_args *args,
			     struct dlb2_cmd_response *resp);
int dlb2_pf_enable_dir_port(struct dlb2_hw *hw,
			    u32 domain_id,
			    struct dlb2_enable_dir_port_args *args,
			    struct dlb2_cmd_response *resp);
int dlb2_pf_disable_dir_port(struct dlb2_hw *hw,
			     u32 domain_id,
			     struct dlb2_disable_dir_port_args *args,
			     struct dlb2_cmd_response *resp);
int dlb2_pf_reset_domain(struct dlb2_hw *hw, u32 domain_id);
int dlb2_pf_ldb_port_owned_by_domain(struct dlb2_hw *hw,
				     u32 domain_id,
				     u32 port_id);
int dlb2_pf_dir_port_owned_by_domain(struct dlb2_hw *hw,
				     u32 domain_id,
				     u32 port_id);
|
||||
|
||||
#endif /* __DLB2_MAIN_H */
|
196
drivers/event/dlb2/pf/dlb2_pf.c
Normal file
196
drivers/event/dlb2/pf/dlb2_pf.c
Normal file
@ -0,0 +1,196 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2016-2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/fcntl.h>
|
||||
#include <sys/time.h>
|
||||
#include <errno.h>
|
||||
#include <assert.h>
|
||||
#include <unistd.h>
|
||||
#include <string.h>
|
||||
#include <rte_debug.h>
|
||||
#include <rte_log.h>
|
||||
#include <rte_dev.h>
|
||||
#include <rte_devargs.h>
|
||||
#include <rte_mbuf.h>
|
||||
#include <rte_ring.h>
|
||||
#include <rte_errno.h>
|
||||
#include <rte_kvargs.h>
|
||||
#include <rte_malloc.h>
|
||||
#include <rte_cycles.h>
|
||||
#include <rte_io.h>
|
||||
#include <rte_pci.h>
|
||||
#include <rte_bus_pci.h>
|
||||
#include <rte_eventdev.h>
|
||||
#include <rte_eventdev_pmd.h>
|
||||
#include <rte_eventdev_pmd_pci.h>
|
||||
#include <rte_memory.h>
|
||||
#include <rte_string_fns.h>
|
||||
|
||||
#include "../dlb2_priv.h"
|
||||
#include "../dlb2_inline_fns.h"
|
||||
#include "dlb2_main.h"
|
||||
#include "base/dlb2_hw_types.h"
|
||||
#include "base/dlb2_osdep.h"
|
||||
#include "base/dlb2_resource.h"
|
||||
|
||||
static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
|
||||
|
||||
/* Stubs: Allow building partial probe patch */
|
||||
/* Stub: resource enumeration arrives in a later patch of this series.
 * Always reports success without filling in *arg.
 */
int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
			      struct dlb2_get_num_resources_args *arg,
			      bool vdev_req,
			      unsigned int vdev_id)
{
	RTE_SET_USED(hw);
	RTE_SET_USED(arg);
	RTE_SET_USED(vdev_req);
	RTE_SET_USED(vdev_id);

	return 0;
}
|
||||
|
||||
/* Stub: sparse LDB CQ mode configuration arrives in a later patch. */
void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
{
	RTE_SET_USED(hw);
}
|
||||
|
||||
/* Stub: sparse DIR CQ mode configuration arrives in a later patch. */
void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
{
	RTE_SET_USED(hw);
}
|
||||
/* End stubs */
|
||||
|
||||
/* Populate the flexible interface function pointers shared with the
 * device-independent layer. Intentionally empty in this patch; assignments
 * arrive with the hardware-enablement patches.
 */
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	/* flexible iface fn ptr assignments will go here */
}
|
||||
|
||||
/* PCI DEV HOOKS */
|
||||
/* PCI DEV HOOKS */
/* Eventdev-level init callback, invoked by rte_event_pmd_pci_probe_named()
 * after the generic eventdev has been allocated.
 *
 * Primary process: probes the PF hardware layer, parses any devargs
 * supplied on the command line, then finishes eventdev setup. Secondary
 * process: attaches to the already-probed device.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	/* Defaults; overridden by devargs parsed below when present. */
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.cos_id = DLB2_COS_DEFAULT
	};
	struct dlb2_eventdev *dlb2;

	DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);

		if (dlb2->qm_instance.pf_dev == NULL) {
			/* dlb2_probe() sets rte_errno on failure */
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success\n");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);

	return ret;
}
|
||||
|
||||
#define EVENTDEV_INTEL_VENDOR_ID 0x8086

/* PCI IDs this driver binds to; zero vendor_id terminates the table. */
static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0,
	},
};
|
||||
|
||||
static int
|
||||
event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
|
||||
struct rte_pci_device *pci_dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
|
||||
sizeof(struct dlb2_eventdev),
|
||||
dlb2_eventdev_pci_init,
|
||||
event_dlb2_pf_name);
|
||||
if (ret) {
|
||||
DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
|
||||
"ret=%d\n", ret);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = rte_event_pmd_pci_remove(pci_dev, NULL);
|
||||
|
||||
if (ret) {
|
||||
DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
|
||||
"ret=%d\n", ret);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
/* PCI driver descriptor: NEED_MAPPING asks the bus driver to map the BARs
 * before probe (dlb2_probe() relies on mem_resource[] being populated).
 */
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);
|
Loading…
Reference in New Issue
Block a user