raw/ifpga/base: add Intel FPGA OPAE shared code

This patch adds the Intel FPGA Open Programmable Acceleration
Engine (OPAE) [1] base driver code, in order to support Intel
FPGA devices under DPDK. The base code currently supports
Intel FPGA solutions including the integrated solution (Intel(R)
Xeon(R) CPU with FPGAs) and the discrete solution (Intel(R)
Programmable Acceleration Card with Intel(R) Arria(R) 10 FPGA),
and it can be extended to support more FPGA devices in the
future. Please refer to [1][2] for more information on OPAE
and Intel FPGAs.

[1] https://01.org/OPAE
[2] https://www.altera.com/solutions/acceleration-hub/overview.html

Signed-off-by: Tianfei Zhang <tianfei.zhang@intel.com>
Signed-off-by: Hao Wu <hao.wu@intel.com>
Signed-off-by: Yilun Xu <yilun.xu@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
Author:    Tianfei Zhang <tianfei.zhang@intel.com>
Date:      2018-05-11 16:31:30 +08:00
Committer: Thomas Monjalon
Parent:    05fa3d4a65
Commit:    56bb54ea1b

29 changed files with 7903 additions and 0 deletions

MAINTAINERS

@@ -880,6 +880,10 @@ F: doc/guides/eventdevs/opdl.rst
Rawdev Drivers
--------------
Intel FPGA
M: Tianfei Zhang <tianfei.zhang@intel.com>
F: drivers/raw/ifpga_rawdev/
NXP DPAA2 QDMA
M: Nipun Gupta <nipun.gupta@nxp.com>
F: drivers/raw/dpaa2_qdma/

drivers/raw/ifpga_rawdev/base/Makefile

@@ -0,0 +1,26 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2018 Intel Corporation
ifneq ($(CONFIG_RTE_LIBRTE_EAL),)
OSDEP := osdep_rte
else
OSDEP := osdep_raw
endif
CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)
SRCS-y += ifpga_api.c
SRCS-y += ifpga_enumerate.c
SRCS-y += ifpga_feature_dev.c
SRCS-y += ifpga_fme.c
SRCS-y += ifpga_fme_iperf.c
SRCS-y += ifpga_fme_dperf.c
SRCS-y += ifpga_fme_error.c
SRCS-y += ifpga_port.c
SRCS-y += ifpga_port_error.c
SRCS-y += opae_hw_api.c
SRCS-y += opae_ifpga_hw_api.c
SRCS-y += opae_debug.c
SRCS-y += ifpga_fme_pr.c
SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)

drivers/raw/ifpga_rawdev/base/README

@@ -0,0 +1,31 @@
..
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
Intel iFPGA driver
==================
This directory contains the source code of the Intel FPGA driver, released
by the team that develops the Intel FPGA Open Programmable Acceleration
Engine (OPAE). The base/ directory contains the original source package.
The base code currently supports Intel FPGA solutions including the
integrated solution (Intel(R) Xeon(R) CPU with FPGAs) and the discrete
solution (Intel(R) Programmable Acceleration Card with Intel(R) Arria(R) 10
FPGA), and it can be extended to support more FPGA devices in the future.

Please refer to [1][2] for more information on OPAE and Intel FPGAs.
[1] https://01.org/OPAE
[2] https://www.altera.com/solutions/acceleration-hub/overview.html
Updating the driver
===================
NOTE: The source code in this directory should not be modified apart from
the following file(s):
osdep_raw/osdep_generic.h
osdep_rte/osdep_generic.h

drivers/raw/ifpga_rawdev/base/ifpga_api.c

@@ -0,0 +1,294 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_api.h"
#include "ifpga_enumerate.h"
#include "ifpga_feature_dev.h"
#include "opae_hw_api.h"
/* Accelerator APIs */
static int ifpga_acc_get_uuid(struct opae_accelerator *acc,
struct uuid *uuid)
{
struct opae_bridge *br = acc->br;
struct ifpga_port_hw *port;
if (!br || !br->data)
return -EINVAL;
port = br->data;
return fpga_get_afu_uuid(port, uuid);
}
static int ifpga_acc_set_irq(struct opae_accelerator *acc,
u32 start, u32 count, s32 evtfds[])
{
struct ifpga_afu_info *afu_info = acc->data;
struct opae_bridge *br = acc->br;
struct ifpga_port_hw *port;
struct fpga_uafu_irq_set irq_set;
if (!br || !br->data)
return -EINVAL;
if (start >= afu_info->num_irqs || start + count > afu_info->num_irqs)
return -EINVAL;
port = br->data;
irq_set.start = start;
irq_set.count = count;
irq_set.evtfds = evtfds;
return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
IFPGA_PORT_FEATURE_ID_UINT, &irq_set);
}
static int ifpga_acc_get_info(struct opae_accelerator *acc,
struct opae_acc_info *info)
{
struct ifpga_afu_info *afu_info = acc->data;
if (!afu_info)
return -ENODEV;
info->num_regions = afu_info->num_regions;
info->num_irqs = afu_info->num_irqs;
return 0;
}
static int ifpga_acc_get_region_info(struct opae_accelerator *acc,
struct opae_acc_region_info *info)
{
struct ifpga_afu_info *afu_info = acc->data;
if (!afu_info)
return -EINVAL;
if (info->index >= afu_info->num_regions)
return -EINVAL;
/* only one RW MMIO region for the AFU for now */
info->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO;
info->len = afu_info->region[info->index].len;
info->addr = afu_info->region[info->index].addr;
return 0;
}
static int ifpga_acc_read(struct opae_accelerator *acc, unsigned int region_idx,
u64 offset, unsigned int byte, void *data)
{
struct ifpga_afu_info *afu_info = acc->data;
struct opae_reg_region *region;
if (!afu_info)
return -EINVAL;
if (offset + byte <= offset)
return -EINVAL;
if (region_idx >= afu_info->num_regions)
return -EINVAL;
region = &afu_info->region[region_idx];
if (offset + byte > region->len)
return -EINVAL;
switch (byte) {
case 8:
*(u64 *)data = opae_readq(region->addr + offset);
break;
case 4:
*(u32 *)data = opae_readl(region->addr + offset);
break;
case 2:
*(u16 *)data = opae_readw(region->addr + offset);
break;
case 1:
*(u8 *)data = opae_readb(region->addr + offset);
break;
default:
return -EINVAL;
}
return 0;
}
static int ifpga_acc_write(struct opae_accelerator *acc,
unsigned int region_idx, u64 offset,
unsigned int byte, void *data)
{
struct ifpga_afu_info *afu_info = acc->data;
struct opae_reg_region *region;
if (!afu_info)
return -EINVAL;
if (offset + byte <= offset)
return -EINVAL;
if (region_idx >= afu_info->num_regions)
return -EINVAL;
region = &afu_info->region[region_idx];
if (offset + byte > region->len)
return -EINVAL;
/* normal mmio case */
switch (byte) {
case 8:
opae_writeq(*(u64 *)data, region->addr + offset);
break;
case 4:
opae_writel(*(u32 *)data, region->addr + offset);
break;
case 2:
opae_writew(*(u16 *)data, region->addr + offset);
break;
case 1:
opae_writeb(*(u8 *)data, region->addr + offset);
break;
default:
return -EINVAL;
}
return 0;
}
struct opae_accelerator_ops ifpga_acc_ops = {
.read = ifpga_acc_read,
.write = ifpga_acc_write,
.set_irq = ifpga_acc_set_irq,
.get_info = ifpga_acc_get_info,
.get_region_info = ifpga_acc_get_region_info,
.get_uuid = ifpga_acc_get_uuid,
};
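/*
 * Illustrative sketch only -- not part of the driver sources: reading the
 * 64-bit CSR at offset 0x8 of AFU region 0 through the ops table above.
 * 'acc' is assumed to be an accelerator produced by enumeration.
 */
static int example_read_afu_csr(struct opae_accelerator *acc, u64 *csr)
{
	/* region 0, offset 0x8, 8-byte access width */
	return ifpga_acc_ops.read(acc, 0, 0x8, 8, csr);
}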
/* Bridge APIs */
static int ifpga_br_reset(struct opae_bridge *br)
{
struct ifpga_port_hw *port = br->data;
return fpga_port_reset(port);
}
struct opae_bridge_ops ifpga_br_ops = {
.reset = ifpga_br_reset,
};
/* Manager APIs */
static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf,
u32 size, u64 *status)
{
struct ifpga_fme_hw *fme = mgr->data;
struct ifpga_hw *hw = fme->parent;
return ifpga_pr(hw, id, buf, size, status);
}
struct opae_manager_ops ifpga_mgr_ops = {
.flash = ifpga_mgr_flash,
};
/* Adapter APIs */
static int ifpga_adapter_enumerate(struct opae_adapter *adapter)
{
	struct ifpga_hw *hw = malloc(sizeof(*hw));
	int ret;

	if (!hw)
		return -ENOMEM;

	memset(hw, 0, sizeof(*hw));
	hw->pci_data = adapter->data;
	hw->adapter = adapter;

	ret = ifpga_bus_enumerate(hw);
	if (ret) {
		free(hw);
		return ret;
	}

	return ifpga_bus_init(hw);
}
struct opae_adapter_ops ifpga_adapter_ops = {
.enumerate = ifpga_adapter_enumerate,
};
/**
 * ifpga_pr - perform partial reconfiguration for a given port device
 * @hw: pointer to the HW structure
 * @port_id: the port device id
 * @buffer: the buffer holding the bitstream
 * @size: the size of the bitstream
 * @status: hardware status, including the PR error code when -EIO is returned.
 *
 * @return
 * - 0: Success, partial reconfiguration finished.
 * - <0: Error code returned by partial reconfiguration.
 **/
int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
u64 *status)
{
if (!is_valid_port_id(hw, port_id))
return -ENODEV;
return do_pr(hw, port_id, buffer, size, status);
}
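/*
 * Illustrative sketch only -- not part of the driver sources: a PR call for
 * port 0. 'buf' and 'size' are assumed to hold a valid green bitstream
 * image; on -EIO the hardware PR error code is reported back in 'status'.
 */
static int example_pr_port0(struct ifpga_hw *hw, void *buf, u32 size)
{
	u64 status = 0;
	int ret = ifpga_pr(hw, 0, buf, size, &status);

	if (ret == -EIO)
		dev_err(hw, "PR failed, hw status 0x%llx\n",
			(unsigned long long)status);
	return ret;
}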
int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
struct feature_prop *prop)
{
if (!hw || !prop)
return -EINVAL;
switch (fiu_id) {
case FEATURE_FIU_ID_FME:
return fme_get_prop(&hw->fme, prop);
case FEATURE_FIU_ID_PORT:
if (!is_valid_port_id(hw, port_id))
return -ENODEV;
return port_get_prop(&hw->port[port_id], prop);
}
return -ENOENT;
}
int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
struct feature_prop *prop)
{
if (!hw || !prop)
return -EINVAL;
switch (fiu_id) {
case FEATURE_FIU_ID_FME:
return fme_set_prop(&hw->fme, prop);
case FEATURE_FIU_ID_PORT:
if (!is_valid_port_id(hw, port_id))
return -ENODEV;
return port_set_prop(&hw->port[port_id], prop);
}
return -ENOENT;
}
int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
u32 feature_id, void *irq_set)
{
if (!hw || !irq_set)
return -EINVAL;
switch (fiu_id) {
case FEATURE_FIU_ID_FME:
return fme_set_irq(&hw->fme, feature_id, irq_set);
case FEATURE_FIU_ID_PORT:
if (!is_valid_port_id(hw, port_id))
return -ENODEV;
return port_set_irq(&hw->port[port_id], feature_id, irq_set);
}
return -ENOENT;
}
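
A usage sketch (not part of this patch) of the property interface above;
'hw' is assumed to come from a completed enumeration, and the constants are
the ones used elsewhere in this series:

	struct feature_prop prop = {
		.feature_id = FME_FEATURE_ID_HEADER,
		.prop_id = FME_HDR_PROP_PORTS_NUM,
	};

	if (!ifpga_get_prop(hw, FEATURE_FIU_ID_FME, 0, &prop))
		dev_info(hw, "FME reports %llu port(s)\n",
			 (unsigned long long)prop.data);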

drivers/raw/ifpga_rawdev/base/ifpga_api.h

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _IFPGA_API_H_
#define _IFPGA_API_H_
#include "opae_hw_api.h"
#include "ifpga_hw.h"
extern struct opae_adapter_ops ifpga_adapter_ops;
extern struct opae_manager_ops ifpga_mgr_ops;
extern struct opae_bridge_ops ifpga_br_ops;
extern struct opae_accelerator_ops ifpga_acc_ops;
/* common APIs */
int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
struct feature_prop *prop);
int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
struct feature_prop *prop);
int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
u32 feature_id, void *irq_set);
/* FME APIs */
int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
u64 *status);
#endif /* _IFPGA_API_H_ */

drivers/raw/ifpga_rawdev/base/ifpga_compat.h

@@ -0,0 +1,58 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _IFPGA_COMPAT_H_
#define _IFPGA_COMPAT_H_
#include "opae_osdep.h"
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
(type *)((char *)__mptr - offsetof(type, member)); })
#define IFPGA_PAGE_SHIFT 12
#define IFPGA_PAGE_SIZE (1 << IFPGA_PAGE_SHIFT)
#define IFPGA_PAGE_MASK (~(IFPGA_PAGE_SIZE - 1))
#define IFPGA_PAGE_ALIGN(addr) (((addr) + IFPGA_PAGE_SIZE - 1)\
& IFPGA_PAGE_MASK)
#define IFPGA_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), IFPGA_PAGE_SIZE)
#define readl(addr) opae_readl(addr)
#define readq(addr) opae_readq(addr)
#define writel(value, addr) opae_writel(value, addr)
#define writeq(value, addr) opae_writeq(value, addr)
#define malloc(size) opae_malloc(size)
#define zmalloc(size) opae_zmalloc(size)
#define free(ptr) opae_free(ptr)
/*
 * Wait for a register's _field to change to the given value (_expect's
 * _field), polling with the given interval and timeout.
 */
#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\
({ \
int wait = 0; \
int ret = -ETIMEDOUT; \
typeof(_expect) value; \
for (; wait <= _timeout; wait += _invl) { \
value.csr = readq(_reg_addr); \
if (_expect._field == value._field) { \
ret = 0; \
break; \
} \
udelay(_invl); \
} \
ret; \
})
#define __maybe_unused __attribute__((__unused__))
#define UNUSED(x) (void)(x)
#endif /* _IFPGA_COMPAT_H_ */
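
A quick sketch (not from this patch) of what the container_of() macro above
computes; 'struct demo' is a made-up type used only for illustration:

	struct demo {
		int a;
		int b;
	};

	struct demo d;
	int *member = &d.b;
	/* recover the enclosing object from a pointer to its member */
	struct demo *obj = container_of(member, struct demo, b); /* obj == &d */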

(File diff suppressed because it is too large.)

drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c

@@ -0,0 +1,821 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "opae_hw_api.h"
#include "ifpga_api.h"
#include "ifpga_hw.h"
#include "ifpga_enumerate.h"
#include "ifpga_feature_dev.h"
struct build_feature_devs_info {
struct opae_adapter_data_pci *pci_data;
struct ifpga_afu_info *acc_info;
void *fiu;
enum fpga_id_type current_type;
int current_port_id;
void *ioaddr;
void *ioend;
uint64_t phys_addr;
int current_bar;
void *pfme_hdr;
struct ifpga_hw *hw;
};
struct feature_info {
const char *name;
u32 resource_size;
int feature_index;
int revision_id;
unsigned int vec_start;
unsigned int vec_cnt;
struct feature_ops *ops;
};
/* Indexed by FME feature IDs, which are defined in 'enum fme_feature_id'. */
static struct feature_info fme_features[] = {
{
.name = FME_FEATURE_HEADER,
.resource_size = sizeof(struct feature_fme_header),
.feature_index = FME_FEATURE_ID_HEADER,
.revision_id = FME_HEADER_REVISION,
.ops = &fme_hdr_ops,
},
{
.name = FME_FEATURE_THERMAL_MGMT,
.resource_size = sizeof(struct feature_fme_thermal),
.feature_index = FME_FEATURE_ID_THERMAL_MGMT,
.revision_id = FME_THERMAL_MGMT_REVISION,
.ops = &fme_thermal_mgmt_ops,
},
{
.name = FME_FEATURE_POWER_MGMT,
.resource_size = sizeof(struct feature_fme_power),
.feature_index = FME_FEATURE_ID_POWER_MGMT,
.revision_id = FME_POWER_MGMT_REVISION,
.ops = &fme_power_mgmt_ops,
},
{
.name = FME_FEATURE_GLOBAL_IPERF,
.resource_size = sizeof(struct feature_fme_iperf),
.feature_index = FME_FEATURE_ID_GLOBAL_IPERF,
.revision_id = FME_GLOBAL_IPERF_REVISION,
.ops = &fme_global_iperf_ops,
},
{
.name = FME_FEATURE_GLOBAL_ERR,
.resource_size = sizeof(struct feature_fme_err),
.feature_index = FME_FEATURE_ID_GLOBAL_ERR,
.revision_id = FME_GLOBAL_ERR_REVISION,
.ops = &fme_global_err_ops,
},
{
.name = FME_FEATURE_PR_MGMT,
.resource_size = sizeof(struct feature_fme_pr),
.feature_index = FME_FEATURE_ID_PR_MGMT,
.revision_id = FME_PR_MGMT_REVISION,
.ops = &fme_pr_mgmt_ops,
},
{
.name = FME_FEATURE_HSSI_ETH,
.resource_size = sizeof(struct feature_fme_hssi),
.feature_index = FME_FEATURE_ID_HSSI_ETH,
.revision_id = FME_HSSI_ETH_REVISION
},
{
.name = FME_FEATURE_GLOBAL_DPERF,
.resource_size = sizeof(struct feature_fme_dperf),
.feature_index = FME_FEATURE_ID_GLOBAL_DPERF,
.revision_id = FME_GLOBAL_DPERF_REVISION,
.ops = &fme_global_dperf_ops,
}
};
static struct feature_info port_features[] = {
{
.name = PORT_FEATURE_HEADER,
.resource_size = sizeof(struct feature_port_header),
.feature_index = PORT_FEATURE_ID_HEADER,
.revision_id = PORT_HEADER_REVISION,
.ops = &port_hdr_ops,
},
{
.name = PORT_FEATURE_ERR,
.resource_size = sizeof(struct feature_port_error),
.feature_index = PORT_FEATURE_ID_ERROR,
.revision_id = PORT_ERR_REVISION,
.ops = &port_error_ops,
},
{
.name = PORT_FEATURE_UMSG,
.resource_size = sizeof(struct feature_port_umsg),
.feature_index = PORT_FEATURE_ID_UMSG,
.revision_id = PORT_UMSG_REVISION,
},
{
.name = PORT_FEATURE_UINT,
.resource_size = sizeof(struct feature_port_uint),
.feature_index = PORT_FEATURE_ID_UINT,
.revision_id = PORT_UINT_REVISION,
.ops = &port_uint_ops,
},
{
.name = PORT_FEATURE_STP,
.resource_size = PORT_FEATURE_STP_REGION_SIZE,
.feature_index = PORT_FEATURE_ID_STP,
.revision_id = PORT_STP_REVISION,
.ops = &port_stp_ops,
},
{
.name = PORT_FEATURE_UAFU,
	/* UAFU feature size should be read from PORT_CAP.MMIOSIZE;
	 * it will be set while parsing the port device.
	 */
.resource_size = 0,
.feature_index = PORT_FEATURE_ID_UAFU,
.revision_id = PORT_UAFU_REVISION
},
};
static u64 feature_id(void __iomem *start)
{
struct feature_header header;
header.csr = readq(start);
switch (header.type) {
case FEATURE_TYPE_FIU:
return FEATURE_ID_HEADER;
case FEATURE_TYPE_PRIVATE:
return header.id;
case FEATURE_TYPE_AFU:
return FEATURE_ID_AFU;
}
WARN_ON(1);
return 0;
}
static int
build_info_add_sub_feature(struct build_feature_devs_info *binfo,
struct feature_info *finfo, void __iomem *start)
{
struct ifpga_hw *hw = binfo->hw;
struct feature *feature = NULL;
int feature_idx = finfo->feature_index;
unsigned int vec_start = finfo->vec_start;
unsigned int vec_cnt = finfo->vec_cnt;
struct feature_irq_ctx *ctx = NULL;
int port_id, ret = 0;
unsigned int i;
if (binfo->current_type == FME_ID) {
feature = &hw->fme.sub_feature[feature_idx];
feature->parent = &hw->fme;
} else if (binfo->current_type == PORT_ID) {
port_id = binfo->current_port_id;
feature = &hw->port[port_id].sub_feature[feature_idx];
feature->parent = &hw->port[port_id];
} else {
return -EFAULT;
}
feature->state = IFPGA_FEATURE_ATTACHED;
feature->addr = start;
feature->id = feature_id(start);
feature->size = finfo->resource_size;
feature->name = finfo->name;
feature->revision = finfo->revision_id;
feature->ops = finfo->ops;
feature->phys_addr = binfo->phys_addr +
((u8 *)start - (u8 *)binfo->ioaddr);
if (vec_cnt) {
if (vec_start + vec_cnt <= vec_start)
return -EINVAL;
ctx = zmalloc(sizeof(*ctx) * vec_cnt);
if (!ctx)
return -ENOMEM;
for (i = 0; i < vec_cnt; i++) {
ctx[i].eventfd = -1;
ctx[i].idx = vec_start + i;
}
}
feature->ctx = ctx;
feature->ctx_num = vec_cnt;
feature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;
return ret;
}
static int
create_feature_instance(struct build_feature_devs_info *binfo,
void __iomem *start, struct feature_info *finfo)
{
struct feature_header *hdr = start;
	if (finfo->revision_id != SKIP_REVISION_CHECK &&
	    hdr->revision > finfo->revision_id) {
		dev_err(binfo, "feature %s revision mismatch: default 0x%x, now 0x%x\n",
			finfo->name, finfo->revision_id, hdr->revision);
	}
return build_info_add_sub_feature(binfo, finfo, start);
}
/*
 * The UAFU GUID is dynamic, as it can change after the FME downloads a
 * different green bitstream to the port, so we treat any unknown GUID
 * attached to the port's feature list as a UAFU.
 */
static bool feature_is_UAFU(struct build_feature_devs_info *binfo)
{
	return binfo->current_type == PORT_ID;
}
static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,
struct feature_header *hdr)
{
enum port_feature_id id = PORT_FEATURE_ID_UAFU;
struct ifpga_afu_info *info;
void *start = (void *)hdr;
int ret;
if (port_features[id].resource_size) {
ret = create_feature_instance(binfo, hdr, &port_features[id]);
} else {
dev_err(binfo, "the uafu feature header is mis-configured.\n");
ret = -EINVAL;
}
if (ret)
return ret;
/* FIXME: need to figure out a better name */
info = malloc(sizeof(*info));
if (!info)
return -ENOMEM;
info->region[0].addr = start;
info->region[0].phys_addr = binfo->phys_addr +
(uint8_t *)start - (uint8_t *)binfo->ioaddr;
info->region[0].len = port_features[id].resource_size;
port_features[id].resource_size = 0;
info->num_regions = 1;
binfo->acc_info = info;
return ret;
}
static int parse_feature_afus(struct build_feature_devs_info *binfo,
struct feature_header *hdr)
{
int ret;
struct feature_afu_header *afu_hdr, header;
u8 __iomem *start;
u8 __iomem *end = binfo->ioend;
start = (u8 __iomem *)hdr;
for (; start < end; start += header.next_afu) {
if ((unsigned int)(end - start) <
(unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))
return -EINVAL;
hdr = (struct feature_header *)start;
afu_hdr = (struct feature_afu_header *)(hdr + 1);
header.csr = readq(&afu_hdr->csr);
if (feature_is_UAFU(binfo)) {
ret = parse_feature_port_uafu(binfo, hdr);
if (ret)
return ret;
}
if (!header.next_afu)
break;
}
return 0;
}
/* create and register proper private data */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
struct ifpga_afu_info *info = binfo->acc_info;
struct ifpga_hw *hw = binfo->hw;
struct opae_manager *mgr;
struct opae_bridge *br;
struct opae_accelerator *acc;
if (!binfo->fiu)
return 0;
if (binfo->current_type == PORT_ID) {
/* return error if no valid acc info data structure */
if (!info)
return -EFAULT;
br = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops,
binfo->fiu);
if (!br)
return -ENOMEM;
br->id = binfo->current_port_id;
/* update irq info */
info->num_irqs = port_features[PORT_FEATURE_ID_UINT].vec_cnt;
acc = opae_accelerator_alloc(hw->adapter->name,
&ifpga_acc_ops, info);
if (!acc) {
opae_bridge_free(br);
return -ENOMEM;
}
acc->br = br;
acc->index = br->id;
opae_adapter_add_acc(hw->adapter, acc);
} else if (binfo->current_type == FME_ID) {
mgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops,
binfo->fiu);
if (!mgr)
return -ENOMEM;
mgr->adapter = hw->adapter;
hw->adapter->mgr = mgr;
}
binfo->fiu = NULL;
return 0;
}
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
enum fpga_id_type type, unsigned int index)
{
int ret;
ret = build_info_commit_dev(binfo);
if (ret)
return ret;
binfo->current_type = type;
if (type == FME_ID) {
binfo->fiu = &binfo->hw->fme;
} else if (type == PORT_ID) {
binfo->fiu = &binfo->hw->port[index];
binfo->current_port_id = index;
}
return 0;
}
static int parse_feature_fme(struct build_feature_devs_info *binfo,
struct feature_header *start)
{
struct ifpga_hw *hw = binfo->hw;
struct ifpga_fme_hw *fme = &hw->fme;
int ret;
ret = build_info_create_dev(binfo, FME_ID, 0);
if (ret)
return ret;
/* Update FME states */
fme->state = IFPGA_FME_IMPLEMENTED;
fme->parent = hw;
spinlock_init(&fme->lock);
return create_feature_instance(binfo, start,
&fme_features[FME_FEATURE_ID_HEADER]);
}
static int parse_feature_port(struct build_feature_devs_info *binfo,
void __iomem *start)
{
struct feature_port_header *port_hdr;
struct feature_port_capability capability;
struct ifpga_hw *hw = binfo->hw;
struct ifpga_port_hw *port;
unsigned int port_id;
int ret;
/* Get current port's id */
port_hdr = (struct feature_port_header *)start;
capability.csr = readq(&port_hdr->capability);
port_id = capability.port_number;
ret = build_info_create_dev(binfo, PORT_ID, port_id);
if (ret)
return ret;
	/* found a Port device */
port = &hw->port[port_id];
port->port_id = binfo->current_port_id;
port->parent = hw;
port->state = IFPGA_PORT_ATTACHED;
spinlock_init(&port->lock);
return create_feature_instance(binfo, start,
&port_features[PORT_FEATURE_ID_HEADER]);
}
static void enable_port_uafu(struct build_feature_devs_info *binfo,
void __iomem *start)
{
enum port_feature_id id = PORT_FEATURE_ID_UAFU;
struct feature_port_header *port_hdr;
struct feature_port_capability capability;
struct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];
port_hdr = (struct feature_port_header *)start;
capability.csr = readq(&port_hdr->capability);
port_features[id].resource_size = (capability.mmio_size << 10);
	/*
	 * Per the spec, to enable the UAFU we must reset the related port,
	 * or the whole MMIO space of this UAFU will be invalid.
	 */
if (port_features[id].resource_size)
fpga_port_reset(port);
}
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
struct feature_header *hdr)
{
struct feature_header header;
struct feature_fiu_header *fiu_hdr, fiu_header;
u8 __iomem *start = (u8 __iomem *)hdr;
int ret;
header.csr = readq(hdr);
switch (header.id) {
case FEATURE_FIU_ID_FME:
ret = parse_feature_fme(binfo, hdr);
binfo->pfme_hdr = hdr;
if (ret)
return ret;
break;
	case FEATURE_FIU_ID_PORT:
		ret = parse_feature_port(binfo, hdr);
		if (ret)
			return ret;
		enable_port_uafu(binfo, hdr);
/* Check Port FIU's next_afu pointer to User AFU DFH */
fiu_hdr = (struct feature_fiu_header *)(hdr + 1);
fiu_header.csr = readq(&fiu_hdr->csr);
if (fiu_header.next_afu) {
start += fiu_header.next_afu;
ret = parse_feature_afus(binfo,
(struct feature_header *)start);
if (ret)
return ret;
} else {
dev_info(binfo, "No AFUs detected on Port\n");
}
break;
default:
dev_info(binfo, "FIU TYPE %d is not supported yet.\n",
header.id);
}
return 0;
}
static void parse_feature_irqs(struct build_feature_devs_info *binfo,
void __iomem *start, struct feature_info *finfo)
{
finfo->vec_start = 0;
finfo->vec_cnt = 0;
UNUSED(binfo);
if (!strcmp(finfo->name, PORT_FEATURE_UINT)) {
struct feature_port_uint *port_uint = start;
struct feature_port_uint_cap uint_cap;
uint_cap.csr = readq(&port_uint->capability);
if (uint_cap.intr_num) {
finfo->vec_start = uint_cap.first_vec_num;
finfo->vec_cnt = uint_cap.intr_num;
} else {
dev_debug(binfo, "UAFU doesn't support interrupt\n");
}
} else if (!strcmp(finfo->name, PORT_FEATURE_ERR)) {
struct feature_port_error *port_err = start;
struct feature_port_err_capability port_err_cap;
port_err_cap.csr = readq(&port_err->error_capability);
if (port_err_cap.support_intr) {
finfo->vec_start = port_err_cap.intr_vector_num;
finfo->vec_cnt = 1;
} else {
			dev_debug(binfo, "Port error doesn't support interrupt\n");
}
} else if (!strcmp(finfo->name, FME_FEATURE_GLOBAL_ERR)) {
struct feature_fme_err *fme_err = start;
struct feature_fme_error_capability fme_err_cap;
fme_err_cap.csr = readq(&fme_err->fme_err_capability);
if (fme_err_cap.support_intr) {
finfo->vec_start = fme_err_cap.intr_vector_num;
finfo->vec_cnt = 1;
} else {
			dev_debug(binfo, "FME error doesn't support interrupt\n");
}
}
}
static int parse_feature_fme_private(struct build_feature_devs_info *binfo,
struct feature_header *hdr)
{
struct feature_header header;
header.csr = readq(hdr);
if (header.id >= ARRAY_SIZE(fme_features)) {
dev_err(binfo, "FME feature id %x is not supported yet.\n",
header.id);
return 0;
}
parse_feature_irqs(binfo, hdr, &fme_features[header.id]);
return create_feature_instance(binfo, hdr, &fme_features[header.id]);
}
static int parse_feature_port_private(struct build_feature_devs_info *binfo,
struct feature_header *hdr)
{
struct feature_header header;
enum port_feature_id id;
header.csr = readq(hdr);
	/*
	 * Port feature ids occupy the range [0x10, 0x13]; add 1 to reserve 0,
	 * which is dedicated to the port header.
	 */
id = (header.id & 0x000f) + 1;
if (id >= ARRAY_SIZE(port_features)) {
dev_err(binfo, "Port feature id %x is not supported yet.\n",
header.id);
return 0;
}
parse_feature_irqs(binfo, hdr, &port_features[id]);
return create_feature_instance(binfo, hdr, &port_features[id]);
}
static int parse_feature_private(struct build_feature_devs_info *binfo,
struct feature_header *hdr)
{
struct feature_header header;
header.csr = readq(hdr);
switch (binfo->current_type) {
case FME_ID:
return parse_feature_fme_private(binfo, hdr);
case PORT_ID:
return parse_feature_port_private(binfo, hdr);
default:
dev_err(binfo, "private feature %x belonging to AFU %d (unknown_type) is not supported yet.\n",
header.id, binfo->current_type);
}
return 0;
}
static int parse_feature(struct build_feature_devs_info *binfo,
struct feature_header *hdr)
{
struct feature_header header;
int ret = 0;
header.csr = readq(hdr);
switch (header.type) {
case FEATURE_TYPE_AFU:
ret = parse_feature_afus(binfo, hdr);
break;
case FEATURE_TYPE_PRIVATE:
ret = parse_feature_private(binfo, hdr);
break;
case FEATURE_TYPE_FIU:
ret = parse_feature_fiu(binfo, hdr);
break;
	default:
		dev_err(binfo, "Feature Type %x is not supported.\n",
			header.type);
	}
return ret;
}
static int
parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)
{
struct feature_header *hdr, header;
u8 __iomem *end = (u8 __iomem *)binfo->ioend;
int ret = 0;
for (; start < end; start += header.next_header_offset) {
if ((unsigned int)(end - start) < (unsigned int)sizeof(*hdr)) {
dev_err(binfo, "The region is too small to contain a feature.\n");
ret = -EINVAL;
break;
}
hdr = (struct feature_header *)start;
ret = parse_feature(binfo, hdr);
if (ret)
return ret;
header.csr = readq(hdr);
if (!header.next_header_offset)
break;
}
return build_info_commit_dev(binfo);
}
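/*
 * Note (illustrative, offsets made up): parse_feature_list() above walks
 * the Device Feature Header (DFH) linked list, hopping by
 * next_header_offset until the offset reads back as 0:
 *
 *   BAR + 0x0000  FIU header (FME or Port)  --next_header_offset-->
 *   BAR + 0x1000  private feature           --next_header_offset-->
 *   BAR + 0x2000  private feature           --next_header_offset = 0
 */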
/* switch the memory mapping to BAR# @bar */
static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)
{
struct opae_adapter_data_pci *pci_data = binfo->pci_data;
if (!pci_data->region[bar].addr)
return -ENOMEM;
binfo->ioaddr = pci_data->region[bar].addr;
binfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;
binfo->phys_addr = pci_data->region[bar].phys_addr;
binfo->current_bar = bar;
return 0;
}
static int parse_ports_from_fme(struct build_feature_devs_info *binfo)
{
struct feature_fme_header *fme_hdr;
struct feature_fme_port port;
int i = 0, ret = 0;
if (!binfo->pfme_hdr) {
dev_info(binfo, "VF is detected.\n");
return ret;
}
fme_hdr = binfo->pfme_hdr;
do {
port.csr = readq(&fme_hdr->port[i]);
if (!port.port_implemented)
break;
		/* skip ports that can only be accessed via VF */
if (port.afu_access_control == FME_AFU_ACCESS_VF)
continue;
ret = parse_switch_to(binfo, port.port_bar);
if (ret)
break;
ret = parse_feature_list(binfo,
(u8 __iomem *)binfo->ioaddr +
port.port_offset);
if (ret)
break;
} while (++i < MAX_FPGA_PORT_NUM);
return ret;
}
static struct build_feature_devs_info *
build_info_alloc_and_init(struct ifpga_hw *hw)
{
struct build_feature_devs_info *binfo;
binfo = zmalloc(sizeof(*binfo));
if (!binfo)
return binfo;
binfo->hw = hw;
binfo->pci_data = hw->pci_data;
/* fpga feature list starts from BAR 0 */
if (parse_switch_to(binfo, 0)) {
free(binfo);
return NULL;
}
return binfo;
}
static void build_info_free(struct build_feature_devs_info *binfo)
{
free(binfo);
}
static void ifpga_print_device_feature_list(struct ifpga_hw *hw)
{
struct ifpga_fme_hw *fme = &hw->fme;
struct ifpga_port_hw *port;
struct feature *feature;
int i, j;
	dev_info(hw, "found FME device, in PF: %s\n",
is_ifpga_hw_pf(hw) ? "yes" : "no");
for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
feature = &fme->sub_feature[i];
if (feature->state != IFPGA_FEATURE_ATTACHED)
continue;
dev_info(hw, "%12s: 0x%p - 0x%p - paddr: 0x%lx\n",
feature->name, feature->addr,
feature->addr + feature->size - 1,
(unsigned long)feature->phys_addr);
}
for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
port = &hw->port[i];
if (port->state != IFPGA_PORT_ATTACHED)
continue;
dev_info(hw, "port device: %d\n", port->port_id);
for (j = 0; j < PORT_FEATURE_ID_MAX; j++) {
feature = &port->sub_feature[j];
if (feature->state != IFPGA_FEATURE_ATTACHED)
continue;
dev_info(hw, "%12s: 0x%p - 0x%p - paddr:0x%lx\n",
feature->name,
feature->addr,
feature->addr +
feature->size - 1,
(unsigned long)feature->phys_addr);
}
}
}
int ifpga_bus_enumerate(struct ifpga_hw *hw)
{
struct build_feature_devs_info *binfo;
int ret;
binfo = build_info_alloc_and_init(hw);
if (!binfo)
return -ENOMEM;
ret = parse_feature_list(binfo, binfo->ioaddr);
if (ret)
goto exit;
ret = parse_ports_from_fme(binfo);
if (ret)
goto exit;
ifpga_print_device_feature_list(hw);
exit:
build_info_free(binfo);
return ret;
}
int ifpga_bus_init(struct ifpga_hw *hw)
{
int i;
fme_hw_init(&hw->fme);
for (i = 0; i < MAX_FPGA_PORT_NUM; i++)
port_hw_init(&hw->port[i]);
return 0;
}

drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h

@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _IFPGA_ENUMERATE_H_
#define _IFPGA_ENUMERATE_H_
int ifpga_bus_init(struct ifpga_hw *hw);
int ifpga_bus_enumerate(struct ifpga_hw *hw);
#endif /* _IFPGA_ENUMERATE_H_ */

drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c

@@ -0,0 +1,253 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include <sys/ioctl.h>
#include "ifpga_feature_dev.h"
/*
 * Enable the port by clearing the port soft reset bit, which is set by
 * default. The AFU is unable to respond to any MMIO access while in reset.
 * __fpga_port_enable() should only be used after __fpga_port_disable().
 */
void __fpga_port_enable(struct ifpga_port_hw *port)
{
struct feature_port_header *port_hdr;
struct feature_port_control control;
WARN_ON(!port->disable_count);
if (--port->disable_count != 0)
return;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
WARN_ON(!port_hdr);
control.csr = readq(&port_hdr->control);
control.port_sftrst = 0x0;
writeq(control.csr, &port_hdr->control);
}
int __fpga_port_disable(struct ifpga_port_hw *port)
{
struct feature_port_header *port_hdr;
struct feature_port_control control;
if (port->disable_count++ != 0)
return 0;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
WARN_ON(!port_hdr);
/* Set port soft reset */
control.csr = readq(&port_hdr->control);
control.port_sftrst = 0x1;
writeq(control.csr, &port_hdr->control);
	/*
	 * HW sets the ack bit to 1 when all outstanding requests have been
	 * drained on this port and the minimum soft reset pulse width has
	 * elapsed. The driver polls port_sftrst_ack to determine when the
	 * reset is done by HW.
	 */
control.port_sftrst_ack = 1;
if (fpga_wait_register_field(port_sftrst_ack, control,
&port_hdr->control, RST_POLL_TIMEOUT,
RST_POLL_INVL)) {
		dev_err(port, "timeout, failed to reset device\n");
return -ETIMEDOUT;
}
return 0;
}
int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
{
struct feature_port_header *port_hdr;
u64 guidl, guidh;
port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
spinlock_lock(&port->lock);
guidl = readq(&port_hdr->afu_header.guid.b[0]);
guidh = readq(&port_hdr->afu_header.guid.b[8]);
spinlock_unlock(&port->lock);
memcpy(uuid->b, &guidl, sizeof(u64));
memcpy(uuid->b + 8, &guidh, sizeof(u64));
return 0;
}
/* Mask / Unmask Port Errors by the Error Mask register. */
void port_err_mask(struct ifpga_port_hw *port, bool mask)
{
struct feature_port_error *port_err;
struct feature_port_err_key err_mask;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
if (mask)
err_mask.csr = PORT_ERR_MASK;
else
err_mask.csr = 0;
writeq(err_mask.csr, &port_err->error_mask);
}
/* Clear All Port Errors. */
int port_err_clear(struct ifpga_port_hw *port, u64 err)
{
struct feature_port_header *port_hdr;
struct feature_port_error *port_err;
struct feature_port_err_key mask;
struct feature_port_first_err_key first;
struct feature_port_status status;
int ret = 0;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
/*
* Clear All Port Errors
*
* - Check for AP6 State
* - Halt Port by keeping Port in reset
* - Set PORT Error mask to all 1 to mask errors
* - Clear all errors
* - Set Port mask to all 0 to enable errors
* - All errors start capturing new errors
* - Enable Port by pulling the port out of reset
*/
	/* If the device is still in AP6 state, no error can be cleared. */
	status.csr = readq(&port_hdr->status);
	if (status.power_state == PORT_POWER_STATE_AP6) {
		dev_err(port, "Could not clear errors, device in AP6 state.\n");
		return -EBUSY;
	}
/* Halt Port by keeping Port in reset */
ret = __fpga_port_disable(port);
if (ret)
return ret;
/* Mask all errors */
port_err_mask(port, true);
	/* Clear errors only if the err input matches the current port errors. */
mask.csr = readq(&port_err->port_error);
if (mask.csr == err) {
writeq(mask.csr, &port_err->port_error);
first.csr = readq(&port_err->port_first_error);
writeq(first.csr, &port_err->port_first_error);
} else {
ret = -EBUSY;
}
/* Clear mask */
port_err_mask(port, false);
	/* Enable the port by clearing the reset */
__fpga_port_enable(port);
return ret;
}
int port_clear_error(struct ifpga_port_hw *port)
{
struct feature_port_error *port_err;
struct feature_port_err_key error;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
error.csr = readq(&port_err->port_error);
dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
return port_err_clear(port, error.csr);
}
void fme_hw_uinit(struct ifpga_fme_hw *fme)
{
struct feature *feature;
int i;
if (fme->state != IFPGA_FME_IMPLEMENTED)
return;
for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
feature = &fme->sub_feature[i];
if (feature->state == IFPGA_FEATURE_ATTACHED &&
feature->ops && feature->ops->uinit)
feature->ops->uinit(feature);
}
}
int fme_hw_init(struct ifpga_fme_hw *fme)
{
struct feature *feature;
int i, ret;
if (fme->state != IFPGA_FME_IMPLEMENTED)
return -EINVAL;
for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
feature = &fme->sub_feature[i];
if (feature->state == IFPGA_FEATURE_ATTACHED &&
feature->ops && feature->ops->init) {
ret = feature->ops->init(feature);
if (ret) {
fme_hw_uinit(fme);
return ret;
}
}
}
return 0;
}
void port_hw_uinit(struct ifpga_port_hw *port)
{
struct feature *feature;
int i;
for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
feature = &port->sub_feature[i];
if (feature->state == IFPGA_FEATURE_ATTACHED &&
feature->ops && feature->ops->uinit)
feature->ops->uinit(feature);
}
}
int port_hw_init(struct ifpga_port_hw *port)
{
struct feature *feature;
int i, ret;
if (port->state == IFPGA_PORT_UNUSED)
return 0;
for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
feature = &port->sub_feature[i];
if (feature->ops && feature->ops->init) {
ret = feature->ops->init(feature);
if (ret) {
port_hw_uinit(port);
return ret;
}
}
}
return 0;
}

drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h

@@ -0,0 +1,164 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _IFPGA_FEATURE_DEV_H_
#define _IFPGA_FEATURE_DEV_H_
#include "ifpga_hw.h"
static inline struct ifpga_port_hw *
get_port(struct ifpga_hw *hw, u32 port_id)
{
if (!is_valid_port_id(hw, port_id))
return NULL;
return &hw->port[port_id];
}
#define ifpga_for_each_feature(hw, feature) \
for ((feature) = (hw)->sub_feature; \
(feature) < (hw)->sub_feature + (FME_FEATURE_ID_MAX); (feature)++)
static inline struct feature *
get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)
{
struct feature *feature;
ifpga_for_each_feature(fme, feature) {
if (feature->id == id)
return feature;
}
return NULL;
}
static inline struct feature *
get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)
{
struct feature *feature;
ifpga_for_each_feature(port, feature) {
if (feature->id == id)
return feature;
}
return NULL;
}
static inline void *
get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)
{
return fme->sub_feature[index].addr;
}
static inline void *
get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)
{
return port->sub_feature[index].addr;
}
static inline bool
is_fme_feature_present(struct ifpga_fme_hw *fme, int index)
{
return !!get_fme_feature_ioaddr_by_index(fme, index);
}
static inline bool
is_port_feature_present(struct ifpga_port_hw *port, int index)
{
return !!get_port_feature_ioaddr_by_index(port, index);
}
int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid);
int __fpga_port_disable(struct ifpga_port_hw *port);
void __fpga_port_enable(struct ifpga_port_hw *port);
static inline int fpga_port_disable(struct ifpga_port_hw *port)
{
int ret;
spinlock_lock(&port->lock);
ret = __fpga_port_disable(port);
spinlock_unlock(&port->lock);
return ret;
}
static inline int fpga_port_enable(struct ifpga_port_hw *port)
{
spinlock_lock(&port->lock);
__fpga_port_enable(port);
spinlock_unlock(&port->lock);
return 0;
}
static inline int __fpga_port_reset(struct ifpga_port_hw *port)
{
int ret;
ret = __fpga_port_disable(port);
if (ret)
return ret;
__fpga_port_enable(port);
return 0;
}
static inline int fpga_port_reset(struct ifpga_port_hw *port)
{
int ret;
spinlock_lock(&port->lock);
ret = __fpga_port_reset(port);
spinlock_unlock(&port->lock);
return ret;
}
int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
u64 *status);
int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);
int fme_hw_init(struct ifpga_fme_hw *fme);
void fme_hw_uinit(struct ifpga_fme_hw *fme);
void port_hw_uinit(struct ifpga_port_hw *port);
int port_hw_init(struct ifpga_port_hw *port);
int port_clear_error(struct ifpga_port_hw *port);
void port_err_mask(struct ifpga_port_hw *port, bool mask);
int port_err_clear(struct ifpga_port_hw *port, u64 err);
extern struct feature_ops fme_hdr_ops;
extern struct feature_ops fme_thermal_mgmt_ops;
extern struct feature_ops fme_power_mgmt_ops;
extern struct feature_ops fme_global_err_ops;
extern struct feature_ops fme_pr_mgmt_ops;
extern struct feature_ops fme_global_iperf_ops;
extern struct feature_ops fme_global_dperf_ops;
int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
/* This struct is used when parsing uafu irq_set */
struct fpga_uafu_irq_set {
u32 start;
u32 count;
s32 *evtfds;
};
int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
extern struct feature_ops port_hdr_ops;
extern struct feature_ops port_error_ops;
extern struct feature_ops port_stp_ops;
extern struct feature_ops port_uint_ops;
/* helper functions for feature ops */
int fpga_msix_set_block(struct feature *feature, unsigned int start,
unsigned int count, s32 *fds);
#endif /* _IFPGA_FEATURE_DEV_H_ */
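
A hedged sketch (not part of this patch) of how 'struct fpga_uafu_irq_set'
travels through the stack; the eventfd values and port id 0 are hypothetical,
and IFPGA_PORT_FEATURE_ID_UINT is the feature id used by ifpga_acc_set_irq()
in ifpga_api.c:

	s32 evtfds[2] = { 10, 11 };	/* hypothetical eventfd descriptors */
	struct fpga_uafu_irq_set irqs = {
		.start = 0,
		.count = 2,
		.evtfds = evtfds,
	};

	int ret = ifpga_set_irq(hw, FEATURE_FIU_ID_PORT, 0,
				IFPGA_PORT_FEATURE_ID_UINT, &irqs);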

drivers/raw/ifpga_rawdev/base/ifpga_fme.c

@@ -0,0 +1,734 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_feature_dev.h"
#define PWR_THRESHOLD_MAX 0x7F
int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
{
struct feature *feature;
if (!fme)
return -ENOENT;
feature = get_fme_feature_by_id(fme, prop->feature_id);
if (feature && feature->ops && feature->ops->get_prop)
return feature->ops->get_prop(feature, prop);
return -ENOENT;
}
int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
{
struct feature *feature;
if (!fme)
return -ENOENT;
feature = get_fme_feature_by_id(fme, prop->feature_id);
if (feature && feature->ops && feature->ops->set_prop)
return feature->ops->set_prop(feature, prop);
return -ENOENT;
}
int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
{
struct feature *feature;
if (!fme)
return -ENOENT;
feature = get_fme_feature_by_id(fme, feature_id);
if (feature && feature->ops && feature->ops->set_irq)
return feature->ops->set_irq(feature, irq_set);
return -ENOENT;
}
/* FME private feature header */
static int fme_hdr_init(struct feature *feature)
{
struct feature_fme_header *fme_hdr;
fme_hdr = (struct feature_fme_header *)feature->addr;
dev_info(NULL, "FME HDR Init.\n");
dev_info(NULL, "FME cap %llx.\n",
(unsigned long long)fme_hdr->capability.csr);
return 0;
}
static void fme_hdr_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME HDR UInit.\n");
}
static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
struct feature_fme_header *fme_hdr
= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
struct feature_header header;
header.csr = readq(&fme_hdr->header);
*revision = header.revision;
return 0;
}
static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
{
struct feature_fme_header *fme_hdr
= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
struct feature_fme_capability fme_capability;
fme_capability.csr = readq(&fme_hdr->capability);
*ports_num = fme_capability.num_ports;
return 0;
}
static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
{
struct feature_fme_header *fme_hdr
= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
struct feature_fme_capability fme_capability;
fme_capability.csr = readq(&fme_hdr->capability);
*cache_size = fme_capability.cache_size;
return 0;
}
static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
{
struct feature_fme_header *fme_hdr
= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
struct feature_fme_capability fme_capability;
fme_capability.csr = readq(&fme_hdr->capability);
*version = fme_capability.fabric_verid;
return 0;
}
static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
{
struct feature_fme_header *fme_hdr
= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
struct feature_fme_capability fme_capability;
fme_capability.csr = readq(&fme_hdr->capability);
*socket_id = fme_capability.socket_id;
return 0;
}
static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
u64 *bitstream_id)
{
struct feature_fme_header *fme_hdr
= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
*bitstream_id = readq(&fme_hdr->bitstream_id);
return 0;
}
static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
u64 *bitstream_metadata)
{
struct feature_fme_header *fme_hdr
= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
*bitstream_metadata = readq(&fme_hdr->bitstream_md);
return 0;
}
static int
fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
switch (prop->prop_id) {
case FME_HDR_PROP_REVISION:
return fme_hdr_get_revision(fme, &prop->data);
case FME_HDR_PROP_PORTS_NUM:
return fme_hdr_get_ports_num(fme, &prop->data);
case FME_HDR_PROP_CACHE_SIZE:
return fme_hdr_get_cache_size(fme, &prop->data);
case FME_HDR_PROP_VERSION:
return fme_hdr_get_version(fme, &prop->data);
case FME_HDR_PROP_SOCKET_ID:
return fme_hdr_get_socket_id(fme, &prop->data);
case FME_HDR_PROP_BITSTREAM_ID:
return fme_hdr_get_bitstream_id(fme, &prop->data);
case FME_HDR_PROP_BITSTREAM_METADATA:
return fme_hdr_get_bitstream_metadata(fme, &prop->data);
}
return -ENOENT;
}
struct feature_ops fme_hdr_ops = {
.init = fme_hdr_init,
.uinit = fme_hdr_uinit,
.get_prop = fme_hdr_get_prop,
};
/* thermal management */
static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
{
struct feature_fme_thermal *thermal;
struct feature_fme_tmp_threshold temp_threshold;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
temp_threshold.csr = readq(&thermal->threshold);
*thres1 = temp_threshold.tmp_thshold1;
return 0;
}
static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
{
struct feature_fme_thermal *thermal;
struct feature_fme_header *fme_hdr;
struct feature_fme_tmp_threshold tmp_threshold;
struct feature_fme_capability fme_capability;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
spinlock_lock(&fme->lock);
tmp_threshold.csr = readq(&thermal->threshold);
fme_capability.csr = readq(&fme_hdr->capability);
if (fme_capability.lock_bit == 1) {
spinlock_unlock(&fme->lock);
return -EBUSY;
} else if (thres1 > 100) {
spinlock_unlock(&fme->lock);
return -EINVAL;
} else if (thres1 == 0) {
tmp_threshold.tmp_thshold1_enable = 0;
tmp_threshold.tmp_thshold1 = thres1;
} else {
tmp_threshold.tmp_thshold1_enable = 1;
tmp_threshold.tmp_thshold1 = thres1;
}
writeq(tmp_threshold.csr, &thermal->threshold);
spinlock_unlock(&fme->lock);
return 0;
}
static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
{
struct feature_fme_thermal *thermal;
struct feature_fme_tmp_threshold temp_threshold;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
temp_threshold.csr = readq(&thermal->threshold);
*thres2 = temp_threshold.tmp_thshold2;
return 0;
}
static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
{
struct feature_fme_thermal *thermal;
struct feature_fme_header *fme_hdr;
struct feature_fme_tmp_threshold tmp_threshold;
struct feature_fme_capability fme_capability;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
spinlock_lock(&fme->lock);
tmp_threshold.csr = readq(&thermal->threshold);
fme_capability.csr = readq(&fme_hdr->capability);
if (fme_capability.lock_bit == 1) {
spinlock_unlock(&fme->lock);
return -EBUSY;
} else if (thres2 > 100) {
spinlock_unlock(&fme->lock);
return -EINVAL;
} else if (thres2 == 0) {
tmp_threshold.tmp_thshold2_enable = 0;
tmp_threshold.tmp_thshold2 = thres2;
} else {
tmp_threshold.tmp_thshold2_enable = 1;
tmp_threshold.tmp_thshold2 = thres2;
}
writeq(tmp_threshold.csr, &thermal->threshold);
spinlock_unlock(&fme->lock);
return 0;
}
static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
u64 *thres_trip)
{
struct feature_fme_thermal *thermal;
struct feature_fme_tmp_threshold temp_threshold;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
temp_threshold.csr = readq(&thermal->threshold);
*thres_trip = temp_threshold.therm_trip_thshold;
return 0;
}
static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
u64 *thres1_reached)
{
struct feature_fme_thermal *thermal;
struct feature_fme_tmp_threshold temp_threshold;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
temp_threshold.csr = readq(&thermal->threshold);
*thres1_reached = temp_threshold.thshold1_status;
return 0;
}
static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
					      u64 *thres2_reached)
{
	struct feature_fme_thermal *thermal;
	struct feature_fme_tmp_threshold temp_threshold;

	thermal = get_fme_feature_ioaddr_by_index(fme,
						  FME_FEATURE_ID_THERMAL_MGMT);
	temp_threshold.csr = readq(&thermal->threshold);
	*thres2_reached = temp_threshold.thshold2_status;
	return 0;
}
static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
u64 *thres1_policy)
{
struct feature_fme_thermal *thermal;
struct feature_fme_tmp_threshold temp_threshold;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
temp_threshold.csr = readq(&thermal->threshold);
*thres1_policy = temp_threshold.thshold_policy;
return 0;
}
static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
u64 thres1_policy)
{
struct feature_fme_thermal *thermal;
struct feature_fme_tmp_threshold tmp_threshold;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
spinlock_lock(&fme->lock);
tmp_threshold.csr = readq(&thermal->threshold);
if (thres1_policy == 0) {
tmp_threshold.thshold_policy = 0;
} else if (thres1_policy == 1) {
tmp_threshold.thshold_policy = 1;
} else {
spinlock_unlock(&fme->lock);
return -EINVAL;
}
writeq(tmp_threshold.csr, &thermal->threshold);
spinlock_unlock(&fme->lock);
return 0;
}
static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
{
struct feature_fme_thermal *thermal;
struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
thermal = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
*temp = temp_rdsensor_fmt1.fpga_temp;
return 0;
}
static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
struct feature_fme_thermal *fme_thermal
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_THERMAL_MGMT);
struct feature_header header;
header.csr = readq(&fme_thermal->header);
*revision = header.revision;
return 0;
}
#define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
static int fme_thermal_mgmt_init(struct feature *feature)
{
	struct feature_fme_thermal *fme_thermal;
	struct feature_fme_tmp_threshold_cap thermal_cap;

	dev_info(NULL, "FME thermal mgmt Init.\n");
	fme_thermal = (struct feature_fme_thermal *)feature->addr;
	thermal_cap.csr = readq(&fme_thermal->threshold_cap);
	dev_info(NULL, "FME thermal cap %llx.\n",
		 (unsigned long long)thermal_cap.csr);
if (thermal_cap.tmp_thshold_disabled)
feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
return 0;
}
static void fme_thermal_mgmt_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME thermal mgmt UInit.\n");
}
static int
fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
return -ENOENT;
switch (prop->prop_id) {
case FME_THERMAL_PROP_THRESHOLD1:
return fme_thermal_set_threshold1(fme, prop->data);
case FME_THERMAL_PROP_THRESHOLD2:
return fme_thermal_set_threshold2(fme, prop->data);
case FME_THERMAL_PROP_THRESHOLD1_POLICY:
return fme_thermal_set_threshold1_policy(fme, prop->data);
}
return -ENOENT;
}
static int
fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
prop->prop_id != FME_THERMAL_PROP_REVISION)
return -ENOENT;
switch (prop->prop_id) {
case FME_THERMAL_PROP_THRESHOLD1:
return fme_thermal_get_threshold1(fme, &prop->data);
case FME_THERMAL_PROP_THRESHOLD2:
return fme_thermal_get_threshold2(fme, &prop->data);
case FME_THERMAL_PROP_THRESHOLD_TRIP:
return fme_thermal_get_threshold_trip(fme, &prop->data);
case FME_THERMAL_PROP_THRESHOLD1_REACHED:
return fme_thermal_get_threshold1_reached(fme, &prop->data);
case FME_THERMAL_PROP_THRESHOLD2_REACHED:
return fme_thermal_get_threshold2_reached(fme, &prop->data);
case FME_THERMAL_PROP_THRESHOLD1_POLICY:
return fme_thermal_get_threshold1_policy(fme, &prop->data);
case FME_THERMAL_PROP_TEMPERATURE:
return fme_thermal_get_temperature(fme, &prop->data);
case FME_THERMAL_PROP_REVISION:
return fme_thermal_get_revision(fme, &prop->data);
}
return -ENOENT;
}
struct feature_ops fme_thermal_mgmt_ops = {
.init = fme_thermal_mgmt_init,
.uinit = fme_thermal_mgmt_uinit,
.get_prop = fme_thermal_get_prop,
.set_prop = fme_thermal_set_prop,
};
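/*
 * Illustrative sketch only -- not part of the driver sources: setting
 * thermal threshold1 to 90 degrees Celsius through the generic property
 * path; fme_thermal_set_threshold1() above rejects values over 100.
 */
static int example_set_thermal_threshold1(struct ifpga_hw *hw)
{
	struct feature_prop prop = {
		.feature_id = FME_FEATURE_ID_THERMAL_MGMT,
		.prop_id = FME_THERMAL_PROP_THRESHOLD1,
		.data = 90,
	};

	return ifpga_set_prop(hw, FEATURE_FIU_ID_FME, 0, &prop);
}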
static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_status pm_status;
pm_status.csr = readq(&fme_power->status);
*consumed = pm_status.pwr_consumed;
return 0;
}
static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_ap_threshold pm_ap_threshold;
pm_ap_threshold.csr = readq(&fme_power->threshold);
*threshold = pm_ap_threshold.threshold1;
return 0;
}
static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_ap_threshold pm_ap_threshold;
spinlock_lock(&fme->lock);
pm_ap_threshold.csr = readq(&fme_power->threshold);
if (threshold <= PWR_THRESHOLD_MAX) {
pm_ap_threshold.threshold1 = threshold;
} else {
spinlock_unlock(&fme->lock);
return -EINVAL;
}
writeq(pm_ap_threshold.csr, &fme_power->threshold);
spinlock_unlock(&fme->lock);
return 0;
}
static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_ap_threshold pm_ap_threshold;
pm_ap_threshold.csr = readq(&fme_power->threshold);
*threshold = pm_ap_threshold.threshold2;
return 0;
}
static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_ap_threshold pm_ap_threshold;
spinlock_lock(&fme->lock);
pm_ap_threshold.csr = readq(&fme_power->threshold);
if (threshold <= PWR_THRESHOLD_MAX) {
pm_ap_threshold.threshold2 = threshold;
} else {
spinlock_unlock(&fme->lock);
return -EINVAL;
}
writeq(pm_ap_threshold.csr, &fme_power->threshold);
spinlock_unlock(&fme->lock);
return 0;
}
static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
u64 *threshold_status)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_ap_threshold pm_ap_threshold;
pm_ap_threshold.csr = readq(&fme_power->threshold);
*threshold_status = pm_ap_threshold.threshold1_status;
return 0;
}
static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
u64 *threshold_status)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_ap_threshold pm_ap_threshold;
pm_ap_threshold.csr = readq(&fme_power->threshold);
*threshold_status = pm_ap_threshold.threshold2_status;
return 0;
}
static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_status pm_status;
pm_status.csr = readq(&fme_power->status);
*rtl = pm_status.fpga_latency_report;
return 0;
}
static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_xeon_limit xeon_limit;
xeon_limit.csr = readq(&fme_power->xeon_limit);
if (!xeon_limit.enable)
xeon_limit.pwr_limit = 0;
*limit = xeon_limit.pwr_limit;
return 0;
}
static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_fme_pm_fpga_limit fpga_limit;
fpga_limit.csr = readq(&fme_power->fpga_limit);
if (!fpga_limit.enable)
fpga_limit.pwr_limit = 0;
*limit = fpga_limit.pwr_limit;
return 0;
}
static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
struct feature_fme_power *fme_power
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_POWER_MGMT);
struct feature_header header;
header.csr = readq(&fme_power->header);
*revision = header.revision;
return 0;
}
static int fme_power_mgmt_init(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME power mgmt Init.\n");
return 0;
}
static void fme_power_mgmt_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME power mgmt UInit.\n");
}
static int fme_power_mgmt_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
switch (prop->prop_id) {
case FME_PWR_PROP_CONSUMED:
return fme_pwr_get_consumed(fme, &prop->data);
case FME_PWR_PROP_THRESHOLD1:
return fme_pwr_get_threshold1(fme, &prop->data);
case FME_PWR_PROP_THRESHOLD2:
return fme_pwr_get_threshold2(fme, &prop->data);
case FME_PWR_PROP_THRESHOLD1_STATUS:
return fme_pwr_get_threshold1_status(fme, &prop->data);
case FME_PWR_PROP_THRESHOLD2_STATUS:
return fme_pwr_get_threshold2_status(fme, &prop->data);
case FME_PWR_PROP_RTL:
return fme_pwr_get_rtl(fme, &prop->data);
case FME_PWR_PROP_XEON_LIMIT:
return fme_pwr_get_xeon_limit(fme, &prop->data);
case FME_PWR_PROP_FPGA_LIMIT:
return fme_pwr_get_fpga_limit(fme, &prop->data);
case FME_PWR_PROP_REVISION:
return fme_pwr_get_revision(fme, &prop->data);
}
return -ENOENT;
}
static int fme_power_mgmt_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
switch (prop->prop_id) {
case FME_PWR_PROP_THRESHOLD1:
return fme_pwr_set_threshold1(fme, prop->data);
case FME_PWR_PROP_THRESHOLD2:
return fme_pwr_set_threshold2(fme, prop->data);
}
return -ENOENT;
}
struct feature_ops fme_power_mgmt_ops = {
.init = fme_power_mgmt_init,
.uinit = fme_power_mgmt_uinit,
.get_prop = fme_power_mgmt_get_prop,
.set_prop = fme_power_mgmt_set_prop,
};
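
All access to the power-management block funnels through the get_prop/set_prop pairs above. As a hedged illustration (not part of the original sources), a caller holding the power-management sub-feature might program and verify threshold2 as below; the FME_PWR_PROP_* IDs are the ones dispatched in the switch above, everything else is assumed context.

/* Illustrative sketch: "feature" is assumed to be the attached
 * FME_FEATURE_ID_POWER_MGMT sub-feature of an initialized FME.
 */
static int example_set_threshold2(struct feature *feature, u64 mw)
{
	struct feature_prop prop;
	int ret;

	prop.prop_id = FME_PWR_PROP_THRESHOLD2;
	prop.data = mw;	/* rejected with -EINVAL if > PWR_THRESHOLD_MAX */
	ret = fme_power_mgmt_set_prop(feature, &prop);
	if (ret)
		return ret;
	prop.prop_id = FME_PWR_PROP_THRESHOLD2_STATUS;
	ret = fme_power_mgmt_get_prop(feature, &prop);
	/* on success, prop.data holds the threshold2 status bit */
	return ret;
}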

View File

@ -0,0 +1,301 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_feature_dev.h"
#define PERF_OBJ_ROOT_ID 0xff
static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
{
struct feature_fme_dperf *dperf;
struct feature_fme_dfpmon_clk_ctr clk;
dperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_DPERF);
clk.afu_interf_clock = readq(&dperf->clk);
*clock = clk.afu_interf_clock;
return 0;
}
static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
struct feature_fme_dperf *dperf;
struct feature_header header;
dperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_DPERF);
header.csr = readq(&dperf->header);
*revision = header.revision;
return 0;
}
#define DPERF_TIMEOUT 30
static bool fabric_pobj_is_enabled(int port_id,
struct feature_fme_dperf *dperf)
{
struct feature_fme_dfpmon_fab_ctl ctl;
ctl.csr = readq(&dperf->fab_ctl);
if (ctl.port_filter == FAB_DISABLE_FILTER)
return port_id == PERF_OBJ_ROOT_ID;
return port_id == ctl.port_id;
}
static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
enum dperf_fab_events fab_event)
{
struct feature_fme_dfpmon_fab_ctl ctl;
struct feature_fme_dfpmon_fab_ctr ctr;
struct feature_fme_dperf *dperf;
u64 counter = 0;
spinlock_lock(&fme->lock);
dperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_DPERF);
/* if it is disabled, force the counter to return zero. */
if (!fabric_pobj_is_enabled(port_id, dperf))
goto exit;
ctl.csr = readq(&dperf->fab_ctl);
ctl.fab_evtcode = fab_event;
writeq(ctl.csr, &dperf->fab_ctl);
ctr.event_code = fab_event;
if (fpga_wait_register_field(event_code, ctr,
&dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
spinlock_unlock(&fme->lock);
return -ETIMEDOUT;
}
ctr.csr = readq(&dperf->fab_ctr);
counter = ctr.fab_cnt;
exit:
spinlock_unlock(&fme->lock);
return counter;
}
#define FAB_PORT_SHOW(name, event) \
static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
u8 port_id, u64 *counter) \
{ \
*counter = read_fabric_counter(fme, port_id, event); \
return 0; \
}
FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
u8 port_id, u64 *enable)
{
struct feature_fme_dperf *dperf;
int status;
dperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_DPERF);
status = fabric_pobj_is_enabled(port_id, dperf);
*enable = (u64)status;
return 0;
}
/*
* Enabling the event counter for one port (or for all ports) in the
* fabric automatically disables any fabric event counter that was
* enabled before.
*/
static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
u8 port_id, u64 enable)
{
struct feature_fme_dfpmon_fab_ctl ctl;
struct feature_fme_dperf *dperf;
bool state;
state = !!enable;
if (!state)
return -EINVAL;
dperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_DPERF);
/* if it is already enabled. */
if (fabric_pobj_is_enabled(port_id, dperf))
return 0;
spinlock_lock(&fme->lock);
ctl.csr = readq(&dperf->fab_ctl);
if (port_id == PERF_OBJ_ROOT_ID) {
ctl.port_filter = FAB_DISABLE_FILTER;
} else {
ctl.port_filter = FAB_ENABLE_FILTER;
ctl.port_id = port_id;
}
writeq(ctl.csr, &dperf->fab_ctl);
spinlock_unlock(&fme->lock);
return 0;
}
static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
struct feature_fme_dperf *dperf;
struct feature_fme_dfpmon_fab_ctl ctl;
dperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_DPERF);
ctl.csr = readq(&dperf->fab_ctl);
*freeze = (u64)ctl.freeze;
return 0;
}
static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
struct feature_fme_dperf *dperf;
struct feature_fme_dfpmon_fab_ctl ctl;
bool state;
state = !!freeze;
spinlock_lock(&fme->lock);
dperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_DPERF);
ctl.csr = readq(&dperf->fab_ctl);
ctl.freeze = state;
writeq(ctl.csr, &dperf->fab_ctl);
spinlock_unlock(&fme->lock);
return 0;
}
#define PERF_MAX_PORT_NUM 1
static int fme_global_dperf_init(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME global_dperf Init.\n");
return 0;
}
static void fme_global_dperf_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME global_dperf UInit.\n");
}
static int fme_dperf_fab_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x1: /* FREEZE */
return fme_dperf_get_fab_freeze(fme, &prop->data);
case 0x2: /* PCIE0_READ */
return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
case 0x3: /* PCIE0_WRITE */
return fme_dperf_get_fab_port_pcie0_write(fme, sub,
&prop->data);
case 0x4: /* MMIO_READ */
return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
case 0x5: /* MMIO_WRITE */
return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
case 0x6: /* ENABLE */
return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
}
return -ENOENT;
}
static int fme_dperf_root_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
if (sub != PERF_PROP_SUB_UNUSED)
return -ENOENT;
switch (id) {
case 0x1: /* CLOCK */
return fme_dperf_get_clock(fme, &prop->data);
case 0x2: /* REVISION */
return fme_dperf_get_revision(fme, &prop->data);
}
return -ENOENT;
}
static int fme_global_dperf_get_prop(struct feature *feature,
struct feature_prop *prop)
{
u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
switch (top) {
case PERF_PROP_TOP_FAB:
return fme_dperf_fab_get_prop(feature, prop);
case PERF_PROP_TOP_UNUSED:
return fme_dperf_root_get_prop(feature, prop);
}
return -ENOENT;
}
static int fme_dperf_fab_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x1: /* FREEZE - fab root only prop */
if (sub != PERF_PROP_SUB_UNUSED)
return -ENOENT;
return fme_dperf_set_fab_freeze(fme, prop->data);
case 0x6: /* ENABLE - fab both root and sub */
return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
}
return -ENOENT;
}
static int fme_global_dperf_set_prop(struct feature *feature,
struct feature_prop *prop)
{
u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
switch (top) {
case PERF_PROP_TOP_FAB:
return fme_dperf_fab_set_prop(feature, prop);
}
return -ENOENT;
}
struct feature_ops fme_global_dperf_ops = {
.init = fme_global_dperf_init,
.uinit = fme_global_dperf_uinit,
.get_prop = fme_global_dperf_get_prop,
.set_prop = fme_global_dperf_set_prop,
};
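
A hedged usage sketch for the dperf property interface above: prop_id packs PROP_TOP/PROP_SUB/PROP_ID fields whose layout lives in ifpga_feature_dev.h and is decoded by GET_FIELD() in the code; the packing helper and shift names below are hypothetical stand-ins for whatever the header actually provides.

/* Hypothetical packing helper; PROP_TOP_SHIFT/PROP_SUB_SHIFT are
 * assumed names for the field offsets matched by GET_FIELD() above.
 */
static u64 dperf_prop_id(u8 top, u8 sub, u16 id)
{
	return ((u64)top << PROP_TOP_SHIFT) |
	       ((u64)sub << PROP_SUB_SHIFT) | (u64)id;
}

static int example_read_mmio_reads(struct feature *feature, u64 *count)
{
	struct feature_prop prop;
	int ret;

	/* route counters to the root object (all ports): ENABLE = 0x6 */
	prop.prop_id = dperf_prop_id(PERF_PROP_TOP_FAB, PERF_OBJ_ROOT_ID, 0x6);
	prop.data = 1;
	ret = fme_global_dperf_set_prop(feature, &prop);
	if (ret)
		return ret;
	/* MMIO_READ = 0x4 */
	prop.prop_id = dperf_prop_id(PERF_PROP_TOP_FAB, PERF_OBJ_ROOT_ID, 0x4);
	ret = fme_global_dperf_get_prop(feature, &prop);
	if (!ret)
		*count = prop.data;
	return ret;
}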

View File

@ -0,0 +1,381 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_feature_dev.h"
static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_error0 fme_error0;
fme_error0.csr = readq(&fme_err->fme_err);
*val = fme_error0.csr;
return 0;
}
static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_first_error fme_first_err;
fme_first_err.csr = readq(&fme_err->fme_first_err);
*val = fme_first_err.err_reg_status;
return 0;
}
static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_next_error fme_next_err;
fme_next_err.csr = readq(&fme_err->fme_next_err);
*val = fme_next_err.err_reg_status;
return 0;
}
static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_error0 fme_error0;
struct feature_fme_first_error fme_first_err;
struct feature_fme_next_error fme_next_err;
int ret = 0;
spinlock_lock(&fme->lock);
writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);
fme_error0.csr = readq(&fme_err->fme_err);
if (val != fme_error0.csr) {
ret = -EBUSY;
goto exit;
}
fme_first_err.csr = readq(&fme_err->fme_first_err);
fme_next_err.csr = readq(&fme_err->fme_next_err);
writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
&fme_err->fme_first_err);
writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
&fme_err->fme_next_err);
exit:
writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
spinlock_unlock(&fme->lock);
return ret;
}
static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_header header;
header.csr = readq(&fme_err->header);
*val = header.revision;
return 0;
}
static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_pcie0_error pcie0_err;
pcie0_err.csr = readq(&fme_err->pcie0_err);
*val = pcie0_err.csr;
return 0;
}
static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_pcie0_error pcie0_err;
int ret = 0;
spinlock_lock(&fme->lock);
writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
pcie0_err.csr = readq(&fme_err->pcie0_err);
if (val != pcie0_err.csr)
ret = -EBUSY;
else
writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
&fme_err->pcie0_err);
writeq(0UL, &fme_err->pcie0_err_mask);
spinlock_unlock(&fme->lock);
return ret;
}
static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_pcie1_error pcie1_err;
pcie1_err.csr = readq(&fme_err->pcie1_err);
*val = pcie1_err.csr;
return 0;
}
static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_pcie1_error pcie1_err;
int ret = 0;
spinlock_lock(&fme->lock);
writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
pcie1_err.csr = readq(&fme_err->pcie1_err);
if (val != pcie1_err.csr)
ret = -EBUSY;
else
writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
&fme_err->pcie1_err);
writeq(0UL, &fme_err->pcie1_err_mask);
spinlock_unlock(&fme->lock);
return ret;
}
static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_ras_nonfaterror ras_nonfaterr;
ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
*val = ras_nonfaterr.csr;
return 0;
}
static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_ras_catfaterror ras_catfaterr;
ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
*val = ras_catfaterr.csr;
return 0;
}
static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_ras_error_inj ras_error_inj;
ras_error_inj.csr = readq(&fme_err->ras_error_inj);
*val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
return 0;
}
static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
struct feature_fme_ras_error_inj ras_error_inj;
spinlock_lock(&fme->lock);
ras_error_inj.csr = readq(&fme_err->ras_error_inj);
if (val <= FME_RAS_ERROR_INJ_MASK) {
ras_error_inj.csr = val;
} else {
spinlock_unlock(&fme->lock);
return -EINVAL;
}
writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
spinlock_unlock(&fme->lock);
return 0;
}
static void fme_error_enable(struct ifpga_fme_hw *fme)
{
struct feature_fme_err *fme_err
= get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_ERR);
writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
writeq(0UL, &fme_err->pcie0_err_mask);
writeq(0UL, &fme_err->pcie1_err_mask);
writeq(0UL, &fme_err->ras_nonfat_mask);
writeq(0UL, &fme_err->ras_catfat_mask);
}
static int fme_global_error_init(struct feature *feature)
{
struct ifpga_fme_hw *fme = feature->parent;
fme_error_enable(fme);
if (feature->ctx_num)
fme->capability |= FPGA_FME_CAP_ERR_IRQ;
return 0;
}
static void fme_global_error_uinit(struct feature *feature)
{
UNUSED(feature);
}
static int fme_err_fme_err_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x1: /* ERRORS */
return fme_err_get_errors(fme, &prop->data);
case 0x2: /* FIRST_ERROR */
return fme_err_get_first_error(fme, &prop->data);
case 0x3: /* NEXT_ERROR */
return fme_err_get_next_error(fme, &prop->data);
}
return -ENOENT;
}
static int fme_err_root_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x5: /* REVISION */
return fme_err_get_revision(fme, &prop->data);
case 0x6: /* PCIE0_ERRORS */
return fme_err_get_pcie0_errors(fme, &prop->data);
case 0x7: /* PCIE1_ERRORS */
return fme_err_get_pcie1_errors(fme, &prop->data);
case 0x8: /* NONFATAL_ERRORS */
return fme_err_get_nonfatal_errors(fme, &prop->data);
case 0x9: /* CATFATAL_ERRORS */
return fme_err_get_catfatal_errors(fme, &prop->data);
case 0xa: /* INJECT_ERRORS */
return fme_err_get_inject_errors(fme, &prop->data);
case 0xb: /* REVISION */
return fme_err_get_revision(fme, &prop->data);
}
return -ENOENT;
}
static int fme_global_error_get_prop(struct feature *feature,
struct feature_prop *prop)
{
u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
/* PROP_SUB is never used */
if (sub != PROP_SUB_UNUSED)
return -ENOENT;
switch (top) {
case ERR_PROP_TOP_FME_ERR:
return fme_err_fme_err_get_prop(feature, prop);
case ERR_PROP_TOP_UNUSED:
return fme_err_root_get_prop(feature, prop);
}
return -ENOENT;
}
static int fme_err_fme_err_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x4: /* CLEAR */
return fme_err_set_clear(fme, prop->data);
}
return -ENOENT;
}
static int fme_err_root_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x6: /* PCIE0_ERRORS */
return fme_err_set_pcie0_errors(fme, prop->data);
case 0x7: /* PCIE1_ERRORS */
return fme_err_set_pcie1_errors(fme, prop->data);
case 0xa: /* INJECT_ERRORS */
return fme_err_set_inject_errors(fme, prop->data);
}
return -ENOENT;
}
static int fme_global_error_set_prop(struct feature *feature,
struct feature_prop *prop)
{
u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
/* PROP_SUB is never used */
if (sub != PROP_SUB_UNUSED)
return -ENOENT;
switch (top) {
case ERR_PROP_TOP_FME_ERR:
return fme_err_fme_err_set_prop(feature, prop);
case ERR_PROP_TOP_UNUSED:
return fme_err_root_set_prop(feature, prop);
}
return -ENOENT;
}
struct feature_ops fme_global_err_ops = {
.init = fme_global_error_init,
.uinit = fme_global_error_uinit,
.get_prop = fme_global_error_get_prop,
.set_prop = fme_global_error_set_prop,
};
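
fme_err_set_clear() deliberately refuses to clear anything unless the value written matches the live error CSR, so the expected caller pattern is read-then-clear. A hedged sketch follows; the packed prop_id constants are illustrative stand-ins for encodings combining ERR_PROP_TOP_FME_ERR with IDs 0x1 (ERRORS) and 0x4 (CLEAR), as decoded above.

/* Illustrative sketch: FME_ERR_PROP_ERRORS / FME_ERR_PROP_CLEAR are
 * assumed names for the packed prop_id encodings described above.
 */
static int example_clear_fme_errors(struct feature *feature)
{
	struct feature_prop prop;
	int ret;

	prop.prop_id = FME_ERR_PROP_ERRORS;
	ret = fme_global_error_get_prop(feature, &prop);
	if (ret)
		return ret;
	/* prop.data still holds the errors just read; a stale value
	 * makes fme_err_set_clear() return -EBUSY.
	 */
	prop.prop_id = FME_ERR_PROP_CLEAR;
	return fme_global_error_set_prop(feature, &prop);
}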

View File

@ -0,0 +1,715 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_feature_dev.h"
#define PERF_OBJ_ROOT_ID 0xff
static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
{
struct feature_fme_iperf *iperf;
struct feature_fme_ifpmon_clk_ctr clk;
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
clk.afu_interf_clock = readq(&iperf->clk);
*clock = clk.afu_interf_clock;
return 0;
}
static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
{
struct feature_fme_iperf *iperf;
struct feature_header header;
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
header.csr = readq(&iperf->header);
*revision = header.revision;
return 0;
}
static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
struct feature_fme_iperf *iperf;
struct feature_fme_ifpmon_ch_ctl ctl;
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
ctl.csr = readq(&iperf->ch_ctl);
*freeze = (u64)ctl.freeze;
return 0;
}
static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
struct feature_fme_iperf *iperf;
struct feature_fme_ifpmon_ch_ctl ctl;
bool state;
state = !!freeze;
spinlock_lock(&fme->lock);
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
ctl.csr = readq(&iperf->ch_ctl);
ctl.freeze = state;
writeq(ctl.csr, &iperf->ch_ctl);
spinlock_unlock(&fme->lock);
return 0;
}
#define IPERF_TIMEOUT 30
static u64 read_cache_counter(struct ifpga_fme_hw *fme,
u8 channel, enum iperf_cache_events event)
{
struct feature_fme_iperf *iperf;
struct feature_fme_ifpmon_ch_ctl ctl;
struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
u64 counter;
spinlock_lock(&fme->lock);
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
/* set channel access type and cache event code. */
ctl.csr = readq(&iperf->ch_ctl);
ctl.cci_chsel = channel;
ctl.cache_event = event;
writeq(ctl.csr, &iperf->ch_ctl);
/* check the event type in the counter registers */
ctr0.event_code = event;
if (fpga_wait_register_field(event_code, ctr0,
&iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
spinlock_unlock(&fme->lock);
return -ETIMEDOUT;
}
ctr0.csr = readq(&iperf->ch_ctr0);
ctr1.csr = readq(&iperf->ch_ctr1);
counter = ctr0.cache_counter + ctr1.cache_counter;
spinlock_unlock(&fme->lock);
return counter;
}
#define CACHE_SHOW(name, type, event) \
static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme, \
u64 *counter) \
{ \
*counter = read_cache_counter(fme, type, event); \
return 0; \
}
CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
IPERF_CACHE_DATA_WR_PORT_CONTEN);
CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
IPERF_CACHE_TAG_WR_PORT_CONTEN);
static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
struct feature_fme_ifpmon_vtd_ctl ctl;
struct feature_fme_iperf *iperf;
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
ctl.csr = readq(&iperf->vtd_ctl);
*freeze = (u64)ctl.freeze;
return 0;
}
static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
struct feature_fme_ifpmon_vtd_ctl ctl;
struct feature_fme_iperf *iperf;
bool state;
state = !!freeze;
spinlock_lock(&fme->lock);
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
ctl.csr = readq(&iperf->vtd_ctl);
ctl.freeze = state;
writeq(ctl.csr, &iperf->vtd_ctl);
spinlock_unlock(&fme->lock);
return 0;
}
static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
enum iperf_vtd_sip_events event)
{
struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
struct feature_fme_iperf *iperf;
u64 counter;
spinlock_lock(&fme->lock);
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
sip_ctl.vtd_evtcode = event;
writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);
sip_ctr.event_code = event;
if (fpga_wait_register_field(event_code, sip_ctr,
&iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
spinlock_unlock(&fme->lock);
return -ETIMEDOUT;
}
sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
counter = sip_ctr.vtd_counter;
spinlock_unlock(&fme->lock);
return counter;
}
#define VTD_SIP_SHOW(name, event) \
static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme, \
u64 *counter) \
{ \
*counter = read_iommu_sip_counter(fme, event); \
return 0; \
}
VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
enum iperf_vtd_events base_event)
{
struct feature_fme_ifpmon_vtd_ctl ctl;
struct feature_fme_ifpmon_vtd_ctr ctr;
struct feature_fme_iperf *iperf;
enum iperf_vtd_events event = base_event + port_id;
u64 counter;
spinlock_lock(&fme->lock);
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
ctl.csr = readq(&iperf->vtd_ctl);
ctl.vtd_evtcode = event;
writeq(ctl.csr, &iperf->vtd_ctl);
ctr.event_code = event;
if (fpga_wait_register_field(event_code, ctr,
&iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
spinlock_unlock(&fme->lock);
return -ETIMEDOUT;
}
ctr.csr = readq(&iperf->vtd_ctr);
counter = ctr.vtd_counter;
spinlock_unlock(&fme->lock);
return counter;
}
#define VTD_PORT_SHOW(name, base_event) \
static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme, \
u8 port_id, u64 *counter) \
{ \
*counter = read_iommu_counter(fme, port_id, base_event); \
return 0; \
}
VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
{
struct feature_fme_ifpmon_fab_ctl ctl;
ctl.csr = readq(&iperf->fab_ctl);
if (ctl.port_filter == FAB_DISABLE_FILTER)
return port_id == PERF_OBJ_ROOT_ID;
return port_id == ctl.port_id;
}
static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
enum iperf_fab_events fab_event)
{
struct feature_fme_ifpmon_fab_ctl ctl;
struct feature_fme_ifpmon_fab_ctr ctr;
struct feature_fme_iperf *iperf;
u64 counter = 0;
spinlock_lock(&fme->lock);
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
/* if it is disabled, force the counter to return zero. */
if (!fabric_pobj_is_enabled(port_id, iperf))
goto exit;
ctl.csr = readq(&iperf->fab_ctl);
ctl.fab_evtcode = fab_event;
writeq(ctl.csr, &iperf->fab_ctl);
ctr.event_code = fab_event;
if (fpga_wait_register_field(event_code, ctr,
&iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
spinlock_unlock(&fme->lock);
return -ETIMEDOUT;
}
ctr.csr = readq(&iperf->fab_ctr);
counter = ctr.fab_cnt;
exit:
spinlock_unlock(&fme->lock);
return counter;
}
#define FAB_PORT_SHOW(name, event) \
static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
u8 port_id, u64 *counter) \
{ \
*counter = read_fabric_counter(fme, port_id, event); \
return 0; \
}
FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
u8 port_id, u64 *enable)
{
struct feature_fme_iperf *iperf;
int status;
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
status = fabric_pobj_is_enabled(port_id, iperf);
*enable = (u64)status;
return 0;
}
/*
* Enabling the event counter for one port (or for all ports) in the
* fabric automatically disables any fabric event counter that was
* enabled before.
*/
static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
u8 port_id, u64 enable)
{
struct feature_fme_ifpmon_fab_ctl ctl;
struct feature_fme_iperf *iperf;
bool state;
state = !!enable;
if (!state)
return -EINVAL;
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
/* if it is already enabled. */
if (fabric_pobj_is_enabled(port_id, iperf))
return 0;
spinlock_lock(&fme->lock);
ctl.csr = readq(&iperf->fab_ctl);
if (port_id == PERF_OBJ_ROOT_ID) {
ctl.port_filter = FAB_DISABLE_FILTER;
} else {
ctl.port_filter = FAB_ENABLE_FILTER;
ctl.port_id = port_id;
}
writeq(ctl.csr, &iperf->fab_ctl);
spinlock_unlock(&fme->lock);
return 0;
}
static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
{
struct feature_fme_iperf *iperf;
struct feature_fme_ifpmon_fab_ctl ctl;
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
ctl.csr = readq(&iperf->fab_ctl);
*freeze = (u64)ctl.freeze;
return 0;
}
static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
{
struct feature_fme_iperf *iperf;
struct feature_fme_ifpmon_fab_ctl ctl;
bool state;
state = !!freeze;
spinlock_lock(&fme->lock);
iperf = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_GLOBAL_IPERF);
ctl.csr = readq(&iperf->fab_ctl);
ctl.freeze = state;
writeq(ctl.csr, &iperf->fab_ctl);
spinlock_unlock(&fme->lock);
return 0;
}
#define PERF_MAX_PORT_NUM 1
#define FME_IPERF_CAP_IOMMU 0x1
static int fme_global_iperf_init(struct feature *feature)
{
struct ifpga_fme_hw *fme;
struct feature_fme_header *fme_hdr;
struct feature_fme_capability fme_capability;
dev_info(NULL, "FME global_iperf Init.\n");
fme = (struct ifpga_fme_hw *)feature->parent;
fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
/* check whether iommu is supported on this device. */
fme_capability.csr = readq(&fme_hdr->capability);
dev_info(NULL, "FME HEAD fme_capability %llx.\n",
(unsigned long long)fme_hdr->capability.csr);
if (fme_capability.iommu_support)
feature->cap |= FME_IPERF_CAP_IOMMU;
return 0;
}
static void fme_global_iperf_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME global_iperf UInit.\n");
}
static int fme_iperf_root_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
if (sub != PERF_PROP_SUB_UNUSED)
return -ENOENT;
switch (id) {
case 0x1: /* CLOCK */
return fme_iperf_get_clock(fme, &prop->data);
case 0x2: /* REVISION */
return fme_iperf_get_revision(fme, &prop->data);
}
return -ENOENT;
}
static int fme_iperf_cache_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
if (sub != PERF_PROP_SUB_UNUSED)
return -ENOENT;
switch (id) {
case 0x1: /* FREEZE */
return fme_iperf_get_cache_freeze(fme, &prop->data);
case 0x2: /* READ_HIT */
return fme_iperf_get_cache_read_hit(fme, &prop->data);
case 0x3: /* READ_MISS */
return fme_iperf_get_cache_read_miss(fme, &prop->data);
case 0x4: /* WRITE_HIT */
return fme_iperf_get_cache_write_hit(fme, &prop->data);
case 0x5: /* WRITE_MISS */
return fme_iperf_get_cache_write_miss(fme, &prop->data);
case 0x6: /* HOLD_REQUEST */
return fme_iperf_get_cache_hold_request(fme, &prop->data);
case 0x7: /* TX_REQ_STALL */
return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
case 0x8: /* RX_REQ_STALL */
return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
case 0x9: /* RX_EVICTION */
return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
case 0xa: /* DATA_WRITE_PORT_CONTENTION */
return fme_iperf_get_cache_data_write_port_contention(fme,
&prop->data);
case 0xb: /* TAG_WRITE_PORT_CONTENTION */
return fme_iperf_get_cache_tag_write_port_contention(fme,
&prop->data);
}
return -ENOENT;
}
static int fme_iperf_vtd_root_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x1: /* FREEZE */
return fme_iperf_get_vtd_freeze(fme, &prop->data);
case 0x2: /* IOTLB_4K_HIT */
return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
case 0x3: /* IOTLB_2M_HIT */
return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
case 0x4: /* IOTLB_1G_HIT */
return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
case 0x5: /* SLPWC_L3_HIT */
return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
case 0x6: /* SLPWC_L4_HIT */
return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
case 0x7: /* RCC_HIT */
return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
case 0x8: /* IOTLB_4K_MISS */
return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
case 0x9: /* IOTLB_2M_MISS */
return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
case 0xa: /* IOTLB_1G_MISS */
return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
case 0xb: /* SLPWC_L3_MISS */
return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
case 0xc: /* SLPWC_L4_MISS */
return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
case 0xd: /* RCC_MISS */
return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
}
return -ENOENT;
}
static int fme_iperf_vtd_sub_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
if (sub > PERF_MAX_PORT_NUM)
return -ENOENT;
switch (id) {
case 0xe: /* READ_TRANSACTION */
return fme_iperf_get_vtd_port_read_transaction(fme, sub,
&prop->data);
case 0xf: /* WRITE_TRANSACTION */
return fme_iperf_get_vtd_port_write_transaction(fme, sub,
&prop->data);
case 0x10: /* DEVTLB_READ_HIT */
return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
&prop->data);
case 0x11: /* DEVTLB_WRITE_HIT */
return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
&prop->data);
case 0x12: /* DEVTLB_4K_FILL */
return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
&prop->data);
case 0x13: /* DEVTLB_2M_FILL */
return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
&prop->data);
case 0x14: /* DEVTLB_1G_FILL */
return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
&prop->data);
}
return -ENOENT;
}
static int fme_iperf_vtd_get_prop(struct feature *feature,
struct feature_prop *prop)
{
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
if (sub == PERF_PROP_SUB_UNUSED)
return fme_iperf_vtd_root_get_prop(feature, prop);
return fme_iperf_vtd_sub_get_prop(feature, prop);
}
static int fme_iperf_fab_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
/* Other properties are present for both top and sub levels */
switch (id) {
case 0x1: /* FREEZE */
if (sub != PERF_PROP_SUB_UNUSED)
return -ENOENT;
return fme_iperf_get_fab_freeze(fme, &prop->data);
case 0x2: /* PCIE0_READ */
return fme_iperf_get_fab_port_pcie0_read(fme, sub,
&prop->data);
case 0x3: /* PCIE0_WRITE */
return fme_iperf_get_fab_port_pcie0_write(fme, sub,
&prop->data);
case 0x4: /* PCIE1_READ */
return fme_iperf_get_fab_port_pcie1_read(fme, sub,
&prop->data);
case 0x5: /* PCIE1_WRITE */
return fme_iperf_get_fab_port_pcie1_write(fme, sub,
&prop->data);
case 0x6: /* UPI_READ */
return fme_iperf_get_fab_port_upi_read(fme, sub,
&prop->data);
case 0x7: /* UPI_WRITE */
return fme_iperf_get_fab_port_upi_write(fme, sub,
&prop->data);
case 0x8: /* MMIO_READ */
return fme_iperf_get_fab_port_mmio_read(fme, sub,
&prop->data);
case 0x9: /* MMIO_WRITE */
return fme_iperf_get_fab_port_mmio_write(fme, sub,
&prop->data);
case 0xa: /* ENABLE */
return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
}
return -ENOENT;
}
static int fme_global_iperf_get_prop(struct feature *feature,
struct feature_prop *prop)
{
u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
switch (top) {
case PERF_PROP_TOP_CACHE:
return fme_iperf_cache_get_prop(feature, prop);
case PERF_PROP_TOP_VTD:
return fme_iperf_vtd_get_prop(feature, prop);
case PERF_PROP_TOP_FAB:
return fme_iperf_fab_get_prop(feature, prop);
case PERF_PROP_TOP_UNUSED:
return fme_iperf_root_get_prop(feature, prop);
}
return -ENOENT;
}
static int fme_iperf_cache_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
return fme_iperf_set_cache_freeze(fme, prop->data);
return -ENOENT;
}
static int fme_iperf_vtd_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
return fme_iperf_set_vtd_freeze(fme, prop->data);
return -ENOENT;
}
static int fme_iperf_fab_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme = feature->parent;
u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
u16 id = GET_FIELD(PROP_ID, prop->prop_id);
switch (id) {
case 0x1: /* FREEZE */
if (sub != PERF_PROP_SUB_UNUSED)
return -ENOENT;
return fme_iperf_set_fab_freeze(fme, prop->data);
case 0xa: /* ENABLE */
return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
}
return -ENOENT;
}
static int fme_global_iperf_set_prop(struct feature *feature,
struct feature_prop *prop)
{
u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
switch (top) {
case PERF_PROP_TOP_CACHE:
return fme_iperf_cache_set_prop(feature, prop);
case PERF_PROP_TOP_VTD:
return fme_iperf_vtd_set_prop(feature, prop);
case PERF_PROP_TOP_FAB:
return fme_iperf_fab_set_prop(feature, prop);
}
return -ENOENT;
}
struct feature_ops fme_global_iperf_ops = {
.init = fme_global_iperf_init,
.uinit = fme_global_iperf_uinit,
.get_prop = fme_global_iperf_get_prop,
.set_prop = fme_global_iperf_set_prop,
};
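
Each cache-counter read above takes the FME lock, reprograms the event selector, and waits for the event code to latch, so derived metrics belong in the caller. A hedged sketch of a read-hit-rate helper built on the static accessors (it would have to live in this file, since they are static):

static int fme_iperf_get_cache_read_hit_rate(struct ifpga_fme_hw *fme,
					     u64 *rate)
{
	u64 hit = 0, miss = 0;
	int ret;

	ret = fme_iperf_get_cache_read_hit(fme, &hit);
	if (ret)
		return ret;
	ret = fme_iperf_get_cache_read_miss(fme, &miss);
	if (ret)
		return ret;
	if (hit + miss == 0)
		return -EAGAIN;	/* no cache traffic observed yet */
	*rate = (hit * 100) / (hit + miss);	/* percentage, truncated */
	return 0;
}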

View File

@ -0,0 +1,352 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_feature_dev.h"
static u64
pr_err_handle(struct feature_fme_pr *fme_pr)
{
struct feature_fme_pr_status fme_pr_status;
unsigned long err_code;
u64 fme_pr_error;
int i;
fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
if (!fme_pr_status.pr_status)
return 0;
err_code = readq(&fme_pr->ccip_fme_pr_err);
fme_pr_error = err_code;
for (i = 0; i < PR_MAX_ERR_NUM; i++) {
if (err_code & (1 << i))
dev_info(NULL, "%s\n", pr_err_msg[i]);
}
writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
return fme_pr_error;
}
static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
struct fpga_pr_info *info)
{
struct feature_fme_pr *fme_pr;
struct feature_fme_pr_ctl fme_pr_ctl;
struct feature_fme_pr_status fme_pr_status;
fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
FME_FEATURE_ID_PR_MGMT);
if (!fme_pr)
return -EINVAL;
if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
return -EINVAL;
dev_info(fme_dev, "resetting PR before initiated PR\n");
fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
fme_pr_ctl.pr_reset = 1;
writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
fme_pr_ctl.pr_reset_ack = 1;
if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
&fme_pr->ccip_fme_pr_control,
PR_WAIT_TIMEOUT, 1)) {
dev_err(fme_dev, "maximum PR timeout\n");
return -ETIMEDOUT;
}
fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
fme_pr_ctl.pr_reset = 0;
writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
if (fpga_wait_register_field(pr_host_status, fme_pr_status,
&fme_pr->ccip_fme_pr_status,
PR_WAIT_TIMEOUT, 1)) {
dev_err(fme_dev, "maximum PR timeout\n");
return -ETIMEDOUT;
}
dev_info(fme_dev, "check if have any previous PR error\n");
pr_err_handle(fme_pr);
return 0;
}
static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
int port_id, const char *buf, size_t count,
struct fpga_pr_info *info)
{
struct feature_fme_pr *fme_pr;
struct feature_fme_pr_ctl fme_pr_ctl;
struct feature_fme_pr_status fme_pr_status;
struct feature_fme_pr_data fme_pr_data;
int delay, pr_credit;
int ret = 0;
fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
FME_FEATURE_ID_PR_MGMT);
if (!fme_pr)
return -EINVAL;
dev_info(fme_dev, "set PR port ID and start request\n");
fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
fme_pr_ctl.pr_regionid = port_id;
fme_pr_ctl.pr_start_req = 1;
writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
dev_info(fme_dev, "pushing data from bitstream to HW\n");
fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
pr_credit = fme_pr_status.pr_credit;
while (count > 0) {
delay = 0;
while (pr_credit <= 1) {
if (delay++ > PR_WAIT_TIMEOUT) {
dev_err(fme_dev, "maximum try\n");
info->pr_err = pr_err_handle(fme_pr);
return info->pr_err ? -EIO : -ETIMEDOUT;
}
udelay(1);
fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
pr_credit = fme_pr_status.pr_credit;
}
if (count >= fme_dev->pr_bandwidth) {
switch (fme_dev->pr_bandwidth) {
case 4:
fme_pr_data.rsvd = 0;
fme_pr_data.pr_data_raw = *((const u32 *)buf);
writeq(fme_pr_data.csr,
&fme_pr->ccip_fme_pr_data);
break;
default:
ret = -EFAULT;
goto done;
}
buf += fme_dev->pr_bandwidth;
count -= fme_dev->pr_bandwidth;
pr_credit--;
} else {
WARN_ON(1);
ret = -EINVAL;
goto done;
}
}
done:
return ret;
}
static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
struct fpga_pr_info *info)
{
struct feature_fme_pr *fme_pr;
struct feature_fme_pr_ctl fme_pr_ctl;
fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
FME_FEATURE_ID_PR_MGMT);
fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
fme_pr_ctl.pr_push_complete = 1;
writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
dev_info(fme_dev, "green bitstream push complete\n");
dev_info(fme_dev, "waiting for HW to release PR resource\n");
fme_pr_ctl.pr_start_req = 0;
if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
&fme_pr->ccip_fme_pr_control,
PR_WAIT_TIMEOUT, 1)) {
printf("maximum try.\n");
return -ETIMEDOUT;
}
dev_info(fme_dev, "PR operation complete, checking status\n");
info->pr_err = pr_err_handle(fme_pr);
if (info->pr_err)
return -EIO;
dev_info(fme_dev, "PR done successfully\n");
return 0;
}
static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
struct fpga_pr_info *info, const char *buf,
size_t count)
{
int ret;
info->state = FPGA_PR_STATE_WRITE_INIT;
ret = fme_pr_write_init(fme_dev, info);
if (ret) {
dev_err(fme_dev, "Error preparing FPGA for writing\n");
info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
return ret;
}
/*
* Write the FPGA image to the FPGA.
*/
info->state = FPGA_PR_STATE_WRITE;
ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
if (ret) {
dev_err(fme_dev, "Error while writing image data to FPGA\n");
info->state = FPGA_PR_STATE_WRITE_ERR;
return ret;
}
/*
* After all the FPGA image has been written, do the device specific
* steps to finish and set the FPGA into operating mode.
*/
info->state = FPGA_PR_STATE_WRITE_COMPLETE;
ret = fme_pr_write_complete(fme_dev, info);
if (ret) {
dev_err(fme_dev, "Error after writing image data to FPGA\n");
info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
return ret;
}
info->state = FPGA_PR_STATE_DONE;
return 0;
}
static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
u64 *status)
{
struct feature_fme_header *fme_hdr;
struct feature_fme_capability fme_capability;
struct ifpga_fme_hw *fme = &hw->fme;
struct fpga_pr_info info;
struct ifpga_port_hw *port;
int ret = 0;
if (!buffer || size == 0)
return -EINVAL;
if (fme->state != IFPGA_FME_IMPLEMENTED)
return -EINVAL;
/*
* Pad extra zeros to align the PR buffer with the PR bandwidth; HW
* ignores the padding zeros automatically.
*/
size = IFPGA_ALIGN(size, fme->pr_bandwidth);
/* get fme header region */
fme_hdr = get_fme_feature_ioaddr_by_index(fme,
FME_FEATURE_ID_HEADER);
if (!fme_hdr)
return -EINVAL;
/* check port id */
fme_capability.csr = readq(&fme_hdr->capability);
if (port_id >= fme_capability.num_ports) {
dev_err(fme, "port number more than maximum\n");
return -EINVAL;
}
memset(&info, 0, sizeof(struct fpga_pr_info));
info.flags = FPGA_MGR_PARTIAL_RECONFIG;
info.port_id = port_id;
spinlock_lock(&fme->lock);
/* get port device by port_id */
port = &hw->port[port_id];
/* Disable Port before PR */
fpga_port_disable(port);
ret = fpga_pr_buf_load(fme, &info, (void *)buffer, size);
*status = info.pr_err;
/* Re-enable Port after PR finished */
fpga_port_enable(port);
spinlock_unlock(&fme->lock);
return ret;
}
int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status)
{
struct bts_header *bts_hdr;
void *buf;
struct ifpga_port_hw *port;
int ret;
if (!buffer || size == 0) {
dev_err(hw, "invalid parameter\n");
return -EINVAL;
}
bts_hdr = (struct bts_header *)buffer;
if (is_valid_bts(bts_hdr)) {
dev_info(hw, "this is a valid bitsteam..\n");
size -= (sizeof(struct bts_header) +
bts_hdr->metadata_len);
buf = (u8 *)buffer + sizeof(struct bts_header) +
bts_hdr->metadata_len;
} else {
return -EINVAL;
}
/* clear port errors before doing PR */
port = &hw->port[port_id];
ret = port_clear_error(port);
if (ret) {
dev_err(hw, "port cannot clear error\n");
return -EINVAL;
}
return fme_pr(hw, port_id, buf, size, status);
}
static int fme_pr_mgmt_init(struct feature *feature)
{
struct feature_fme_pr *fme_pr;
struct feature_header fme_pr_header;
struct ifpga_fme_hw *fme;
dev_info(NULL, "FME PR MGMT Init.\n");
fme = (struct ifpga_fme_hw *)feature->parent;
fme_pr = (struct feature_fme_pr *)feature->addr;
fme_pr_header.csr = readq(&fme_pr->header);
if (fme_pr_header.revision == 2) {
dev_info(NULL, "using 512-bit PR\n");
fme->pr_bandwidth = 64;
} else {
dev_info(NULL, "using 32-bit PR\n");
fme->pr_bandwidth = 4;
}
return 0;
}
static void fme_pr_mgmt_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "FME PR MGMT UInit.\n");
}
struct feature_ops fme_pr_mgmt_ops = {
.init = fme_pr_mgmt_init,
.uinit = fme_pr_mgmt_uinit,
};
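
do_pr() is the external entry point: it expects the complete bitstream file in memory (struct bts_header, metadata, then the payload) and strips the header itself before handing the payload to fme_pr(). A hedged caller sketch, with file loading elided and hw/port_id assumed valid:

/* Sketch: "buf"/"size" cover the whole bitstream file as read from
 * disk, including struct bts_header and metadata, both of which
 * do_pr() skips past.
 */
static int example_partial_reconfig(struct ifpga_hw *hw, u32 port_id,
				    void *buf, u32 size)
{
	u64 status = 0;
	int ret;

	ret = do_pr(hw, port_id, buf, size, &status);
	if (ret)
		dev_err(hw, "PR failed: ret %d, pr_err 0x%llx\n",
			ret, (unsigned long long)status);
	return ret;
}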

View File

@ -0,0 +1,127 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _IFPGA_HW_H_
#define _IFPGA_HW_H_
#include "ifpga_defines.h"
#include "opae_ifpga_hw_api.h"
enum ifpga_feature_state {
IFPGA_FEATURE_UNUSED = 0,
IFPGA_FEATURE_ATTACHED,
};
struct feature_irq_ctx {
int eventfd;
int idx;
};
struct feature {
enum ifpga_feature_state state;
const char *name;
u64 id;
u8 *addr;
uint64_t phys_addr;
u32 size;
int revision;
u64 cap;
int vfio_dev_fd;
struct feature_irq_ctx *ctx;
unsigned int ctx_num;
void *parent; /* to parent hw data structure */
struct feature_ops *ops;/* callback to this private feature */
};
struct feature_ops {
int (*init)(struct feature *feature);
void (*uinit)(struct feature *feature);
int (*get_prop)(struct feature *feature, struct feature_prop *prop);
int (*set_prop)(struct feature *feature, struct feature_prop *prop);
int (*set_irq)(struct feature *feature, void *irq_set);
};
enum ifpga_fme_state {
IFPGA_FME_UNUSED = 0,
IFPGA_FME_IMPLEMENTED,
};
struct ifpga_fme_hw {
enum ifpga_fme_state state;
struct feature sub_feature[FME_FEATURE_ID_MAX];
spinlock_t lock; /* protect hardware access */
void *parent; /* pointer to ifpga_hw */
/* provided by HEADER feature */
u32 port_num;
struct uuid bitstream_id;
u64 bitstream_md;
size_t pr_bandwidth;
u32 socket_id;
u32 fabric_version_id;
u32 cache_size;
u32 capability;
};
enum ifpga_port_state {
IFPGA_PORT_UNUSED = 0,
IFPGA_PORT_ATTACHED,
IFPGA_PORT_DETACHED,
};
struct ifpga_port_hw {
enum ifpga_port_state state;
struct feature sub_feature[PORT_FEATURE_ID_MAX];
spinlock_t lock; /* protect access to hw */
void *parent; /* pointer to ifpga_hw */
int port_id; /* provided by HEADER feature */
struct uuid afu_id; /* provided by User AFU feature */
unsigned int disable_count;
u32 capability;
u32 num_umsgs; /* The number of allocated umsgs */
u32 num_uafu_irqs; /* The number of uafu interrupts */
u8 *stp_addr;
u32 stp_size;
};
#define AFU_MAX_REGION 1
struct ifpga_afu_info {
struct opae_reg_region region[AFU_MAX_REGION];
unsigned int num_regions;
unsigned int num_irqs;
};
struct ifpga_hw {
struct opae_adapter *adapter;
struct opae_adapter_data_pci *pci_data;
struct ifpga_fme_hw fme;
struct ifpga_port_hw port[MAX_FPGA_PORT_NUM];
};
static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)
{
return hw->fme.state != IFPGA_FME_UNUSED;
}
static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)
{
if (port_id >= MAX_FPGA_PORT_NUM ||
hw->port[port_id].state != IFPGA_PORT_ATTACHED)
return false;
return true;
}
#endif /* _IFPGA_HW_H_ */
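
The structures above form a two-level tree: ifpga_hw owns one FME and several ports, and each of those owns an array of sub-features dispatched through feature_ops. A hedged sketch of the init walk this layout implies (the generic feature-dev code is expected to do something equivalent):

/* Illustrative walk over an FME's sub-features. */
static int fme_init_features(struct ifpga_fme_hw *fme)
{
	struct feature *f;
	int i, ret;

	for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
		f = &fme->sub_feature[i];
		if (f->state != IFPGA_FEATURE_ATTACHED)
			continue;
		if (f->ops && f->ops->init) {
			ret = f->ops->init(f);
			if (ret)
				return ret;
		}
	}
	return 0;
}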

View File

@ -0,0 +1,388 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_feature_dev.h"
int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
{
struct feature *feature;
if (!port)
return -ENOENT;
feature = get_port_feature_by_id(port, prop->feature_id);
if (feature && feature->ops && feature->ops->get_prop)
return feature->ops->get_prop(feature, prop);
return -ENOENT;
}
int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
{
struct feature *feature;
if (!port)
return -ENOENT;
feature = get_port_feature_by_id(port, prop->feature_id);
if (feature && feature->ops && feature->ops->set_prop)
return feature->ops->set_prop(feature, prop);
return -ENOENT;
}
int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)
{
struct feature *feature;
if (!port)
return -ENOENT;
feature = get_port_feature_by_id(port, feature_id);
if (feature && feature->ops && feature->ops->set_irq)
return feature->ops->set_irq(feature, irq_set);
return -ENOENT;
}
static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)
{
struct feature_port_header *port_hdr
= get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
struct feature_header header;
header.csr = readq(&port_hdr->header);
*revision = header.revision;
return 0;
}
static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)
{
struct feature_port_header *port_hdr;
struct feature_port_capability capability;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
capability.csr = readq(&port_hdr->capability);
*idx = capability.port_number;
return 0;
}
static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
struct feature_port_control control;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
control.csr = readq(&port_hdr->control);
*val = control.latency_tolerance;
return 0;
}
static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
struct feature_port_status status;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
status.csr = readq(&port_hdr->status);
spinlock_unlock(&port->lock);
*val = status.ap1_event;
return 0;
}
static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)
{
struct feature_port_header *port_hdr;
struct feature_port_status status;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
status.csr = readq(&port_hdr->status);
status.ap1_event = val;
writeq(status.csr, &port_hdr->status);
spinlock_unlock(&port->lock);
return 0;
}
static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
struct feature_port_status status;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
status.csr = readq(&port_hdr->status);
spinlock_unlock(&port->lock);
*val = status.ap2_event;
return 0;
}
static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)
{
struct feature_port_header *port_hdr;
struct feature_port_status status;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
status.csr = readq(&port_hdr->status);
status.ap2_event = val;
writeq(status.csr, &port_hdr->status);
spinlock_unlock(&port->lock);
return 0;
}
static int port_get_power_state(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
struct feature_port_status status;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
status.csr = readq(&port_hdr->status);
spinlock_unlock(&port->lock);
*val = status.power_state;
return 0;
}
static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
*val = readq(&port_hdr->user_clk_freq_cmd0);
spinlock_unlock(&port->lock);
return 0;
}
static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)
{
struct feature_port_header *port_hdr;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
writeq(val, &port_hdr->user_clk_freq_cmd0);
spinlock_unlock(&port->lock);
return 0;
}
static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
*val = readq(&port_hdr->user_clk_freq_cmd1);
spinlock_unlock(&port->lock);
return 0;
}
static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)
{
struct feature_port_header *port_hdr;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
writeq(val, &port_hdr->user_clk_freq_cmd1);
spinlock_unlock(&port->lock);
return 0;
}
static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
*val = readq(&port_hdr->user_clk_freq_sts0);
spinlock_unlock(&port->lock);
return 0;
}
static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_header *port_hdr;
port_hdr = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_HEADER);
spinlock_lock(&port->lock);
*val = readq(&port_hdr->user_clk_freq_sts1);
spinlock_unlock(&port->lock);
return 0;
}
static int port_hdr_init(struct feature *feature)
{
struct ifpga_port_hw *port = feature->parent;
dev_info(NULL, "port hdr Init.\n");
fpga_port_reset(port);
return 0;
}
static void port_hdr_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "port hdr uinit.\n");
}
static int port_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
{
struct ifpga_port_hw *port = feature->parent;
switch (prop->prop_id) {
case PORT_HDR_PROP_REVISION:
return port_get_revision(port, &prop->data);
case PORT_HDR_PROP_PORTIDX:
return port_get_portidx(port, &prop->data);
case PORT_HDR_PROP_LATENCY_TOLERANCE:
return port_get_latency_tolerance(port, &prop->data);
case PORT_HDR_PROP_AP1_EVENT:
return port_get_ap1_event(port, &prop->data);
case PORT_HDR_PROP_AP2_EVENT:
return port_get_ap2_event(port, &prop->data);
case PORT_HDR_PROP_POWER_STATE:
return port_get_power_state(port, &prop->data);
case PORT_HDR_PROP_USERCLK_FREQCMD:
return port_get_userclk_freqcmd(port, &prop->data);
case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
return port_get_userclk_freqcntrcmd(port, &prop->data);
case PORT_HDR_PROP_USERCLK_FREQSTS:
return port_get_userclk_freqsts(port, &prop->data);
case PORT_HDR_PROP_USERCLK_CNTRSTS:
return port_get_userclk_freqcntrsts(port, &prop->data);
}
return -ENOENT;
}
static int port_hdr_set_prop(struct feature *feature, struct feature_prop *prop)
{
struct ifpga_port_hw *port = feature->parent;
switch (prop->prop_id) {
case PORT_HDR_PROP_AP1_EVENT:
return port_set_ap1_event(port, prop->data);
case PORT_HDR_PROP_AP2_EVENT:
return port_set_ap2_event(port, prop->data);
case PORT_HDR_PROP_USERCLK_FREQCMD:
return port_set_userclk_freqcmd(port, prop->data);
case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
return port_set_userclk_freqcntrcmd(port, prop->data);
}
return -ENOENT;
}
struct feature_ops port_hdr_ops = {
.init = port_hdr_init,
.uinit = port_hdr_uinit,
.get_prop = port_hdr_get_prop,
.set_prop = port_hdr_set_prop,
};
static int port_stp_init(struct feature *feature)
{
struct ifpga_port_hw *port = feature->parent;
dev_info(NULL, "port stp Init.\n");
spinlock_lock(&port->lock);
port->stp_addr = feature->addr;
port->stp_size = feature->size;
spinlock_unlock(&port->lock);
return 0;
}
static void port_stp_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "port stp uinit.\n");
}
struct feature_ops port_stp_ops = {
.init = port_stp_init,
.uinit = port_stp_uinit,
};
static int port_uint_init(struct feature *feature)
{
struct ifpga_port_hw *port = feature->parent;
dev_info(NULL, "PORT UINT Init.\n");
spinlock_lock(&port->lock);
if (feature->ctx_num) {
port->capability |= FPGA_PORT_CAP_UAFU_IRQ;
port->num_uafu_irqs = feature->ctx_num;
}
spinlock_unlock(&port->lock);
return 0;
}
static void port_uint_uinit(struct feature *feature)
{
UNUSED(feature);
dev_info(NULL, "PORT UINT UInit.\n");
}
struct feature_ops port_uint_ops = {
.init = port_uint_init,
.uinit = port_uint_uinit,
};
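
All of the port header accessors are reached through port_get_prop()/port_set_prop(), which route on prop->feature_id before dispatching on prop->prop_id. A hedged caller sketch reading the port power state (constants from the driver headers; port assumed attached):

static int example_get_power_state(struct ifpga_port_hw *port, u64 *state)
{
	struct feature_prop prop;
	int ret;

	prop.feature_id = PORT_FEATURE_ID_HEADER;
	prop.prop_id = PORT_HDR_PROP_POWER_STATE;
	ret = port_get_prop(port, &prop);
	if (!ret)
		*state = prop.data;
	return ret;
}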

View File

@ -0,0 +1,144 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "ifpga_feature_dev.h"
static int port_err_get_revision(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_error *port_err;
struct feature_header header;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
header.csr = readq(&port_err->header);
*val = header.revision;
return 0;
}
static int port_err_get_errors(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_error *port_err;
struct feature_port_err_key error;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
error.csr = readq(&port_err->port_error);
*val = error.csr;
return 0;
}
static int port_err_get_first_error(struct ifpga_port_hw *port, u64 *val)
{
struct feature_port_error *port_err;
struct feature_port_first_err_key first_error;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
first_error.csr = readq(&port_err->port_first_error);
*val = first_error.csr;
return 0;
}
static int port_err_get_first_malformed_req_lsb(struct ifpga_port_hw *port,
u64 *val)
{
struct feature_port_error *port_err;
struct feature_port_malformed_req0 malreq0;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
malreq0.header_lsb = readq(&port_err->malreq0);
*val = malreq0.header_lsb;
return 0;
}
static int port_err_get_first_malformed_req_msb(struct ifpga_port_hw *port,
u64 *val)
{
struct feature_port_error *port_err;
struct feature_port_malformed_req1 malreq1;
port_err = get_port_feature_ioaddr_by_index(port,
PORT_FEATURE_ID_ERROR);
malreq1.header_msb = readq(&port_err->malreq1);
*val = malreq1.header_msb;
return 0;
}
static int port_err_set_clear(struct ifpga_port_hw *port, u64 val)
{
int ret;
spinlock_lock(&port->lock);
ret = port_err_clear(port, val);
spinlock_unlock(&port->lock);
return ret;
}
static int port_error_init(struct feature *feature)
{
struct ifpga_port_hw *port = feature->parent;
dev_info(NULL, "port error Init.\n");
spinlock_lock(&port->lock);
port_err_mask(port, false);
if (feature->ctx_num)
port->capability |= FPGA_PORT_CAP_ERR_IRQ;
spinlock_unlock(&port->lock);
return 0;
}
static void port_error_uinit(struct feature *feature)
{
UNUSED(feature);
}
static int port_error_get_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_port_hw *port = feature->parent;
switch (prop->prop_id) {
case PORT_ERR_PROP_REVISION:
return port_err_get_revision(port, &prop->data);
case PORT_ERR_PROP_ERRORS:
return port_err_get_errors(port, &prop->data);
case PORT_ERR_PROP_FIRST_ERROR:
return port_err_get_first_error(port, &prop->data);
case PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB:
return port_err_get_first_malformed_req_lsb(port, &prop->data);
case PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB:
return port_err_get_first_malformed_req_msb(port, &prop->data);
}
return -ENOENT;
}
static int port_error_set_prop(struct feature *feature,
struct feature_prop *prop)
{
struct ifpga_port_hw *port = feature->parent;
if (prop->prop_id == PORT_ERR_PROP_CLEAR)
return port_err_set_clear(port, prop->data);
return -ENOENT;
}
struct feature_ops port_error_ops = {
.init = port_error_init,
.uinit = port_error_uinit,
.get_prop = port_error_get_prop,
.set_prop = port_error_set_prop,
};
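/*
 * A hedged usage sketch for the property interface above: read the
 * accumulated port errors, then write the same bits back through the
 * write-only PORT_ERR_PROP_CLEAR property to clear them. "feature" is
 * assumed to come from enumeration; both entry points are static in
 * this file, so this only illustrates the dispatch, not an external API.
 */
static int dump_and_clear_port_errors(struct feature *feature)
{
    struct feature_prop prop;
    int ret;

    prop.prop_id = PORT_ERR_PROP_ERRORS;
    ret = port_error_get_prop(feature, &prop);
    if (ret)
        return ret;
    dev_info(NULL, "port errors: 0x%llx\n",
             (unsigned long long)prop.data);

    prop.prop_id = PORT_ERR_PROP_CLEAR;    /* prop.data still holds the bits */
    return port_error_set_prop(feature, &prop);
}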

View File

@ -0,0 +1,34 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
sources = [
'ifpga_api.c',
'ifpga_enumerate.c',
'ifpga_feature_dev.c',
'ifpga_fme.c',
'ifpga_fme_iperf.c',
'ifpga_fme_dperf.c',
'ifpga_fme_error.c',
'ifpga_port.c',
'ifpga_port_error.c',
'ifpga_fme_pr.c',
'opae_hw_api.c',
'opae_ifpga_hw_api.c',
'opae_debug.c'
]
error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
'-Wno-format', '-Wno-unused-but-set-variable',
'-Wno-strict-aliasing'
]
c_args = cflags
foreach flag: error_cflags
if cc.has_argument(flag)
c_args += flag
endif
endforeach
base_lib = static_library('ifpga_rawdev_base', sources,
dependencies: static_rte_eal,
c_args: c_args)
base_objs = base_lib.extract_all_objects()

View File

@ -0,0 +1,99 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#define OPAE_HW_DEBUG
#include "opae_hw_api.h"
#include "opae_debug.h"
void opae_manager_dump(struct opae_manager *mgr)
{
opae_log("=====%s=====\n", __func__);
opae_log("OPAE Manger %s\n", mgr->name);
opae_log("OPAE Manger OPs = %p\n", mgr->ops);
opae_log("OPAE Manager Private Data = %p\n", mgr->data);
opae_log("OPAE Adapter(parent) = %p\n", mgr->adapter);
opae_log("==========================\n");
}
void opae_bridge_dump(struct opae_bridge *br)
{
opae_log("=====%s=====\n", __func__);
opae_log("OPAE Bridge %s\n", br->name);
opae_log("OPAE Bridge ID = %d\n", br->id);
opae_log("OPAE Bridge OPs = %p\n", br->ops);
opae_log("OPAE Bridge Private Data = %p\n", br->data);
opae_log("OPAE Accelerator(under this bridge) = %p\n", br->acc);
opae_log("==========================\n");
}
void opae_accelerator_dump(struct opae_accelerator *acc)
{
opae_log("=====%s=====\n", __func__);
opae_log("OPAE Accelerator %s\n", acc->name);
opae_log("OPAE Accelerator Index = %d\n", acc->index);
opae_log("OPAE Accelerator OPs = %p\n", acc->ops);
opae_log("OPAE Accelerator Private Data = %p\n", acc->data);
opae_log("OPAE Bridge (upstream) = %p\n", acc->br);
opae_log("OPAE Manager (upstream) = %p\n", acc->mgr);
opae_log("==========================\n");
if (acc->br)
opae_bridge_dump(acc->br);
}
static void opae_adapter_data_dump(void *data)
{
struct opae_adapter_data *d = data;
struct opae_adapter_data_pci *d_pci;
struct opae_reg_region *r;
int i;
opae_log("=====%s=====\n", __func__);
switch (d->type) {
case OPAE_FPGA_PCI:
d_pci = (struct opae_adapter_data_pci *)d;
opae_log("OPAE Adapter Type = PCI\n");
opae_log("PCI Device ID: 0x%04x\n", d_pci->device_id);
opae_log("PCI Vendor ID: 0x%04x\n", d_pci->vendor_id);
for (i = 0; i < PCI_MAX_RESOURCE; i++) {
r = &d_pci->region[i];
opae_log("PCI Bar %d: phy(%llx) len(%llx) addr(%p)\n",
i, (unsigned long long)r->phys_addr,
(unsigned long long)r->len, r->addr);
}
break;
case OPAE_FPGA_NET:
break;
}
opae_log("==========================\n");
}
void opae_adapter_dump(struct opae_adapter *adapter, int verbose)
{
struct opae_accelerator *acc;
opae_log("=====%s=====\n", __func__);
opae_log("OPAE Adapter %s\n", adapter->name);
opae_log("OPAE Adapter OPs = %p\n", adapter->ops);
opae_log("OPAE Adapter Private Data = %p\n", adapter->data);
opae_log("OPAE Manager (downstream) = %p\n", adapter->mgr);
if (verbose) {
if (adapter->mgr)
opae_manager_dump(adapter->mgr);
opae_adapter_for_each_acc(adapter, acc)
opae_accelerator_dump(acc);
if (adapter->data)
opae_adapter_data_dump(adapter->data);
}
opae_log("==========================\n");
}

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _OPAE_DEBUG_H_
#define _OPAE_DEBUG_H_
#ifdef OPAE_HW_DEBUG
#define opae_log(fmt, args...) printf(fmt, ## args)
#else
#define opae_log(fmt, args...) do {} while (0)
#endif
void opae_manager_dump(struct opae_manager *mgr);
void opae_bridge_dump(struct opae_bridge *br);
void opae_accelerator_dump(struct opae_accelerator *acc);
void opae_adapter_dump(struct opae_adapter *adapter, int verbose);
#endif /* _OPAE_DEBUG_H_ */
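/*
 * Usage sketch: opae_debug.c defines OPAE_HW_DEBUG before including
 * the header above, so the dump functions themselves always print via
 * opae_log(). A consumer that wants to use opae_log() in its own file
 * must define OPAE_HW_DEBUG itself, otherwise the macro expands to a
 * no-op there. dump_everything() is illustrative only.
 */
#define OPAE_HW_DEBUG
#include "opae_hw_api.h"
#include "opae_debug.h"

static void dump_everything(struct opae_adapter *adapter)
{
    opae_log("dumping adapter %s\n", adapter->name);
    /* verbose=1 also walks the manager, accelerators and bridges */
    opae_adapter_dump(adapter, 1);
}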

View File

@ -0,0 +1,381 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "opae_hw_api.h"
#include "opae_debug.h"
#include "ifpga_api.h"
/* OPAE Bridge Functions */
/**
* opae_bridge_alloc - alloc opae_bridge data structure
* @name: bridge name.
* @ops: ops of this bridge.
* @data: private data of this bridge.
*
* Return: opae_bridge on success, otherwise NULL.
*/
struct opae_bridge *
opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)
{
struct opae_bridge *br = opae_zmalloc(sizeof(*br));
if (!br)
return NULL;
br->name = name;
br->ops = ops;
br->data = data;
opae_log("%s %p\n", __func__, br);
return br;
}
/**
* opae_bridge_reset - reset opae_bridge
* @br: bridge to be reset.
*
* Return: 0 on success, otherwise error code.
*/
int opae_bridge_reset(struct opae_bridge *br)
{
if (!br)
return -EINVAL;
if (br->ops && br->ops->reset)
return br->ops->reset(br);
opae_log("%s no ops\n", __func__);
return -ENOENT;
}
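/*
 * A hedged sketch of the alloc/ops pattern used throughout this file:
 * supply an ops table at alloc time and the generic entry points
 * dispatch into it. my_br_reset() and my_br_data are illustrative
 * names, not part of the driver.
 */
static int my_br_reset(struct opae_bridge *br)
{
    dev_info(NULL, "resetting bridge %s\n", br->name);
    return 0;
}

static struct opae_bridge_ops my_br_ops = {
    .reset = my_br_reset,
};

static int make_and_reset_bridge(void *my_br_data)
{
    struct opae_bridge *br;

    br = opae_bridge_alloc("sketch-br", &my_br_ops, my_br_data);
    if (!br)
        return -ENOMEM;
    opae_bridge_reset(br);    /* dispatches to my_br_reset() */
    opae_bridge_free(br);
    return 0;
}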
/* Accelerator Functions */
/**
* opae_accelerator_alloc - alloc opae_accelerator data structure
* @name: accelerator name.
* @ops: ops of this accelerator.
* @data: private data of this accelerator.
*
* Return: opae_accelerator on success, otherwise NULL.
*/
struct opae_accelerator *
opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
void *data)
{
struct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));
if (!acc)
return NULL;
acc->name = name;
acc->ops = ops;
acc->data = data;
opae_log("%s %p\n", __func__, acc);
return acc;
}
/**
* opae_acc_reg_read - read accelerator's register from its reg region.
* @acc: accelerator to read.
* @region_idx: reg region index.
* @offset: reg offset.
* @byte: read operation width, e.g. 4 bytes = a 32-bit read.
* @data: data to store the value read from the register.
*
* Return: 0 on success, otherwise error code.
*/
int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
u64 offset, unsigned int byte, void *data)
{
if (!acc || !data)
return -EINVAL;
if (acc->ops && acc->ops->read)
return acc->ops->read(acc, region_idx, offset, byte, data);
return -ENOENT;
}
/**
* opae_acc_reg_write - write to accelerator's register from its reg region.
* @acc: accelerator to write.
* @region_idx: reg region index.
* @offset: reg offset.
* @byte: write operation width, e.g. 4 bytes = a 32-bit write.
* @data: data stored the value to write to the register.
*
* Return: 0 on success, otherwise error code.
*/
int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
u64 offset, unsigned int byte, void *data)
{
if (!acc || !data)
return -EINVAL;
if (acc->ops && acc->ops->write)
return acc->ops->write(acc, region_idx, offset, byte, data);
return -ENOENT;
}
/**
* opae_acc_get_info - get information of an accelerator.
* @acc: targeted accelerator
* @info: accelerator info data structure to be filled.
*
* Return: 0 on success, otherwise error code.
*/
int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info)
{
if (!acc || !info)
return -EINVAL;
if (acc->ops && acc->ops->get_info)
return acc->ops->get_info(acc, info);
return -ENOENT;
}
/**
* opae_acc_get_region_info - get information of an accelerator register region.
* @acc: targeted accelerator
* @info: accelerator region info data structure to be filled.
*
* Return: 0 on success, otherwise error code.
*/
int opae_acc_get_region_info(struct opae_accelerator *acc,
struct opae_acc_region_info *info)
{
if (!acc || !info)
return -EINVAL;
if (acc->ops && acc->ops->get_region_info)
return acc->ops->get_region_info(acc, info);
return -ENOENT;
}
/**
* opae_acc_set_irq - set an accelerator's irq.
* @acc: targeted accelerator
* @start: start vector number
* @count: count of vectors to be set from the start vector
* @evtfds: event fds to be notified when the corresponding irqs happen
*
* Return: 0 on success, otherwise error code.
*/
int opae_acc_set_irq(struct opae_accelerator *acc,
u32 start, u32 count, s32 evtfds[])
{
if (!acc || !acc->data)
return -EINVAL;
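/* reject count == 0 and u32 wrap-around of start + count */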
if (start + count <= start)
return -EINVAL;
if (acc->ops && acc->ops->set_irq)
return acc->ops->set_irq(acc, start, count, evtfds);
return -ENOENT;
}
/**
* opae_acc_get_uuid - get accelerator's UUID.
* @acc: targeted accelerator
* @uuid: a pointer to UUID
*
* Return: 0 on success, otherwise error code.
*/
int opae_acc_get_uuid(struct opae_accelerator *acc,
struct uuid *uuid)
{
if (!acc || !uuid)
return -EINVAL;
if (acc->ops && acc->ops->get_uuid)
return acc->ops->get_uuid(acc, uuid);
return -ENOENT;
}
/* Manager Functions */
/**
* opae_manager_alloc - alloc opae_manager data structure
* @name: manager name.
* @ops: ops of this manager.
* @data: private data of this manager.
*
* Return: opae_manager on success, otherwise NULL.
*/
struct opae_manager *
opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data)
{
struct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));
if (!mgr)
return NULL;
mgr->name = name;
mgr->ops = ops;
mgr->data = data;
opae_log("%s %p\n", __func__, mgr);
return mgr;
}
/**
* opae_manager_flash - flash a reconfiguration image via opae_manager
* @mgr: opae_manager for flash.
* @id: id of target region (accelerator).
* @buf: image data buffer.
* @size: buffer size.
* @status: status to store flash result.
*
* Return: 0 on success, otherwise error code.
*/
int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size,
u64 *status)
{
if (!mgr)
return -EINVAL;
if (mgr && mgr->ops && mgr->ops->flash)
return mgr->ops->flash(mgr, id, buf, size, status);
return -ENOENT;
}
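/*
 * Usage sketch for partial reconfiguration via the manager, assuming
 * "mgr" was obtained from opae_adapter_get_mgr() and "image"/"len"
 * hold a valid reconfiguration image. port_id selects the target
 * region; do_pr() is an illustrative caller, not part of the driver.
 */
static int do_pr(struct opae_manager *mgr, int port_id,
                 void *image, u32 len)
{
    u64 status = 0;
    int ret;

    ret = opae_manager_flash(mgr, port_id, image, len, &status);
    if (ret)
        dev_err(NULL, "PR failed: %d (status 0x%llx)\n",
                ret, (unsigned long long)status);
    return ret;
}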
/* Adapter Functions */
/**
* opae_adapter_data_alloc - alloc opae_adapter_data data structure
* @type: opae_adapter_type.
*
* Return: opae_adapter_data on success, otherwise NULL.
*/
void *opae_adapter_data_alloc(enum opae_adapter_type type)
{
struct opae_adapter_data *data;
int size;
switch (type) {
case OPAE_FPGA_PCI:
size = sizeof(struct opae_adapter_data_pci);
break;
case OPAE_FPGA_NET:
size = sizeof(struct opae_adapter_data_net);
break;
default:
size = sizeof(struct opae_adapter_data);
break;
}
data = opae_zmalloc(size);
if (!data)
return NULL;
data->type = type;
return data;
}
static struct opae_adapter_ops *match_ops(struct opae_adapter *adapter)
{
struct opae_adapter_data *data;
if (!adapter || !adapter->data)
return NULL;
data = adapter->data;
if (data->type == OPAE_FPGA_PCI)
return &ifpga_adapter_ops;
return NULL;
}
/**
* opae_adapter_alloc - alloc opae_adapter data structure
* @name: adapter name.
* @data: private data of this adapter.
*
* Return: opae_adapter on success, otherwise NULL.
*/
struct opae_adapter *opae_adapter_alloc(const char *name, void *data)
{
struct opae_adapter *adapter = opae_zmalloc(sizeof(*adapter));
if (!adapter)
return NULL;
TAILQ_INIT(&adapter->acc_list);
adapter->data = data;
adapter->name = name;
adapter->ops = match_ops(adapter);
return adapter;
}
/**
* opae_adapter_enumerate - enumerate this adapter
* @adapter: adapter to enumerate.
*
* Return: 0 on success, otherwise error code.
*/
int opae_adapter_enumerate(struct opae_adapter *adapter)
{
int ret = -ENOENT;
if (!adapter)
return -EINVAL;
if (adapter->ops && adapter->ops->enumerate)
ret = adapter->ops->enumerate(adapter);
if (!ret)
opae_adapter_dump(adapter, 1);
return ret;
}
/**
* opae_adapter_destroy - destroy this adapter
* @adapter: adapter to destroy.
*
* Destroy things allocated during adapter enumeration.
*/
void opae_adapter_destroy(struct opae_adapter *adapter)
{
if (adapter && adapter->ops && adapter->ops->destroy)
adapter->ops->destroy(adapter);
}
/**
* opae_adapter_get_acc - find and return accelerator with matched id
* @adapter: adapter to find the accelerator.
* @acc_id: id (index) of the accelerator.
*
* Return: pointer to the accelerator on success, otherwise NULL.
*/
struct opae_accelerator *
opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)
{
struct opae_accelerator *acc = NULL;
if (!adapter)
return NULL;
opae_adapter_for_each_acc(adapter, acc)
if (acc->index == acc_id)
return acc;
return NULL;
}
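/*
 * A hedged end-to-end sketch of the adapter lifecycle for a PCI
 * device: allocate typed adapter data, fill in the PCI identity and
 * BAR regions (elided here), enumerate, look up an accelerator, then
 * tear everything down. probe_fpga_pci() is illustrative and its
 * error unwinding is simplified.
 */
static int probe_fpga_pci(void)
{
    struct opae_adapter_data_pci *data;
    struct opae_adapter *adapter;
    struct opae_accelerator *acc;
    int ret;

    data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
    if (!data)
        return -ENOMEM;
    /* caller fills data->vendor_id/device_id/region[] from the bus */

    adapter = opae_adapter_alloc("sketch-fpga", data);
    if (!adapter) {
        opae_adapter_data_free(data);
        return -ENOMEM;
    }

    ret = opae_adapter_enumerate(adapter);
    if (ret)
        goto out;

    acc = opae_adapter_get_acc(adapter, 0);    /* accelerator #0 */
    if (acc)
        opae_bridge_reset(opae_acc_get_br(acc));

    opae_adapter_destroy(adapter);
out:
    opae_adapter_free(adapter);
    opae_adapter_data_free(data);
    return ret;
}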

View File

@ -0,0 +1,253 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _OPAE_HW_API_H_
#define _OPAE_HW_API_H_
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
#include "opae_osdep.h"
#ifndef PCI_MAX_RESOURCE
#define PCI_MAX_RESOURCE 6
#endif
struct opae_adapter;
enum opae_adapter_type {
OPAE_FPGA_PCI,
OPAE_FPGA_NET,
};
/* OPAE Manager Data Structure */
struct opae_manager_ops;
/*
 * opae_manager has a pointer to its parent adapter, as it may be able
 * to manage all components on this FPGA device (adapter). If that is
 * not the case, do not set this adapter pointer, which limits the
 * opae_manager ops to the manager itself.
 */
struct opae_manager {
const char *name;
struct opae_adapter *adapter;
struct opae_manager_ops *ops;
void *data;
};
/* FIXME: add more management ops, e.g power/thermal and etc */
struct opae_manager_ops {
int (*flash)(struct opae_manager *mgr, int id, void *buffer,
u32 size, u64 *status);
};
/* OPAE Manager APIs */
struct opae_manager *
opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data);
#define opae_manager_free(mgr) opae_free(mgr)
int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf,
u32 size, u64 *status);
/* OPAE Bridge Data Structure */
struct opae_bridge_ops;
/*
* opae_bridge only has a pointer to its downstream accelerator.
*/
struct opae_bridge {
const char *name;
int id;
struct opae_accelerator *acc;
struct opae_bridge_ops *ops;
void *data;
};
struct opae_bridge_ops {
int (*reset)(struct opae_bridge *br);
};
/* OPAE Bridge APIs */
struct opae_bridge *
opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);
int opae_bridge_reset(struct opae_bridge *br);
#define opae_bridge_free(br) opae_free(br)
/* OPAE Accelerator Data Structure */
struct opae_accelerator_ops;
/*
 * opae_accelerator has a pointer to its upstream bridge (port).
 * In some cases, if the same user is allowed to do PR (partial
 * reconfiguration) on its own accelerator, the manager pointer is also
 * set during enumeration. In other cases, PR can only be done via a
 * manager owned by another module / thread / service / application,
 * for better protection.
 */
struct opae_accelerator {
TAILQ_ENTRY(opae_accelerator) node;
const char *name;
int index;
struct opae_bridge *br;
struct opae_manager *mgr;
struct opae_accelerator_ops *ops;
void *data;
};
struct opae_acc_info {
unsigned int num_regions;
unsigned int num_irqs;
};
struct opae_acc_region_info {
u32 flags;
#define ACC_REGION_READ (1 << 0)
#define ACC_REGION_WRITE (1 << 1)
#define ACC_REGION_MMIO (1 << 2)
u32 index;
u64 phys_addr;
u64 len;
u8 *addr;
};
struct opae_accelerator_ops {
int (*read)(struct opae_accelerator *acc, unsigned int region_idx,
u64 offset, unsigned int byte, void *data);
int (*write)(struct opae_accelerator *acc, unsigned int region_idx,
u64 offset, unsigned int byte, void *data);
int (*get_info)(struct opae_accelerator *acc,
struct opae_acc_info *info);
int (*get_region_info)(struct opae_accelerator *acc,
struct opae_acc_region_info *info);
int (*set_irq)(struct opae_accelerator *acc,
u32 start, u32 count, s32 evtfds[]);
int (*get_uuid)(struct opae_accelerator *acc,
struct uuid *uuid);
};
/* OPAE accelerator APIs */
struct opae_accelerator *
opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
void *data);
#define opae_accelerator_free(acc) opae_free(acc)
int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info);
int opae_acc_get_region_info(struct opae_accelerator *acc,
struct opae_acc_region_info *info);
int opae_acc_set_irq(struct opae_accelerator *acc,
u32 start, u32 count, s32 evtfds[]);
int opae_acc_get_uuid(struct opae_accelerator *acc,
struct uuid *uuid);
static inline struct opae_bridge *
opae_acc_get_br(struct opae_accelerator *acc)
{
return acc ? acc->br : NULL;
}
static inline struct opae_manager *
opae_acc_get_mgr(struct opae_accelerator *acc)
{
return acc ? acc->mgr : NULL;
}
int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
u64 offset, unsigned int byte, void *data);
int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
u64 offset, unsigned int byte, void *data);
#define opae_acc_reg_read64(acc, region, offset, data) \
opae_acc_reg_read(acc, region, offset, 8, data)
#define opae_acc_reg_write64(acc, region, offset, data) \
opae_acc_reg_write(acc, region, offset, 8, data)
#define opae_acc_reg_read32(acc, region, offset, data) \
opae_acc_reg_read(acc, region, offset, 4, data)
#define opae_acc_reg_write32(acc, region, offset, data) \
opae_acc_reg_write(acc, region, offset, 4, data)
#define opae_acc_reg_read16(acc, region, offset, data) \
opae_acc_reg_read(acc, region, offset, 2, data)
#define opae_acc_reg_write16(acc, region, offset, data) \
opae_acc_reg_write(acc, region, offset, 2, data)
#define opae_acc_reg_read8(acc, region, offset, data) \
opae_acc_reg_read(acc, region, offset, 1, data)
#define opae_acc_reg_write8(acc, region, offset, data) \
opae_acc_reg_write(acc, region, offset, 1, data)
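/*
 * Usage sketch for the width helpers above: read a 64-bit register at
 * offset 0x0 of region 0 and write a 32-bit value back at another
 * offset. The region index and offsets are illustrative only, not
 * addresses defined by this API.
 */
static inline int touch_acc_regs(struct opae_accelerator *acc)
{
    u64 reg0;
    u32 scratch = 0xdeadbeef;
    int ret;

    ret = opae_acc_reg_read64(acc, 0, 0x0, &reg0);
    if (ret)
        return ret;
    return opae_acc_reg_write32(acc, 0, 0x28, &scratch);
}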
/* for data stream read/write */
int opae_acc_data_read(struct opae_accelerator *acc, unsigned int flags,
u64 offset, unsigned int byte, void *data);
int opae_acc_data_write(struct opae_accelerator *acc, unsigned int flags,
u64 offset, unsigned int byte, void *data);
/* OPAE Adapter Data Structure */
struct opae_adapter_data {
enum opae_adapter_type type;
};
struct opae_reg_region {
u64 phys_addr;
u64 len;
u8 *addr;
};
struct opae_adapter_data_pci {
enum opae_adapter_type type;
u16 device_id;
u16 vendor_id;
struct opae_reg_region region[PCI_MAX_RESOURCE];
int vfio_dev_fd; /* VFIO device file descriptor */
};
/* FIXME: OPAE_FPGA_NET type */
struct opae_adapter_data_net {
enum opae_adapter_type type;
};
struct opae_adapter_ops {
int (*enumerate)(struct opae_adapter *adapter);
void (*destroy)(struct opae_adapter *adapter);
};
TAILQ_HEAD(opae_accelerator_list, opae_accelerator);
#define opae_adapter_for_each_acc(adapter, acc) \
TAILQ_FOREACH(acc, &adapter->acc_list, node)
struct opae_adapter {
const char *name;
struct opae_manager *mgr;
struct opae_accelerator_list acc_list;
struct opae_adapter_ops *ops;
void *data;
};
/* OPAE Adapter APIs */
void *opae_adapter_data_alloc(enum opae_adapter_type type);
#define opae_adapter_data_free(data) opae_free(data)
struct opae_adapter *opae_adapter_alloc(const char *name, void *data);
#define opae_adapter_free(adapter) opae_free(adapter)
int opae_adapter_enumerate(struct opae_adapter *adapter);
void opae_adapter_destroy(struct opae_adapter *adapter);
static inline struct opae_manager *
opae_adapter_get_mgr(struct opae_adapter *adapter)
{
return adapter ? adapter->mgr : NULL;
}
struct opae_accelerator *
opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);
static inline void opae_adapter_add_acc(struct opae_adapter *adapter,
struct opae_accelerator *acc)
{
TAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);
}
static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,
struct opae_accelerator *acc)
{
TAILQ_REMOVE(&adapter->acc_list, acc, node);
}
#endif /* _OPAE_HW_API_H_ */

View File

@ -0,0 +1,145 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#include "opae_ifpga_hw_api.h"
#include "ifpga_api.h"
int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme;
if (!mgr || !mgr->data)
return -EINVAL;
fme = mgr->data;
return ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
}
int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
struct feature_prop *prop)
{
struct ifpga_fme_hw *fme;
if (!mgr || !mgr->data)
return -EINVAL;
fme = mgr->data;
return ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
}
int opae_manager_ifpga_get_info(struct opae_manager *mgr,
struct fpga_fme_info *fme_info)
{
struct ifpga_fme_hw *fme;
if (!mgr || !mgr->data || !fme_info)
return -EINVAL;
fme = mgr->data;
spinlock_lock(&fme->lock);
fme_info->capability = fme->capability;
spinlock_unlock(&fme->lock);
return 0;
}
int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
struct fpga_fme_err_irq_set *err_irq_set)
{
struct ifpga_fme_hw *fme;
if (!mgr || !mgr->data)
return -EINVAL;
fme = mgr->data;
return ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,
IFPGA_FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);
}
int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
struct feature_prop *prop)
{
struct ifpga_port_hw *port;
if (!br || !br->data)
return -EINVAL;
port = br->data;
return ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,
port->port_id, prop);
}
int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
struct feature_prop *prop)
{
struct ifpga_port_hw *port;
if (!br || !br->data)
return -EINVAL;
port = br->data;
return ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,
port->port_id, prop);
}
int opae_bridge_ifpga_get_info(struct opae_bridge *br,
struct fpga_port_info *port_info)
{
struct ifpga_port_hw *port;
if (!br || !br->data || !port_info)
return -EINVAL;
port = br->data;
spinlock_lock(&port->lock);
port_info->capability = port->capability;
port_info->num_uafu_irqs = port->num_uafu_irqs;
spinlock_unlock(&port->lock);
return 0;
}
int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
struct fpga_port_region_info *info)
{
struct ifpga_port_hw *port;
if (!br || !br->data || !info)
return -EINVAL;
/* Only support STP region now */
if (info->index != PORT_REGION_INDEX_STP)
return -EINVAL;
port = br->data;
spinlock_lock(&port->lock);
info->addr = port->stp_addr;
info->size = port->stp_size;
spinlock_unlock(&port->lock);
return 0;
}
int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
struct fpga_port_err_irq_set *err_irq_set)
{
struct ifpga_port_hw *port;
if (!br || !br->data)
return -EINVAL;
port = br->data;
return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
IFPGA_PORT_FEATURE_ID_ERROR, err_irq_set);
}

View File

@ -0,0 +1,279 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _OPAE_IFPGA_HW_API_H_
#define _OPAE_IFPGA_HW_API_H_
#include "opae_hw_api.h"
/**
* struct feature_prop - data structure for feature property
* @feature_id: id of this feature.
* @prop_id: id of this property under this feature.
* @data: property value to set/get.
*/
struct feature_prop {
u64 feature_id;
u64 prop_id;
u64 data;
};
#define IFPGA_FIU_ID_FME 0x0
#define IFPGA_FIU_ID_PORT 0x1
#define IFPGA_FME_FEATURE_ID_HEADER 0x0
#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT 0x1
#define IFPGA_FME_FEATURE_ID_POWER_MGMT 0x2
#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF 0x3
#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR 0x4
#define IFPGA_FME_FEATURE_ID_PR_MGMT 0x5
#define IFPGA_FME_FEATURE_ID_HSSI 0x6
#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF 0x7
#define IFPGA_PORT_FEATURE_ID_HEADER 0x0
#define IFPGA_PORT_FEATURE_ID_AFU 0xff
#define IFPGA_PORT_FEATURE_ID_ERROR 0x10
#define IFPGA_PORT_FEATURE_ID_UMSG 0x11
#define IFPGA_PORT_FEATURE_ID_UINT 0x12
#define IFPGA_PORT_FEATURE_ID_STP 0x13
/*
* PROP format (TOP + SUB + ID)
*
* (~0x0) means this field is unused.
*/
#define PROP_TOP GENMASK(31, 24)
#define PROP_TOP_UNUSED 0xff
#define PROP_SUB GENMASK(23, 16)
#define PROP_SUB_UNUSED 0xff
#define PROP_ID GENMASK(15, 0)
#define PROP(_top, _sub, _id) \
(SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) |\
SET_FIELD(PROP_ID, _id))
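/*
 * Worked example of the PROP() encoding, using the cache read-hit
 * counter defined later in this file (TOP=0x1 cache group, SUB=0xff
 * unused, ID=0x2):
 *
 *   PROP(0x1, 0xff, 0x2)
 *     = SET_FIELD(PROP_TOP, 0x1)   -> 0x01000000
 *     | SET_FIELD(PROP_SUB, 0xff)  -> 0x00ff0000
 *     | SET_FIELD(PROP_ID,  0x2)   -> 0x00000002
 *     = 0x01ff0002
 */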
/* FME header feature's properties */
#define FME_HDR_PROP_REVISION 0x1 /* RDONLY */
#define FME_HDR_PROP_PORTS_NUM 0x2 /* RDONLY */
#define FME_HDR_PROP_CACHE_SIZE 0x3 /* RDONLY */
#define FME_HDR_PROP_VERSION 0x4 /* RDONLY */
#define FME_HDR_PROP_SOCKET_ID 0x5 /* RDONLY */
#define FME_HDR_PROP_BITSTREAM_ID 0x6 /* RDONLY */
#define FME_HDR_PROP_BITSTREAM_METADATA 0x7 /* RDONLY */
/* FME error reporting feature's properties */
/* FME error reporting properties format */
#define ERR_PROP(_top, _id) PROP(_top, 0xff, _id)
#define ERR_PROP_TOP_UNUSED PROP_TOP_UNUSED
#define ERR_PROP_TOP_FME_ERR 0x1
#define ERR_PROP_ROOT(_id) ERR_PROP(0xff, _id)
#define ERR_PROP_FME_ERR(_id) ERR_PROP(ERR_PROP_TOP_FME_ERR, _id)
#define FME_ERR_PROP_ERRORS ERR_PROP_FME_ERR(0x1)
#define FME_ERR_PROP_FIRST_ERROR ERR_PROP_FME_ERR(0x2)
#define FME_ERR_PROP_NEXT_ERROR ERR_PROP_FME_ERR(0x3)
#define FME_ERR_PROP_CLEAR ERR_PROP_FME_ERR(0x4) /* WO */
#define FME_ERR_PROP_REVISION ERR_PROP_ROOT(0x5)
#define FME_ERR_PROP_PCIE0_ERRORS ERR_PROP_ROOT(0x6) /* RW */
#define FME_ERR_PROP_PCIE1_ERRORS ERR_PROP_ROOT(0x7) /* RW */
#define FME_ERR_PROP_NONFATAL_ERRORS ERR_PROP_ROOT(0x8)
#define FME_ERR_PROP_CATFATAL_ERRORS ERR_PROP_ROOT(0x9)
#define FME_ERR_PROP_INJECT_ERRORS ERR_PROP_ROOT(0xa) /* RW */
/* FME thermal feature's properties */
#define FME_THERMAL_PROP_THRESHOLD1 0x1 /* RW */
#define FME_THERMAL_PROP_THRESHOLD2 0x2 /* RW */
#define FME_THERMAL_PROP_THRESHOLD_TRIP 0x3 /* RDONLY */
#define FME_THERMAL_PROP_THRESHOLD1_REACHED 0x4 /* RDONLY */
#define FME_THERMAL_PROP_THRESHOLD2_REACHED 0x5 /* RDONLY */
#define FME_THERMAL_PROP_THRESHOLD1_POLICY 0x6 /* RW */
#define FME_THERMAL_PROP_TEMPERATURE 0x7 /* RDONLY */
#define FME_THERMAL_PROP_REVISION 0x8 /* RDONLY */
/* FME power feature's properties */
#define FME_PWR_PROP_CONSUMED 0x1 /* RDONLY */
#define FME_PWR_PROP_THRESHOLD1 0x2 /* RW */
#define FME_PWR_PROP_THRESHOLD2 0x3 /* RW */
#define FME_PWR_PROP_THRESHOLD1_STATUS 0x4 /* RDONLY */
#define FME_PWR_PROP_THRESHOLD2_STATUS 0x5 /* RDONLY */
#define FME_PWR_PROP_RTL 0x6 /* RDONLY */
#define FME_PWR_PROP_XEON_LIMIT 0x7 /* RDONLY */
#define FME_PWR_PROP_FPGA_LIMIT 0x8 /* RDONLY */
#define FME_PWR_PROP_REVISION 0x9 /* RDONLY */
/* FME iperf/dperf PROP format */
#define PERF_PROP_TOP_CACHE 0x1
#define PERF_PROP_TOP_VTD 0x2
#define PERF_PROP_TOP_FAB 0x3
#define PERF_PROP_TOP_UNUSED PROP_TOP_UNUSED
#define PERF_PROP_SUB_UNUSED PROP_SUB_UNUSED
#define PERF_PROP_ROOT(_id) PROP(0xff, 0xff, _id)
#define PERF_PROP_CACHE(_id) PROP(PERF_PROP_TOP_CACHE, 0xff, _id)
#define PERF_PROP_VTD(_sub, _id) PROP(PERF_PROP_TOP_VTD, _sub, _id)
#define PERF_PROP_VTD_ROOT(_id) PROP(PERF_PROP_TOP_VTD, 0xff, _id)
#define PERF_PROP_FAB(_sub, _id) PROP(PERF_PROP_TOP_FAB, _sub, _id)
#define PERF_PROP_FAB_ROOT(_id) PROP(PERF_PROP_TOP_FAB, 0xff, _id)
/* FME iperf feature's properties */
#define FME_IPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
#define FME_IPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
/* iperf CACHE properties */
#define FME_IPERF_PROP_CACHE_FREEZE PERF_PROP_CACHE(0x1) /* RW */
#define FME_IPERF_PROP_CACHE_READ_HIT PERF_PROP_CACHE(0x2)
#define FME_IPERF_PROP_CACHE_READ_MISS PERF_PROP_CACHE(0x3)
#define FME_IPERF_PROP_CACHE_WRITE_HIT PERF_PROP_CACHE(0x4)
#define FME_IPERF_PROP_CACHE_WRITE_MISS PERF_PROP_CACHE(0x5)
#define FME_IPERF_PROP_CACHE_HOLD_REQUEST PERF_PROP_CACHE(0x6)
#define FME_IPERF_PROP_CACHE_TX_REQ_STALL PERF_PROP_CACHE(0x7)
#define FME_IPERF_PROP_CACHE_RX_REQ_STALL PERF_PROP_CACHE(0x8)
#define FME_IPERF_PROP_CACHE_RX_EVICTION PERF_PROP_CACHE(0x9)
#define FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xa)
#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xb)
/* iperf VTD properties */
#define FME_IPERF_PROP_VTD_FREEZE PERF_PROP_VTD_ROOT(0x1) /* RW */
#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT PERF_PROP_VTD_ROOT(0x2)
#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT PERF_PROP_VTD_ROOT(0x3)
#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT PERF_PROP_VTD_ROOT(0x4)
#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT PERF_PROP_VTD_ROOT(0x5)
#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT PERF_PROP_VTD_ROOT(0x6)
#define FME_IPERF_PROP_VTD_SIP_RCC_HIT PERF_PROP_VTD_ROOT(0x7)
#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS PERF_PROP_VTD_ROOT(0x8)
#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS PERF_PROP_VTD_ROOT(0x9)
#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS PERF_PROP_VTD_ROOT(0xa)
#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS PERF_PROP_VTD_ROOT(0xb)
#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS PERF_PROP_VTD_ROOT(0xc)
#define FME_IPERF_PROP_VTD_SIP_RCC_MISS PERF_PROP_VTD_ROOT(0xd)
#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n) PERF_PROP_VTD(n, 0xe)
#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n) PERF_PROP_VTD(n, 0xf)
#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n) PERF_PROP_VTD(n, 0x10)
#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n) PERF_PROP_VTD(n, 0x11)
#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n) PERF_PROP_VTD(n, 0x12)
#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n) PERF_PROP_VTD(n, 0x13)
#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n) PERF_PROP_VTD(n, 0x14)
/* iperf FAB properties */
#define FME_IPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
#define FME_IPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
#define FME_IPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
#define FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
#define FME_IPERF_PROP_FAB_PCIE1_READ PERF_PROP_FAB_ROOT(0x4)
#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n) PERF_PROP_FAB(n, 0x4)
#define FME_IPERF_PROP_FAB_PCIE1_WRITE PERF_PROP_FAB_ROOT(0x5)
#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n) PERF_PROP_FAB(n, 0x5)
#define FME_IPERF_PROP_FAB_UPI_READ PERF_PROP_FAB_ROOT(0x6)
#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n) PERF_PROP_FAB(n, 0x6)
#define FME_IPERF_PROP_FAB_UPI_WRITE PERF_PROP_FAB_ROOT(0x7)
#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n) PERF_PROP_FAB(n, 0x7)
#define FME_IPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x8)
#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x8)
#define FME_IPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x9)
#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x9)
#define FME_IPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0xa) /* RW */
#define FME_IPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0xa) /* RW */
/* FME dperf properties */
#define FME_DPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
#define FME_DPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
/* dperf FAB properties */
#define FME_DPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
#define FME_DPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
#define FME_DPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
#define FME_DPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x4)
#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x4)
#define FME_DPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x5)
#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x5)
#define FME_DPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0x6) /* RW */
#define FME_DPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0x6) /* RW */
/* PORT header feature's properties */
#define PORT_HDR_PROP_REVISION 0x1 /* RDONLY */
#define PORT_HDR_PROP_PORTIDX 0x2 /* RDONLY */
#define PORT_HDR_PROP_LATENCY_TOLERANCE 0x3 /* RDONLY */
#define PORT_HDR_PROP_AP1_EVENT 0x4 /* RW */
#define PORT_HDR_PROP_AP2_EVENT 0x5 /* RW */
#define PORT_HDR_PROP_POWER_STATE 0x6 /* RDONLY */
#define PORT_HDR_PROP_USERCLK_FREQCMD 0x7 /* RW */
#define PORT_HDR_PROP_USERCLK_FREQCNTRCMD 0x8 /* RW */
#define PORT_HDR_PROP_USERCLK_FREQSTS 0x9 /* RDONLY */
#define PORT_HDR_PROP_USERCLK_CNTRSTS 0xa /* RDONLY */
/* PORT error feature's properties */
#define PORT_ERR_PROP_REVISION 0x1 /* RDONLY */
#define PORT_ERR_PROP_ERRORS 0x2 /* RDONLY */
#define PORT_ERR_PROP_FIRST_ERROR 0x3 /* RDONLY */
#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB 0x4 /* RDONLY */
#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB 0x5 /* RDONLY */
#define PORT_ERR_PROP_CLEAR 0x6 /* WRONLY */
int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
struct feature_prop *prop);
int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
struct feature_prop *prop);
int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
struct feature_prop *prop);
int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
struct feature_prop *prop);
/*
* Retrieve information about the fpga fme.
* Driver fills the info in the provided struct fpga_fme_info.
*/
struct fpga_fme_info {
u32 capability; /* The capability of FME device */
#define FPGA_FME_CAP_ERR_IRQ (1 << 0) /* Support fme error interrupt */
};
int opae_manager_ifpga_get_info(struct opae_manager *mgr,
struct fpga_fme_info *fme_info);
/* Set eventfd information for ifpga FME error interrupt */
struct fpga_fme_err_irq_set {
s32 evtfd; /* Eventfd handler */
};
int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
struct fpga_fme_err_irq_set *err_irq_set);
/*
* Retrieve information about the fpga port.
* Driver fills the info in the provided struct fpga_port_info.
*/
struct fpga_port_info {
u32 capability; /* The capability of port device */
#define FPGA_PORT_CAP_ERR_IRQ (1 << 0) /* Support port error interrupt */
#define FPGA_PORT_CAP_UAFU_IRQ (1 << 1) /* Support uafu error interrupt */
u32 num_umsgs; /* The number of allocated umsgs */
u32 num_uafu_irqs; /* The number of uafu interrupts */
};
int opae_bridge_ifpga_get_info(struct opae_bridge *br,
struct fpga_port_info *port_info);
/*
* Retrieve region information about the fpga port.
* Driver needs to fill the index of struct fpga_port_region_info.
*/
struct fpga_port_region_info {
u32 index;
#define PORT_REGION_INDEX_STP (1 << 1) /* Signal Tap Region */
u64 size; /* Region Size */
u8 *addr; /* Base address of the region */
};
int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
struct fpga_port_region_info *info);
/* Set eventfd information for ifpga port error interrupt */
struct fpga_port_err_irq_set {
s32 evtfd; /* Eventfd handler */
};
int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
struct fpga_port_err_irq_set *err_irq_set);
#endif /* _OPAE_IFPGA_HW_API_H_ */
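/*
 * A hedged usage sketch of the ifpga-specific wrappers above: read the
 * FME bitstream id through the property interface and locate the
 * port's Signal Tap (STP) region. "mgr" and "br" are assumed to come
 * from opae_adapter_get_mgr()/opae_acc_get_br(); query_ifpga() is an
 * illustrative caller.
 */
static int query_ifpga(struct opae_manager *mgr, struct opae_bridge *br)
{
    struct feature_prop prop;
    struct fpga_port_region_info reg_info;
    int ret;

    prop.feature_id = IFPGA_FME_FEATURE_ID_HEADER;
    prop.prop_id = FME_HDR_PROP_BITSTREAM_ID;
    ret = opae_manager_ifpga_get_prop(mgr, &prop);
    if (ret)
        return ret;

    reg_info.index = PORT_REGION_INDEX_STP;    /* only STP is supported */
    return opae_bridge_ifpga_get_region_info(br, &reg_info);
}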

View File

@ -0,0 +1,79 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _OPAE_OSDEP_H
#define _OPAE_OSDEP_H
#include <string.h>
#include <stdbool.h>
#ifdef RTE_LIBRTE_EAL
#include "osdep_rte/osdep_generic.h"
#else
#include "osdep_raw/osdep_generic.h"
#endif
#define __iomem
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
typedef uint64_t dma_addr_t;
struct uuid {
u8 b[16];
};
#ifndef LINUX_MACROS
#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif
#ifndef BIT
#define BIT(a) (1UL << (a))
#endif /* BIT */
#ifndef BIT_ULL
#define BIT_ULL(a) (1ULL << (a))
#endif /* BIT_ULL */
#ifndef GENMASK
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#endif /* GENMASK */
#ifndef GENMASK_ULL
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
#endif /* GENMASK_ULL */
#endif /* LINUX_MACROS */
#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))
#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))
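/*
 * SET_FIELD/GET_FIELD round-trip example: with m = GENMASK(23, 16),
 * SET_FIELD(m, 0xab) shifts the value into bits 23:16 (0x00ab0000) and
 * GET_FIELD(m, 0x00ab0000) recovers 0xab. __builtin_ffsll(m) - 1 is
 * simply the index of the mask's lowest set bit (16 here).
 */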
#define dev_err(x, args...) dev_printf(ERR, args)
#define dev_info(x, args...) dev_printf(INFO, args)
#define dev_warn(x, args...) dev_printf(WARNING, args)
#ifdef OPAE_DEBUG
#define dev_debug(x, args...) dev_printf(DEBUG, args)
#else
#define dev_debug(x, args...) do { } while (0)
#endif
#define pr_err(y, args...) dev_err(0, y, ##args)
#define pr_warn(y, args...) dev_warn(0, y, ##args)
#define pr_info(y, args...) dev_info(0, y, ##args)
#ifndef WARN_ON
#define WARN_ON(x) do { \
int ret = !!(x); \
if (unlikely(ret)) \
pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
} while (0)
#endif
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define udelay(x) opae_udelay(x)
#define msleep(x) opae_udelay(1000 * (x))
#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
#endif

View File

@ -0,0 +1,75 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _OSDEP_RAW_GENERIC_H
#define _OSDEP_RAW_GENERIC_H
#define compiler_barrier() (asm volatile ("" : : : "memory"))
#define io_wmb() compiler_barrier()
#define io_rmb() compiler_barrier()
static inline uint8_t opae_readb(const volatile void *addr)
{
uint8_t val;
val = *(const volatile uint8_t *)addr;
io_rmb();
return val;
}
static inline uint16_t opae_readw(const volatile void *addr)
{
uint16_t val;
val = *(const volatile uint16_t *)addr;
io_rmb();
return val;
}
static inline uint32_t opae_readl(const volatile void *addr)
{
uint32_t val;
val = *(const volatile uint32_t *)addr;
io_rmb();
return val;
}
static inline uint64_t opae_readq(const volatile void *addr)
{
uint64_t val;
val = *(const volatile uint64_t *)addr;
io_rmb();
return val;
}
static inline void opae_writeb(uint8_t value, volatile void *addr)
{
io_wmb();
*(volatile uint8_t *)addr = value;
}
static inline void opae_writew(uint16_t value, volatile void *addr)
{
io_wmb();
*(volatile uint16_t *)addr = value;
}
static inline void opae_writel(uint32_t value, volatile void *addr)
{
io_wmb();
*(volatile uint32_t *)addr = value;
}
static inline void opae_writeq(uint64_t value, volatile void *addr)
{
io_wmb();
*(volatile uint64_t *)addr = value;
}
#define opae_free(addr) free(addr)
#endif
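/*
 * Usage sketch for the raw MMIO helpers above: program a 64-bit CSR
 * and read it back. "csr_base" is an illustrative ioremapped pointer;
 * on the DPDK build these same names map to rte_read64/rte_write64
 * instead (see osdep_rte/osdep_generic.h).
 */
static inline uint64_t csr_rw(volatile void *csr_base)
{
    opae_writeq(0x1ULL, csr_base);    /* write barrier, then store */
    return opae_readq(csr_base);      /* load, then read barrier */
}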

View File

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _OSDEP_RTE_GENERIC_H
#define _OSDEP_RTE_GENERIC_H
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_io.h>
#include <rte_malloc.h>
#define dev_printf(level, fmt, args...) \
RTE_LOG(level, PMD, "osdep_rte: " fmt, ## args)
#define osdep_panic(...) rte_panic(__VA_ARGS__)
#define opae_udelay(x) rte_delay_us(x)
#define opae_readb(addr) rte_read8(addr)
#define opae_readw(addr) rte_read16(addr)
#define opae_readl(addr) rte_read32(addr)
#define opae_readq(addr) rte_read64(addr)
#define opae_writeb(value, addr) rte_write8(value, addr)
#define opae_writew(value, addr) rte_write16(value, addr)
#define opae_writel(value, addr) rte_write32(value, addr)
#define opae_writeq(value, addr) rte_write64(value, addr)
#define opae_malloc(size) rte_malloc(NULL, size, 0)
#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
#define opae_free(addr) rte_free(addr)
#define ARRAY_SIZE(arr) RTE_DIM(arr)
#define min(a, b) RTE_MIN(a, b)
#define max(a, b) RTE_MAX(a, b)
#define spinlock_t rte_spinlock_t
#define spinlock_init(x) rte_spinlock_init(x)
#define spinlock_lock(x) rte_spinlock_lock(x)
#define spinlock_unlock(x) rte_spinlock_unlock(x)
#endif