raw/ntb: support Intel Ice Lake
Add NTB device support (4th generation) for Intel Ice Lake platform.

Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
parent 6f2dc9c0b4
commit f5057be340

@@ -14,16 +14,16 @@ allocation for the peer to access and read/write allocated memory from peer.
Also, the PMD allows to use doorbell registers to notify the peer and share
some information by using scratchpad registers.

BIOS setting on Intel Skylake
-----------------------------
BIOS setting on Intel Xeon
--------------------------

Intel Non-transparent Bridge needs special BIOS setting. Since the PMD only
supports Intel Skylake platform, introduce BIOS setting here. The reference
is https://www.intel.com/content/dam/support/us/en/documents/server-products/Intel_Xeon_Processor_Scalable_Family_BIOS_User_Guide.pdf
Intel Non-transparent Bridge needs special BIOS setting. The reference for
Skylake is https://www.intel.com/content/dam/support/us/en/documents/server-products/Intel_Xeon_Processor_Scalable_Family_BIOS_User_Guide.pdf

- Set the needed PCIe port as NTB to NTB mode on both hosts.
- Enable NTB bars and set bar size of bar 23 and bar 45 as 12-29 (2K-512M)
  on both hosts. Note that bar size on both hosts should be the same.
- Enable NTB bars and set bar size of bar 23 and bar 45 as 12-29 (4K-512M)
  on both hosts (for Ice Lake, bar size can be set as 12-51, namely 4K-128PB).
  Note that bar size on both hosts should be the same.
- Disable split bars for both hosts.
- Set crosslink control override as DSD/USP on one host, USD/DSP on
  another host.

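As an illustrative aside (not part of the patch), the bar size values in the list above are power-of-two exponents of the window size in bytes, which is how 12-29 maps onto 4K-512M; a minimal sketch of that mapping:

#include <stdio.h>

/* Illustrative only: a BIOS bar size value N selects a 2^N-byte window,
 * e.g. 12 -> 4 KiB and 29 -> 512 MiB. */
int main(void)
{
	const unsigned int bar_size_vals[] = {12, 20, 29};
	unsigned int i;

	for (i = 0; i < sizeof(bar_size_vals) / sizeof(bar_size_vals[0]); i++)
		printf("bar size %u -> %llu bytes\n", bar_size_vals[i],
		       1ULL << bar_size_vals[i]);
	return 0;
}

The same exponent reappears in the Gen4 memory window code further down, which programs rte_log2_u64(size) into the XBASEIDX register.
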
@@ -151,4 +151,4 @@ like the following:
Limitation
----------

- This PMD only supports Intel Skylake platform.
- This PMD only supports Intel Skylake and Ice Lake platforms.

@@ -80,6 +80,10 @@ New Features
  * Changed default link speed to unknown.
  * Added support for 200G link speed.

* **Added Ice Lake (Gen4) support for Intel NTB.**

  Added NTB device support (4th generation) for Intel Ice Lake platform.

* **Extended flow-perf application.**

  * Started supporting user order instead of bit mask:

@@ -25,6 +25,7 @@

static const struct rte_pci_id pci_id_ntb_map[] = {
	{ RTE_PCI_DEVICE(NTB_INTEL_VENDOR_ID, NTB_INTEL_DEV_ID_B2B_SKX) },
	{ RTE_PCI_DEVICE(NTB_INTEL_VENDOR_ID, NTB_INTEL_DEV_ID_B2B_ICX) },
	{ .vendor_id = 0, /* sentinel */ },
};

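For orientation only (not part of this diff), the ID table above is what the PCI bus matches bound devices against at probe time; a minimal sketch of how such a table is typically registered for a DPDK PCI driver, where the rte_ntb_pmd, ntb_probe and ntb_remove names are assumptions for illustration:

#include <rte_bus_pci.h>

/* Sketch only: adding the Ice Lake ID (0x347E) to pci_id_ntb_map is enough
 * for the existing probe path to be called for Gen4 devices, because the
 * PCI bus walks this table when matching devices to drivers. */
static struct rte_pci_driver rte_ntb_pmd = {
	.id_table = pci_id_ntb_map,	/* the table shown above */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_WC_ACTIVATE,
	.probe = ntb_probe,		/* assumed probe handler */
	.remove = ntb_remove,		/* assumed remove handler */
};

RTE_PMD_REGISTER_PCI(raw_ntb, rte_ntb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(raw_ntb, pci_id_ntb_map);
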
@@ -244,6 +245,9 @@ ntb_dev_intr_handler(void *param)
		hw->peer_dev_up = 0;
		return;
	}

	/* Clear other received doorbells. */
	(*hw->ntb_ops->db_clear)(dev, db_bits);
}

static int

@@ -1360,6 +1364,7 @@ ntb_init_hw(struct rte_rawdev *dev, struct rte_pci_device *pci_dev)

	switch (pci_dev->id.device_id) {
	case NTB_INTEL_DEV_ID_B2B_SKX:
	case NTB_INTEL_DEV_ID_B2B_ICX:
		hw->ntb_ops = &intel_ntb_ops;
		break;
	default:

@@ -18,6 +18,7 @@ extern int ntb_logtype;

/* Device IDs */
#define NTB_INTEL_DEV_ID_B2B_SKX 0x201C
#define NTB_INTEL_DEV_ID_B2B_ICX 0x347E

/* Reserved to app to use. */
#define NTB_SPAD_USER "spad_user_"

@@ -25,17 +25,29 @@ static enum xeon_ntb_bar intel_ntb_bar[] = {
	XEON_NTB_BAR45,
};

static int
intel_ntb_dev_init(const struct rte_rawdev *dev)
static inline int
is_gen3_ntb(const struct ntb_hw *hw)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t reg_val, bar;
	int ret, i;
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_SKX)
		return 1;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}
	return 0;
}

static inline int
is_gen4_ntb(const struct ntb_hw *hw)
{
	if (hw->pci_dev->id.device_id == NTB_INTEL_DEV_ID_B2B_ICX)
		return 1;

	return 0;
}

static int
intel_ntb3_check_ppd(struct ntb_hw *hw)
{
	uint8_t reg_val;
	int ret;

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_PPD_OFFSET);

@@ -71,8 +83,65 @@ intel_ntb_dev_init(const struct rte_rawdev *dev)
		return -EINVAL;
	}

	return 0;
}

static int
intel_ntb4_check_ppd(struct ntb_hw *hw)
{
	uint32_t reg_val;

	reg_val = rte_read32(hw->hw_addr + XEON_GEN4_PPD1_OFFSET);

	/* Check connection topo type. Only support B2B. */
	switch (reg_val & XEON_GEN4_PPD_CONN_MASK) {
	case XEON_GEN4_PPD_CONN_B2B:
		NTB_LOG(INFO, "Topo B2B (back to back) is using.");
		break;
	default:
		NTB_LOG(ERR, "Not supported conn topo. Please use B2B.");
		return -EINVAL;
	}

	/* Check device type. */
	if (reg_val & XEON_GEN4_PPD_DEV_DSD) {
		NTB_LOG(INFO, "DSD, Downstream Device.");
		hw->topo = NTB_TOPO_B2B_DSD;
	} else {
		NTB_LOG(INFO, "USD, Upstream device.");
		hw->topo = NTB_TOPO_B2B_USD;
	}

	return 0;
}

static int
intel_ntb_dev_init(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint8_t bar;
	int ret, i;

	if (hw == NULL) {
		NTB_LOG(ERR, "Invalid device.");
		return -EINVAL;
	}

	hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;

	if (is_gen3_ntb(hw))
		ret = intel_ntb3_check_ppd(hw);
	else if (is_gen4_ntb(hw))
		/* PPD is in MMIO but not config space for NTB Gen4 */
		ret = intel_ntb4_check_ppd(hw);
	else {
		NTB_LOG(ERR, "Cannot init device for unsupported device.");
		return -ENOTSUP;
	}

	if (ret)
		return ret;

	hw->mw_cnt = XEON_MW_COUNT;
	hw->db_cnt = XEON_DB_COUNT;
	hw->spad_cnt = XEON_SPAD_COUNT;

@@ -149,15 +218,29 @@ intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
	rte_write64(base, xlat_addr);
	rte_write64(limit, limit_addr);

	/* Setup the external point so that remote can access. */
	xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
	xlat_addr = hw->hw_addr + xlat_off;
	limit_off = XEON_EMBAR1XLMT_OFFSET + mw_idx * XEON_BAR_INTERVAL_OFFSET;
	limit_addr = hw->hw_addr + limit_off;
	base = rte_read64(xlat_addr);
	base &= ~0xf;
	limit = base + size;
	rte_write64(limit, limit_addr);
	if (is_gen3_ntb(hw)) {
		/* Setup the external point so that remote can access. */
		xlat_off = XEON_EMBAR1_OFFSET + 8 * mw_idx;
		xlat_addr = hw->hw_addr + xlat_off;
		limit_off = XEON_EMBAR1XLMT_OFFSET +
			    mw_idx * XEON_BAR_INTERVAL_OFFSET;
		limit_addr = hw->hw_addr + limit_off;
		base = rte_read64(xlat_addr);
		base &= ~0xf;
		limit = base + size;
		rte_write64(limit, limit_addr);
	} else if (is_gen4_ntb(hw)) {
		/* Set translate base address index register */
		xlat_off = XEON_GEN4_IM1XBASEIDX_OFFSET +
			   mw_idx * XEON_GEN4_XBASEIDX_INTERVAL;
		xlat_addr = hw->hw_addr + xlat_off;
		rte_write16(rte_log2_u64(size), xlat_addr);
	} else {
		NTB_LOG(ERR, "Cannot set translation of memory windows for unsupported device.");
		rte_write64(base, limit_addr);
		rte_write64(0, xlat_addr);
		return -ENOTSUP;
	}

	return 0;
}

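A side note on the Gen4 branch above (illustrative, not part of the patch): instead of programming an explicit base/limit pair as on Gen3, the IMnXBASEIDX register takes the log2 of the window size, so the driver writes rte_log2_u64(size). A minimal standalone sketch of that conversion:

#include <inttypes.h>
#include <stdio.h>
#include <rte_common.h>	/* rte_log2_u64(), RTE_DIM() */

/* Illustrative only: the value written to the Gen4 XBASEIDX register is the
 * power-of-two exponent of the memory window size. */
int main(void)
{
	const uint64_t sizes[] = {4096, 2 * 1024 * 1024, 512 * 1024 * 1024};
	unsigned int i;

	for (i = 0; i < RTE_DIM(sizes); i++)
		printf("window size %" PRIu64 " -> base index value %u\n",
		       sizes[i], (unsigned int)rte_log2_u64(sizes[i]));
	return 0;
}
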
@@ -187,7 +270,7 @@ static int
intel_ntb_get_link_status(const struct rte_rawdev *dev)
{
	struct ntb_hw *hw = dev->dev_private;
	uint16_t reg_val;
	uint16_t reg_val, reg_off;
	int ret;

	if (hw == NULL) {

@@ -195,11 +278,20 @@ intel_ntb_get_link_status(const struct rte_rawdev *dev)
		return -EINVAL;
	}

	ret = rte_pci_read_config(hw->pci_dev, &reg_val,
				  sizeof(reg_val), XEON_LINK_STATUS_OFFSET);
	if (ret < 0) {
		NTB_LOG(ERR, "Unable to get link status.");
		return -EIO;
	if (is_gen3_ntb(hw)) {
		reg_off = XEON_GEN3_LINK_STATUS_OFFSET;
		ret = rte_pci_read_config(hw->pci_dev, &reg_val,
					  sizeof(reg_val), reg_off);
		if (ret < 0) {
			NTB_LOG(ERR, "Unable to get link status.");
			return -EIO;
		}
	} else if (is_gen4_ntb(hw)) {
		reg_off = XEON_GEN4_LINK_STATUS_OFFSET;
		reg_val = rte_read16(hw->hw_addr + reg_off);
	} else {
		NTB_LOG(ERR, "Cannot get link status for unsupported device.");
		return -ENOTSUP;
	}

	hw->link_status = NTB_LNK_STA_ACTIVE(reg_val);

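As an aside (not part of the patch), the status word read above is unpacked with the NTB_LNK_STA_* helpers declared in ntb_hw_intel.h; a minimal sketch of that pattern, where the link_speed and link_width fields are assumed to exist on struct ntb_hw alongside link_status:

/* Sketch only: decode the raw link status word right after reading it,
 * using the helper macros from ntb_hw_intel.h. */
static void
ntb_decode_link_status(struct ntb_hw *hw, uint16_t reg_val)
{
	hw->link_status = NTB_LNK_STA_ACTIVE(reg_val); /* link up or down */
	hw->link_speed = NTB_LNK_STA_SPEED(reg_val);   /* negotiated speed */
	hw->link_width = NTB_LNK_STA_WIDTH(reg_val);   /* negotiated width */
}
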
@@ -216,9 +308,8 @@ intel_ntb_get_link_status(const struct rte_rawdev *dev)
}

static int
intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
intel_ntb_gen3_set_link(const struct ntb_hw *hw, bool up)
{
	struct ntb_hw *hw = dev->dev_private;
	uint32_t ntb_ctrl, reg_off;
	void *reg_addr;

@@ -241,6 +332,70 @@ intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
	return 0;
}

static int
intel_ntb_gen4_set_link(const struct ntb_hw *hw, bool up)
{
	uint32_t ntb_ctrl, ppd0;
	uint16_t link_ctrl;
	void *reg_addr;

	if (up) {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
		ntb_ctrl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl &= ~XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);

		/* start link training */
		reg_addr = hw->hw_addr + XEON_GEN4_PPD0_OFFSET;
		ppd0 = rte_read32(reg_addr);
		ppd0 |= XEON_GEN4_PPD_LINKTRN;
		rte_write32(ppd0, reg_addr);

		/* make sure link training has started */
		ppd0 = rte_read32(reg_addr);
		if (!(ppd0 & XEON_GEN4_PPD_LINKTRN)) {
			NTB_LOG(ERR, "Link is not training.");
			return -EINVAL;
		}
	} else {
		reg_addr = hw->hw_addr + XEON_NTBCNTL_OFFSET;
		ntb_ctrl = rte_read32(reg_addr);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
		ntb_ctrl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
		rte_write32(ntb_ctrl, reg_addr);

		reg_addr = hw->hw_addr + XEON_GEN4_LINK_CTRL_OFFSET;
		link_ctrl = rte_read16(reg_addr);
		link_ctrl |= XEON_GEN4_LINK_CTRL_LINK_DIS;
		rte_write16(link_ctrl, reg_addr);
	}

	return 0;
}

static int
intel_ntb_set_link(const struct rte_rawdev *dev, bool up)
{
	struct ntb_hw *hw = dev->dev_private;
	int ret = 0;

	if (is_gen3_ntb(hw))
		ret = intel_ntb_gen3_set_link(hw, up);
	else if (is_gen4_ntb(hw))
		ret = intel_ntb_gen4_set_link(hw, up);
	else {
		NTB_LOG(ERR, "Cannot set link for unsupported device.");
		ret = -ENOTSUP;
	}

	return ret;
}

static uint32_t
intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
{

@@ -254,7 +409,16 @@ intel_ntb_spad_read(const struct rte_rawdev *dev, int spad, bool peer)
	}

	/* When peer is true, read peer spad reg */
	reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
	if (is_gen3_ntb(hw))
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				 XEON_IM_SPAD_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				 XEON_IM_SPAD_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot read spad for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);
	spad_v = rte_read32(reg_addr);

@@ -275,7 +439,16 @@ intel_ntb_spad_write(const struct rte_rawdev *dev, int spad,
	}

	/* When peer is true, write peer spad reg */
	reg_off = peer ? XEON_B2B_SPAD_OFFSET : XEON_IM_SPAD_OFFSET;
	if (is_gen3_ntb(hw))
		reg_off = peer ? XEON_GEN3_B2B_SPAD_OFFSET :
				 XEON_IM_SPAD_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = peer ? XEON_GEN4_B2B_SPAD_OFFSET :
				 XEON_IM_SPAD_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot write spad for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + (spad << 2);

	rte_write32(spad_v, reg_addr);

@@ -308,6 +481,9 @@ intel_ntb_db_clear(const struct rte_rawdev *dev, uint64_t db_bits)
	db_off = XEON_IM_INT_STATUS_OFFSET;
	db_addr = hw->hw_addr + db_off;

	if (is_gen4_ntb(hw))
		rte_write16(XEON_GEN4_SLOTSTS_DLLSCS,
			    hw->hw_addr + XEON_GEN4_SLOTSTS);
	rte_write64(db_bits, db_addr);

	return 0;

@@ -365,7 +541,14 @@ intel_ntb_vector_bind(const struct rte_rawdev *dev, uint8_t intr, uint8_t msix)
	}

	/* Bind intr source to msix vector */
	reg_off = XEON_INTVEC_OFFSET;
	if (is_gen3_ntb(hw))
		reg_off = XEON_GEN3_INTVEC_OFFSET;
	else if (is_gen4_ntb(hw))
		reg_off = XEON_GEN4_INTVEC_OFFSET;
	else {
		NTB_LOG(ERR, "Cannot bind vectors for unsupported device.");
		return -ENOTSUP;
	}
	reg_addr = hw->hw_addr + reg_off + intr;

	rte_write8(msix, reg_addr);

@@ -22,7 +22,7 @@
#define NTB_LNK_STA_SPEED(x) ((x) & NTB_LNK_STA_SPEED_MASK)
#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4)

/* Intel Skylake Xeon hardware */
/* Intel Xeon hardware */
#define XEON_IMBAR1SZ_OFFSET 0x00d0
#define XEON_IMBAR2SZ_OFFSET 0x00d1
#define XEON_EMBAR1SZ_OFFSET 0x00d2

@@ -31,7 +31,13 @@
#define XEON_DEVSTS_OFFSET 0x009a
#define XEON_UNCERRSTS_OFFSET 0x014c
#define XEON_CORERRSTS_OFFSET 0x0158
#define XEON_LINK_STATUS_OFFSET 0x01a2
#define XEON_GEN3_LINK_STATUS_OFFSET 0x01a2
/* Link status and PPD are in MMIO but not config space for Gen4 NTB */
#define XEON_GEN4_PPD0_OFFSET 0xb0d4
#define XEON_GEN4_PPD1_OFFSET 0xb4c0
#define XEON_GEN4_LINK_CTRL_OFFSET 0xb050
#define XEON_GEN4_LINK_STATUS_OFFSET 0xb052
#define XEON_GEN4_LINK_CTRL_LINK_DIS 0x0010

#define XEON_NTBCNTL_OFFSET 0x0000
#define XEON_BAR_INTERVAL_OFFSET 0x0010

@@ -39,13 +45,18 @@
#define XEON_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */
#define XEON_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */
#define XEON_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */
#define XEON_GEN4_XBASEIDX_INTERVAL 0x0002
#define XEON_GEN4_IM1XBASEIDX_OFFSET 0x0074
#define XEON_GEN4_IM2XBASEIDX_OFFSET 0x0076
#define XEON_IM_INT_STATUS_OFFSET 0x0040
#define XEON_IM_INT_DISABLE_OFFSET 0x0048
#define XEON_IM_SPAD_OFFSET 0x0080 /* SPAD */
#define XEON_GEN3_B2B_SPAD_OFFSET 0x0180 /* GEN3 B2B SPAD */
#define XEON_GEN4_B2B_SPAD_OFFSET 0x8080 /* GEN4 B2B SPAD */
#define XEON_USMEMMISS_OFFSET 0x0070
#define XEON_INTVEC_OFFSET 0x00d0
#define XEON_GEN3_INTVEC_OFFSET 0x00d0
#define XEON_GEN4_INTVEC_OFFSET 0x0050
#define XEON_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */
#define XEON_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */
#define XEON_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */
#define XEON_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */
#define XEON_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */

@@ -70,6 +81,14 @@
#define XEON_PPD_DEV_DSD 0x10
#define XEON_PPD_SPLIT_BAR_MASK 0x40

#define XEON_GEN4_PPD_CONN_MASK 0x0300
#define XEON_GEN4_PPD_CONN_B2B 0x0200
#define XEON_GEN4_PPD_DEV_MASK 0x1000
#define XEON_GEN4_PPD_DEV_DSD 0x1000
#define XEON_GEN4_PPD_DEV_USD 0x0000
#define XEON_GEN4_PPD_LINKTRN 0x0008
#define XEON_GEN4_SLOTSTS 0xb05a
#define XEON_GEN4_SLOTSTS_DLLSCS 0x100

#define XEON_MW_COUNT 2

@@ -50,6 +50,8 @@ intel_ioat_icx = {'Class': '08', 'Vendor': '8086', 'Device': '0b00',
                  'SVendor': None, 'SDevice': None}
intel_ntb_skx = {'Class': '06', 'Vendor': '8086', 'Device': '201c',
                 'SVendor': None, 'SDevice': None}
intel_ntb_icx = {'Class': '06', 'Vendor': '8086', 'Device': '347e',
                 'SVendor': None, 'SDevice': None}

network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
baseband_devices = [acceleration_class]

@@ -57,7 +59,7 @@ crypto_devices = [encryption_class, intel_processor_class]
eventdev_devices = [cavium_sso, cavium_tim, octeontx2_sso]
mempool_devices = [cavium_fpa, octeontx2_npa]
compress_devices = [cavium_zip]
misc_devices = [intel_ioat_bdw, intel_ioat_skx, intel_ioat_icx, intel_ntb_skx, octeontx2_dma]
misc_devices = [intel_ioat_bdw, intel_ioat_skx, intel_ioat_icx, intel_ntb_skx, intel_ntb_icx, octeontx2_dma]

# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties