raw/ioat: probe idxd PCI

When a matching device is found via PCI probe, create a rawdev instance for
each queue on the hardware. Use an empty self-test function for these devices
so that the overall rawdev_autotest does not report failures.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Acked-by: Radu Nicolau <radu.nicolau@intel.com>
Bruce Richardson 2020-10-08 10:51:22 +01:00 committed by Thomas Monjalon
parent 01863b9d23
commit ff06fa2cf3
7 changed files with 442 additions and 3 deletions
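
For context, here is a minimal sketch (not part of the patch) of how an application could look up the per-queue rawdev instances the probe below creates and run their self-test. The "<PCI address>-qN" naming and the no-op self-test come from the code in this commit; the helper name, the PCI device name and the queue count passed in are illustrative placeholders.

#include <stdio.h>
#include <stdint.h>
#include <rte_rawdev.h>

/* illustrative helper: error handling omitted for brevity */
static void
selftest_idxd_queues(const char *pci_name, unsigned int nb_queues)
{
	unsigned int qid;

	for (qid = 0; qid < nb_queues; qid++) {
		char qname[32];
		uint16_t dev_id;

		/* the probe names each queue rawdev "<PCI address>-q<N>" */
		snprintf(qname, sizeof(qname), "%s-q%u", pci_name, qid);
		dev_id = rte_rawdev_get_dev_id(qname);
		/* idxd_rawdev_test() is an empty stub, so this reports 0 */
		printf("%s: selftest = %d\n", qname, rte_rawdev_selftest(dev_id));
	}
}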

@@ -3,8 +3,10 @@
*/
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include "ioat_private.h"
#include "ioat_spec.h"
#define IDXD_VENDOR_ID 0x8086
#define IDXD_DEVICE_ID_SPR 0x0B25
@@ -16,17 +18,246 @@ const struct rte_pci_id pci_id_idxd_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
static inline int
idxd_pci_dev_command(struct idxd_rawdev *idxd, enum rte_idxd_cmds command)
{
uint8_t err_code;
uint16_t qid = idxd->qid;
int i = 0;
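/* the per-WQ disable/drain/abort/reset commands pass the queue as a bit flag, not an index */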
if (command >= idxd_disable_wq && command <= idxd_reset_wq)
qid = (1 << qid);
rte_spinlock_lock(&idxd->u.pci->lk);
idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;
do {
rte_pause();
err_code = idxd->u.pci->regs->cmdstatus;
if (++i >= 1000) {
IOAT_PMD_ERR("Timeout waiting for command response from HW");
rte_spinlock_unlock(&idxd->u.pci->lk);
return err_code;
}
} while (idxd->u.pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK);
rte_spinlock_unlock(&idxd->u.pci->lk);
return err_code & CMDSTATUS_ERR_MASK;
}
static int
idxd_is_wq_enabled(struct idxd_rawdev *idxd)
{
uint32_t state = idxd->u.pci->wq_regs[idxd->qid].wqcfg[WQ_STATE_IDX];
return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}
static const struct rte_rawdev_ops idxd_pci_ops = {
.dev_close = idxd_rawdev_close,
.dev_selftest = idxd_rawdev_test,
};
/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)
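/* work queue "n" is addressed through the portal at offset n * IDXD_PORTAL_SIZE within BAR2 */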
static int
init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
{
struct idxd_pci_common *pci;
uint8_t nb_groups, nb_engines, nb_wqs;
uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
uint16_t wq_size, total_wq_size;
uint8_t lg2_max_batch, lg2_max_copy_size;
unsigned int i, err_code;
pci = malloc(sizeof(*pci));
if (pci == NULL) {
IOAT_PMD_ERR("%s: Can't allocate memory", __func__);
goto err;
}
rte_spinlock_init(&pci->lk);
/* assign the bar registers, and then configure device */
pci->regs = dev->mem_resource[0].addr;
grp_offset = (uint16_t)pci->regs->offsets[0];
pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
pci->wq_regs = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
pci->portals = dev->mem_resource[2].addr;
/* sanity check device status */
if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
/* need function-level-reset (FLR) or is enabled */
IOAT_PMD_ERR("Device status is not disabled, cannot init");
goto err;
}
if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
/* command in progress */
IOAT_PMD_ERR("Device has a command in progress, cannot init");
goto err;
}
/* read basic info about the hardware for use when configuring */
nb_groups = (uint8_t)pci->regs->grpcap;
nb_engines = (uint8_t)pci->regs->engcap;
nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
total_wq_size = (uint16_t)pci->regs->wqcap;
lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;
IOAT_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
nb_groups, nb_engines, nb_wqs);
/* zero out any old config */
for (i = 0; i < nb_groups; i++) {
pci->grp_regs[i].grpengcfg = 0;
pci->grp_regs[i].grpwqcfg[0] = 0;
}
for (i = 0; i < nb_wqs; i++)
pci->wq_regs[i].wqcfg[0] = 0;
/* put each engine into a separate group to avoid reordering */
if (nb_groups > nb_engines)
nb_groups = nb_engines;
if (nb_groups < nb_engines)
nb_engines = nb_groups;
/* assign engines to groups, round-robin style */
for (i = 0; i < nb_engines; i++) {
IOAT_PMD_DEBUG("Assigning engine %u to group %u",
i, i % nb_groups);
pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
}
/* now do the same for queues and give work slots to each queue */
wq_size = total_wq_size / nb_wqs;
IOAT_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
wq_size, lg2_max_batch, lg2_max_copy_size);
for (i = 0; i < nb_wqs; i++) {
/* add work queue "i" to a group */
IOAT_PMD_DEBUG("Assigning work queue %u to group %u",
i, i % nb_groups);
pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
/* now configure it, in terms of size, max batch, mode */
pci->wq_regs[i].wqcfg[WQ_SIZE_IDX] = wq_size;
pci->wq_regs[i].wqcfg[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
WQ_MODE_DEDICATED;
pci->wq_regs[i].wqcfg[WQ_SIZES_IDX] = lg2_max_copy_size |
(lg2_max_batch << WQ_BATCH_SZ_SHIFT);
}
/* dump the group configuration to output */
for (i = 0; i < nb_groups; i++) {
IOAT_PMD_DEBUG("## Group %d", i);
IOAT_PMD_DEBUG(" GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
IOAT_PMD_DEBUG(" GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
IOAT_PMD_DEBUG(" GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
}
idxd->u.pci = pci;
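/* each batch occupies one work-queue entry, so the per-queue size caps the batches in flight */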
idxd->max_batches = wq_size;
/* enable the device itself */
err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
if (err_code) {
IOAT_PMD_ERR("Error enabling device: code %#x", err_code);
return err_code;
}
IOAT_PMD_DEBUG("IDXD Device enabled OK");
return nb_wqs;
err:
free(pci);
return -1;
}
static int
idxd_rawdev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
struct idxd_rawdev idxd = {{0}}; /* Double {} to avoid error on BSD12 */
uint8_t nb_wqs;
int qid, ret = 0;
char name[PCI_PRI_STR_SIZE];
rte_pci_device_name(&dev->addr, name, sizeof(name));
IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
dev->device.driver = &drv->driver;
ret = init_pci_device(dev, &idxd);
if (ret < 0) {
IOAT_PMD_ERR("Error initializing PCI hardware");
return ret;
}
nb_wqs = (uint8_t)ret;
/* set up one device for each queue */
for (qid = 0; qid < nb_wqs; qid++) {
char qname[32];
/* add the queue number to each device name */
snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
idxd.qid = qid;
idxd.public.portal = RTE_PTR_ADD(idxd.u.pci->portals,
qid * IDXD_PORTAL_SIZE);
if (idxd_is_wq_enabled(&idxd))
IOAT_PMD_ERR("Error, WQ %u seems enabled", qid);
ret = idxd_rawdev_create(qname, &dev->device,
&idxd, &idxd_pci_ops);
if (ret != 0) {
IOAT_PMD_ERR("Failed to create rawdev %s", name);
if (qid == 0) /* if no devices using this, free pci */
free(idxd.u.pci);
return ret;
}
}
return 0;
}
static int
idxd_rawdev_destroy(const char *name)
{
int ret;
uint8_t err_code;
struct rte_rawdev *rdev;
struct idxd_rawdev *idxd;
if (!name) {
IOAT_PMD_ERR("Invalid device name");
return -EINVAL;
}
rdev = rte_rawdev_pmd_get_named_dev(name);
if (!rdev) {
IOAT_PMD_ERR("Invalid device name (%s)", name);
return -EINVAL;
}
idxd = rdev->dev_private;
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
if (err_code) {
IOAT_PMD_ERR("Error disabling device: code %#x", err_code);
return err_code;
}
IOAT_PMD_DEBUG("IDXD Device disabled OK");
/* free device memory */
if (rdev->dev_private != NULL) {
IOAT_PMD_DEBUG("Freeing device driver memory");
rdev->dev_private = NULL;
rte_free(idxd->public.batch_ring);
rte_free(idxd->public.hdl_ring);
rte_memzone_free(idxd->mz);
}
/* rte_rawdev_close is called by pmd_release */
ret = rte_rawdev_pmd_release(rdev);
if (ret)
IOAT_PMD_DEBUG("Device cleanup failed");
return 0;
}
static int
@@ -40,6 +271,8 @@ idxd_rawdev_remove_pci(struct rte_pci_device *dev)
IOAT_PMD_INFO("Closing %s on NUMA node %d",
name, dev->device.numa_node);
ret = idxd_rawdev_destroy(name);
return ret;
}

@@ -0,0 +1,68 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2020 Intel Corporation
*/
#include <rte_rawdev_pmd.h>
#include <rte_memzone.h>
#include <rte_common.h>
#include "ioat_private.h"
int
idxd_rawdev_close(struct rte_rawdev *dev __rte_unused)
{
return 0;
}
int
idxd_rawdev_create(const char *name, struct rte_device *dev,
const struct idxd_rawdev *base_idxd,
const struct rte_rawdev_ops *ops)
{
struct idxd_rawdev *idxd;
struct rte_rawdev *rawdev = NULL;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
int ret = 0;
if (!name) {
IOAT_PMD_ERR("Invalid name of the device!");
ret = -EINVAL;
goto cleanup;
}
/* Allocate device structure */
rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct idxd_rawdev),
dev->numa_node);
if (rawdev == NULL) {
IOAT_PMD_ERR("Unable to allocate raw device");
ret = -ENOMEM;
goto cleanup;
}
snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
mz = rte_memzone_reserve(mz_name, sizeof(struct idxd_rawdev),
dev->numa_node, RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL) {
IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
ret = -ENOMEM;
goto cleanup;
}
rawdev->dev_private = mz->addr;
rawdev->dev_ops = ops;
rawdev->device = dev;
rawdev->driver_name = IOAT_PMD_RAWDEV_NAME_STR;
idxd = rawdev->dev_private;
*idxd = *base_idxd; /* copy over the main fields already passed in */
idxd->rawdev = rawdev;
idxd->mz = mz;
return 0;
cleanup:
if (rawdev)
rte_rawdev_pmd_release(rawdev);
return ret;
}

@@ -14,6 +14,10 @@
* @b EXPERIMENTAL: these structures and APIs may change without prior notice
*/
#include <rte_spinlock.h>
#include <rte_rawdev_pmd.h>
#include "rte_ioat_rawdev.h"
extern int ioat_pmd_logtype;
#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
@@ -24,4 +28,33 @@ extern int ioat_pmd_logtype;
#define IOAT_PMD_ERR(fmt, args...) IOAT_PMD_LOG(ERR, fmt, ## args)
#define IOAT_PMD_WARN(fmt, args...) IOAT_PMD_LOG(WARNING, fmt, ## args)
struct idxd_pci_common {
rte_spinlock_t lk;
volatile struct rte_idxd_bar0 *regs;
volatile struct rte_idxd_wqcfg *wq_regs;
volatile struct rte_idxd_grpcfg *grp_regs;
volatile void *portals;
};
struct idxd_rawdev {
struct rte_idxd_rawdev public; /* the public members, must be first */
struct rte_rawdev *rawdev;
const struct rte_memzone *mz;
uint8_t qid;
uint16_t max_batches;
union {
struct idxd_pci_common *pci;
} u;
};
extern int idxd_rawdev_create(const char *name, struct rte_device *dev,
const struct idxd_rawdev *idxd,
const struct rte_rawdev_ops *ops);
extern int idxd_rawdev_close(struct rte_rawdev *dev);
extern int idxd_rawdev_test(uint16_t dev_id);
#endif /* _IOAT_PRIVATE_H_ */

@@ -7,6 +7,7 @@
#include <rte_mbuf.h>
#include "rte_rawdev.h"
#include "rte_ioat_rawdev.h"
#include "ioat_private.h"
#define MAX_SUPPORTED_RAWDEVS 64
#define TEST_SKIPPED 77
@@ -267,3 +268,9 @@ ioat_rawdev_test(uint16_t dev_id)
free(ids);
return -1;
}
int
idxd_rawdev_test(uint16_t dev_id __rte_unused)
{
return 0;
}

@@ -268,6 +268,70 @@ union rte_ioat_hw_desc {
struct rte_ioat_pq_update_hw_desc pq_update;
};
/*** Definitions for Intel(R) Data Streaming Accelerator Follow ***/
#define IDXD_CMD_SHIFT 20
enum rte_idxd_cmds {
idxd_enable_dev = 1,
idxd_disable_dev,
idxd_drain_all,
idxd_abort_all,
idxd_reset_device,
idxd_enable_wq,
idxd_disable_wq,
idxd_drain_wq,
idxd_abort_wq,
idxd_reset_wq,
};
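/* a command is issued by writing (command << IDXD_CMD_SHIFT) | operand to the
 * cmd register and polling cmdstatus until the active bit clears; see
 * idxd_pci_dev_command() in idxd_pci.c
 */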
/* General bar0 registers */
struct rte_idxd_bar0 {
uint32_t __rte_cache_aligned version; /* offset 0x00 */
uint64_t __rte_aligned(0x10) gencap; /* offset 0x10 */
uint64_t __rte_aligned(0x10) wqcap; /* offset 0x20 */
uint64_t __rte_aligned(0x10) grpcap; /* offset 0x30 */
uint64_t __rte_aligned(0x08) engcap; /* offset 0x38 */
uint64_t __rte_aligned(0x10) opcap; /* offset 0x40 */
uint64_t __rte_aligned(0x20) offsets[2]; /* offset 0x60 */
uint32_t __rte_aligned(0x20) gencfg; /* offset 0x80 */
uint32_t __rte_aligned(0x08) genctrl; /* offset 0x88 */
uint32_t __rte_aligned(0x10) gensts; /* offset 0x90 */
uint32_t __rte_aligned(0x08) intcause; /* offset 0x98 */
uint32_t __rte_aligned(0x10) cmd; /* offset 0xA0 */
uint32_t __rte_aligned(0x08) cmdstatus; /* offset 0xA8 */
uint64_t __rte_aligned(0x20) swerror[4]; /* offset 0xC0 */
};
struct rte_idxd_wqcfg {
uint32_t wqcfg[8] __rte_aligned(32); /* 32-byte register */
};
#define WQ_SIZE_IDX 0 /* size is in first 32-bit value */
#define WQ_THRESHOLD_IDX 1 /* WQ threshold second 32-bits */
#define WQ_MODE_IDX 2 /* WQ mode and other flags */
#define WQ_SIZES_IDX 3 /* WQ transfer and batch sizes */
#define WQ_OCC_INT_IDX 4 /* WQ occupancy interrupt handle */
#define WQ_OCC_LIMIT_IDX 5 /* WQ occupancy limit */
#define WQ_STATE_IDX 6 /* WQ state and occupancy state */
#define WQ_MODE_SHARED 0
#define WQ_MODE_DEDICATED 1
#define WQ_PRIORITY_SHIFT 4
#define WQ_BATCH_SZ_SHIFT 5
#define WQ_STATE_SHIFT 30
#define WQ_STATE_MASK 0x3
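/* e.g. the PCI probe sets wqcfg[WQ_SIZES_IDX] = lg2_max_copy_size | (lg2_max_batch << WQ_BATCH_SZ_SHIFT) */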
struct rte_idxd_grpcfg {
uint64_t grpwqcfg[4] __rte_cache_aligned; /* 64-byte register set */
uint64_t grpengcfg; /* offset 32 */
uint32_t grpflags; /* offset 40 */
};
#define GENSTS_DEV_STATE_MASK 0x03
#define CMDSTATUS_ACTIVE_SHIFT 31
#define CMDSTATUS_ACTIVE_MASK (1 << 31)
#define CMDSTATUS_ERR_MASK 0xFF
#ifdef __cplusplus
}
#endif

@@ -6,6 +6,7 @@ reason = 'only supported on x86'
sources = files(
'idxd_pci.c',
'idxd_vdev.c',
'ioat_common.c',
'ioat_rawdev.c',
'ioat_rawdev_test.c')
deps += ['bus_pci',

@@ -41,9 +41,20 @@ struct rte_ioat_generic_hw_desc {
/**
* @internal
* Identify the data path to use.
* Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
*/
enum rte_ioat_dev_type {
RTE_IOAT_DEV,
RTE_IDXD_DEV,
};
/**
* @internal
* Structure representing an IOAT device instance
*/
struct rte_ioat_rawdev {
enum rte_ioat_dev_type type;
struct rte_rawdev *rawdev;
const struct rte_memzone *mz;
const struct rte_memzone *desc_mz;
@@ -79,6 +90,28 @@ struct rte_ioat_rawdev {
#define RTE_IOAT_CHANSTS_HALTED 0x3
#define RTE_IOAT_CHANSTS_ARMED 0x4
/**
* @internal
* Structure representing an IDXD device instance
*/
struct rte_idxd_rawdev {
enum rte_ioat_dev_type type;
void *portal; /* address to write the batch descriptor */
/* counters to track the batches and the individual op handles */
uint16_t batch_ring_sz; /* size of batch ring */
uint16_t hdl_ring_sz; /* size of the user hdl ring */
uint16_t next_batch; /* where we write descriptor ops */
uint16_t next_completed; /* batch where we read completions */
uint16_t next_ret_hdl; /* the next user hdl to return */
uint16_t last_completed_hdl; /* the last user hdl that has completed */
uint16_t next_free_hdl; /* where the handle for next op will go */
struct rte_idxd_user_hdl *hdl_ring;
struct rte_idxd_desc_batch *batch_ring;
};
/*
* Enqueue a copy operation onto the ioat device
*/