module/idxd: accel framework plug-in for idxd

Docs, RPC, unit tests, etc., will follow.  Notes:

* The current implementation will only work with VFIO.

* The current implementation supports only the existing accel
framework API. The API will be expanded with DSA-exclusive features
in a subsequent patch.

* Software is required to manage flow control so that the work queues
are not over-run. This is provided in the accel plug-in module; the
upper layers use the public API to manage it (see the first sketch
after this list).

* As we need to support any number of channels (we can't limit ourselves
to the number of work queues), we need to dynamically size/resize our
per-channel descriptor rings based on the current number of channels.
This is done from the upper layers via public API into the lib.

* As channels are created, the total number of work queue slots is
divided evenly across them. Likewise, when channels are destroyed, the
remaining channels see their ring sizes increase (see the second sketch
after this list). This is done from the upper layers via public API
into the lib.
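A minimal sketch of the flow-control pattern (illustrative only, not
part of this patch; it reuses types that appear in accel_engine_idxd.c
below): a submission that fails with -EBUSY means the descriptor ring
is full, so the op is queued and retried later from the channel poller.

	static int
	submit_or_queue_copy(struct idxd_io_channel *chan, struct idxd_op *op)
	{
		int rc;

		/* Try to submit; the lib returns -EBUSY when the ring is full. */
		rc = spdk_idxd_submit_copy(chan->chan, op->dst, op->src, op->nbytes,
					   op->cb_fn, op->cb_arg);
		if (rc == -EBUSY) {
			/* Queue the op; idxd_poll() retries it on the next pass. */
			TAILQ_INSERT_TAIL(&chan->queued_ops, op, link);
			rc = 0;
		}
		return rc;
	}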
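A second sketch shows the rebalancing arithmetic: each channel's ring
size is the device's total work queue slots divided by the current
channel count, recomputed (while all channels are paused) on every
channel create/destroy. The helper below is hypothetical; the lib does
this calculation inside spdk_idxd_reconfigure_chan().

	#include <assert.h>
	#include <stdint.h>

	/* Hypothetical helper: e.g. 128 total slots across 4 channels
	 * yields 32 descriptors per channel ring; destroying one channel
	 * grows the remaining rings to 42 (integer division).
	 */
	static uint32_t
	ring_size_per_channel(uint32_t total_wq_slots, uint32_t num_channels)
	{
		assert(num_channels > 0);
		return total_wq_slots / num_channels;
	}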

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: Ifaa39935107206a2d990cec992854675e5502057
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1722
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>

@@ -135,7 +135,7 @@ void spdk_idxd_detach(struct spdk_idxd_device *idxd);
  *
  * \param config_number the configuration number for a valid IDXD config.
  */
-void spdk_idxd_set_config(int config_number);
+void spdk_idxd_set_config(uint32_t config_number);
 
 /**
  * Build and submit a DMA engine memory copy request.


@@ -43,6 +43,7 @@ enum accel_module {
 	ACCEL_SW = 0,
 	ACCEL_AUTO,
 	ACCEL_CBDMA,
+	ACCEL_IDXD_DSA,
 	ACCEL_MODULE_MAX
 };


@@ -75,12 +75,15 @@ accel_set_module(enum accel_module *opts)
 	return 0;
 }
 
-/* Registration of hw modules (currently supports only 1) */
+/* Registration of hw modules (currently supports only 1 at a time) */
 void
 spdk_accel_hw_engine_register(struct spdk_accel_engine *accel_engine)
 {
-	assert(g_hw_accel_engine == NULL);
-	g_hw_accel_engine = accel_engine;
+	if (g_hw_accel_engine == NULL) {
+		g_hw_accel_engine = accel_engine;
+	} else {
+		SPDK_NOTICELOG("Hardware offload engine already enabled\n");
+	}
 }
 
 /* Registration of sw modules (currently supports only 1) */
@@ -160,13 +163,17 @@ accel_engine_create_cb(void *io_device, void *ctx_buf)
 		return -EINVAL;
 	}
 
+	if (g_active_accel_module == ACCEL_IDXD_DSA && g_hw_accel_engine == NULL) {
+		SPDK_ERRLOG("IDXD acceleration engine specified but not available.\n");
+		return -EINVAL;
+	}
+
 	/* For either HW or AUTO */
 	if (g_active_accel_module > ACCEL_SW) {
 		if (g_hw_accel_engine != NULL) {
 			accel_ch->ch = g_hw_accel_engine->get_io_channel();
 			if (accel_ch->ch != NULL) {
 				accel_ch->engine = g_hw_accel_engine;
 				SPDK_NOTICELOG("Acceleration framework using module: CBDMA\n");
 				return 0;
 			}
 		}


@@ -53,6 +53,7 @@
 	spdk_vtophys;
 	spdk_pci_nvme_get_driver;
 	spdk_pci_vmd_get_driver;
+	spdk_pci_idxd_get_driver;
 	spdk_pci_ioat_get_driver;
 	spdk_pci_virtio_get_driver;
 	spdk_pci_enumerate;


@@ -222,7 +222,7 @@ spdk_idxd_reconfigure_chan(struct spdk_idxd_io_channel *chan, uint32_t num_chann
 
 /* Called via RPC to select a pre-defined configuration. */
 void
-spdk_idxd_set_config(int config_num)
+spdk_idxd_set_config(uint32_t config_num)
 {
 	switch (config_num) {
 	case 0:


@@ -103,6 +103,7 @@ DEPDIRS-blobfs_bdev := $(BDEV_DEPS_THREAD) blob_bdev blobfs
 
 # module/accel
 DEPDIRS-accel_ioat := log ioat conf thread $(JSON_LIBS) accel
+DEPDIRS-accel_idxd := log idxd thread $(JSON_LIBS) accel
 
 # module/env_dpdk
 DEPDIRS-env_dpdk_rpc := log $(JSON_LIBS)


@@ -106,5 +106,8 @@ SOCK_MODULES_LIST += sock_vpp
 endif
 
 ACCEL_MODULES_LIST = accel_ioat ioat
+ifeq ($(CONFIG_IDXD),y)
+ACCEL_MODULES_LIST += accel_idxd idxd
+endif
 
 ALL_MODULES_LIST = $(BLOCKDEV_MODULES_LIST) $(ACCEL_MODULES_LIST) $(SOCK_MODULES_LIST)


@@ -36,6 +36,8 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
 
 DIRS-y = ioat
+DIRS-$(CONFIG_IDXD) += idxd
+
 .PHONY: all clean $(DIRS-y)
 
 all: $(DIRS-y)


@@ -0,0 +1,40 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
LIBNAME = accel_idxd
C_SRCS = accel_engine_idxd.c accel_engine_idxd_rpc.c
include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk


@@ -0,0 +1,527 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "accel_engine_idxd.h"
#include "spdk/stdinc.h"
#include "spdk_internal/accel_engine.h"
#include "spdk_internal/log.h"
#include "spdk_internal/idxd.h"
#include "spdk/env.h"
#include "spdk/conf.h"
#include "spdk/event.h"
#include "spdk/thread.h"
#include "spdk/idxd.h"
#include "spdk/util.h"
/* This is undefined by default, so an RPC is required to enable IDXD;
 * define it to enable IDXD automatically for debugging.
 */
#undef DEVELOPER_DEBUG_MODE
#ifdef DEVELOPER_DEBUG_MODE
static bool g_idxd_enable = true;
#else
static bool g_idxd_enable = false;
#endif
enum channel_state {
IDXD_CHANNEL_ACTIVE,
IDXD_CHANNEL_PAUSED,
IDXD_CHANNEL_ERROR,
};
static bool g_idxd_initialized = false;
struct pci_device {
struct spdk_pci_device *pci_dev;
TAILQ_ENTRY(pci_device) tailq;
};
static TAILQ_HEAD(, pci_device) g_pci_devices = TAILQ_HEAD_INITIALIZER(g_pci_devices);
struct idxd_device {
struct spdk_idxd_device *idxd;
int num_channels;
TAILQ_ENTRY(idxd_device) tailq;
};
static TAILQ_HEAD(, idxd_device) g_idxd_devices = TAILQ_HEAD_INITIALIZER(g_idxd_devices);
static struct idxd_device *g_next_dev = NULL;
struct idxd_op {
struct spdk_idxd_io_channel *chan;
void *cb_arg;
spdk_idxd_req_cb cb_fn;
void *src;
void *dst;
uint64_t fill_pattern;
uint32_t op_code;
uint64_t nbytes;
TAILQ_ENTRY(idxd_op) link;
};
struct idxd_io_channel {
struct spdk_idxd_io_channel *chan;
struct spdk_idxd_device *idxd;
struct idxd_device *dev;
enum channel_state state;
struct spdk_poller *poller;
TAILQ_HEAD(, idxd_op) queued_ops;
};
struct idxd_task {
spdk_accel_completion_cb cb;
};
static int accel_engine_idxd_init(void);
static void accel_engine_idxd_exit(void *ctx);
static struct spdk_io_channel *idxd_get_io_channel(void);
static int idxd_submit_copy(void *cb_arg, struct spdk_io_channel *ch, void *dst, void *src,
uint64_t nbytes,
spdk_accel_completion_cb cb);
static int idxd_submit_fill(void *cb_arg, struct spdk_io_channel *ch, void *dst, uint8_t fill,
uint64_t nbytes, spdk_accel_completion_cb cb);
static struct spdk_accel_engine idxd_accel_engine = {
.copy = idxd_submit_copy,
.fill = idxd_submit_fill,
.get_io_channel = idxd_get_io_channel,
};
static struct idxd_device *
idxd_select_device(void)
{
/*
 * We allow channels to share underlying devices;
 * selection is round-robin.
 */
if (g_next_dev == NULL) {
return NULL;
}
g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
if (g_next_dev == NULL) {
g_next_dev = TAILQ_FIRST(&g_idxd_devices);
}
return g_next_dev;
}
static int
idxd_poll(void *arg)
{
struct idxd_io_channel *chan = arg;
struct idxd_op *op = NULL;
int rc;
spdk_idxd_process_events(chan->chan);
/* Drain any queued ops, but only if the channel is active. */
if (chan->state != IDXD_CHANNEL_ACTIVE) {
return -1;
}
while (!TAILQ_EMPTY(&chan->queued_ops)) {
op = TAILQ_FIRST(&chan->queued_ops);
TAILQ_REMOVE(&chan->queued_ops, op, link);
switch (op->op_code) {
case IDXD_OPCODE_MEMMOVE:
rc = spdk_idxd_submit_copy(op->chan, op->dst, op->src, op->nbytes,
op->cb_fn, op->cb_arg);
break;
case IDXD_OPCODE_MEMFILL:
rc = spdk_idxd_submit_fill(op->chan, op->dst, op->fill_pattern, op->nbytes,
op->cb_fn, op->cb_arg);
break;
default:
/* Should never get here */
assert(false);
break;
}
if (rc == 0) {
free(op);
} else {
/* Busy, resubmit to try again later */
TAILQ_INSERT_HEAD(&chan->queued_ops, op, link);
break;
}
}
return -1;
}
static size_t
accel_engine_idxd_get_ctx_size(void)
{
return sizeof(struct idxd_task) + sizeof(struct spdk_accel_task);
}
static void
idxd_done(void *cb_arg, int status)
{
struct spdk_accel_task *accel_req;
struct idxd_task *idxd_task = cb_arg;
accel_req = SPDK_CONTAINEROF(idxd_task, struct spdk_accel_task,
offload_ctx);
idxd_task->cb(accel_req, status);
}
static int
idxd_submit_copy(void *cb_arg, struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes,
spdk_accel_completion_cb cb)
{
struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
int rc = 0;
idxd_task->cb = cb;
if (chan->state == IDXD_CHANNEL_ACTIVE) {
rc = spdk_idxd_submit_copy(chan->chan, dst, src, nbytes, idxd_done, idxd_task);
}
if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
struct idxd_op *op_to_queue;
op_to_queue = calloc(1, sizeof(struct idxd_op));
if (op_to_queue == NULL) {
SPDK_ERRLOG("Failed to allocate operation for queueing\n");
return -ENOMEM;
}
op_to_queue->chan = chan->chan;
op_to_queue->dst = dst;
op_to_queue->src = src;
op_to_queue->nbytes = nbytes;
op_to_queue->cb_arg = idxd_task;
op_to_queue->cb_fn = idxd_done;
op_to_queue->op_code = IDXD_OPCODE_MEMMOVE;
TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
} else if (chan->state == IDXD_CHANNEL_ERROR) {
return -EINVAL;
}
return rc;
}
static int
idxd_submit_fill(void *cb_arg, struct spdk_io_channel *ch, void *dst, uint8_t fill,
uint64_t nbytes, spdk_accel_completion_cb cb)
{
struct idxd_task *idxd_task = (struct idxd_task *)cb_arg;
struct idxd_io_channel *chan = spdk_io_channel_get_ctx(ch);
int rc = 0;
uint64_t fill_pattern;
idxd_task->cb = cb;
memset(&fill_pattern, fill, sizeof(uint64_t));
if (chan->state == IDXD_CHANNEL_ACTIVE) {
rc = spdk_idxd_submit_fill(chan->chan, dst, fill_pattern, nbytes, idxd_done, idxd_task);
}
if (chan->state == IDXD_CHANNEL_PAUSED || rc == -EBUSY) {
struct idxd_op *op_to_queue;
op_to_queue = calloc(1, sizeof(struct idxd_op));
if (op_to_queue == NULL) {
SPDK_ERRLOG("Failed to allocate operation for queueing\n");
return -ENOMEM;
}
op_to_queue->chan = chan->chan;
op_to_queue->dst = dst;
op_to_queue->fill_pattern = fill_pattern;
op_to_queue->nbytes = nbytes;
op_to_queue->cb_arg = idxd_task;
op_to_queue->cb_fn = idxd_done;
op_to_queue->op_code = IDXD_OPCODE_MEMFILL;
TAILQ_INSERT_TAIL(&chan->queued_ops, op_to_queue, link);
} else if (chan->state == IDXD_CHANNEL_ERROR) {
return -EINVAL;
}
return rc;
}
pthread_mutex_t g_num_channels_lock = PTHREAD_MUTEX_INITIALIZER;
/*
* Configure the max number of descriptors that a channel is
* allowed to use based on the total number of current channels.
* This is to allow for dynamic load balancing for hw flow control.
*/
static void
_config_max_desc(struct spdk_io_channel_iter *i)
{
struct idxd_io_channel *chan;
struct spdk_io_channel *ch;
int rc;
ch = spdk_io_channel_iter_get_channel(i);
chan = spdk_io_channel_get_ctx(ch);
pthread_mutex_lock(&g_num_channels_lock);
rc = spdk_idxd_reconfigure_chan(chan->chan, chan->dev->num_channels);
pthread_mutex_unlock(&g_num_channels_lock);
if (rc == 0) {
chan->state = IDXD_CHANNEL_ACTIVE;
} else {
chan->state = IDXD_CHANNEL_ERROR;
}
spdk_for_each_channel_continue(i, 0);
}
/* Pauses a channel so that it can be re-configured. */
static void
_pause_chan(struct spdk_io_channel_iter *i)
{
struct idxd_io_channel *chan;
struct spdk_io_channel *ch;
ch = spdk_io_channel_iter_get_channel(i);
chan = spdk_io_channel_get_ctx(ch);
/* While paused, new requests are queued rather than submitted. */
chan->state = IDXD_CHANNEL_PAUSED;
spdk_for_each_channel_continue(i, 0);
}
static void
_pause_chan_done(struct spdk_io_channel_iter *i, int status)
{
spdk_for_each_channel(&idxd_accel_engine, _config_max_desc, NULL,
NULL);
}
static int
idxd_create_cb(void *io_device, void *ctx_buf)
{
struct idxd_io_channel *chan = ctx_buf;
struct idxd_device *dev;
int rc;
dev = idxd_select_device();
if (dev == NULL) {
SPDK_ERRLOG("Failed to allocate idxd_device\n");
return -EINVAL;
}
chan->chan = spdk_idxd_get_channel(dev->idxd);
if (chan->chan == NULL) {
return -ENOMEM;
}
chan->dev = dev;
chan->poller = spdk_poller_register(idxd_poll, chan, 0);
TAILQ_INIT(&chan->queued_ops);
/*
* Configure the channel but leave paused until all others
* are paused and re-configured based on the new number of
* channels. This enables dynamic load balancing for HW
* flow control.
*/
rc = spdk_idxd_configure_chan(chan->chan);
if (rc) {
SPDK_ERRLOG("Failed to configure new channel rc = %d\n", rc);
chan->state = IDXD_CHANNEL_ERROR;
spdk_poller_unregister(&chan->poller);
return rc;
}
chan->state = IDXD_CHANNEL_PAUSED;
pthread_mutex_lock(&g_num_channels_lock);
chan->dev->num_channels++;
pthread_mutex_unlock(&g_num_channels_lock);
/*
* Pause all channels so that we can set proper flow control
* per channel. When all are paused, we'll update the max
* number of descriptors allowed per channel.
*/
spdk_for_each_channel(&idxd_accel_engine, _pause_chan, NULL,
_pause_chan_done);
return 0;
}
static void
_pause_chan_destroy_done(struct spdk_io_channel_iter *i, int status)
{
/* Rebalance the rings with the smaller number of remaining channels. */
spdk_for_each_channel(&idxd_accel_engine, _config_max_desc, NULL,
NULL);
}
static void
idxd_destroy_cb(void *io_device, void *ctx_buf)
{
struct idxd_io_channel *chan = ctx_buf;
pthread_mutex_lock(&g_num_channels_lock);
assert(chan->dev->num_channels > 0);
chan->dev->num_channels--;
pthread_mutex_unlock(&g_num_channels_lock);
spdk_idxd_reconfigure_chan(chan->chan, 0);
spdk_poller_unregister(&chan->poller);
spdk_idxd_put_channel(chan->chan);
/* Pause each channel then rebalance the max number of ring slots. */
spdk_for_each_channel(&idxd_accel_engine, _pause_chan, NULL,
_pause_chan_destroy_done);
}
static struct spdk_io_channel *
idxd_get_io_channel(void)
{
return spdk_get_io_channel(&idxd_accel_engine);
}
static bool
probe_cb(void *cb_ctx, struct spdk_pci_device *pci_dev)
{
struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(pci_dev);
struct pci_device *pdev;
SPDK_NOTICELOG(
" Found matching device at %04x:%02x:%02x.%x vendor:0x%04x device:0x%04x\n",
pci_addr.domain,
pci_addr.bus,
pci_addr.dev,
pci_addr.func,
spdk_pci_device_get_vendor_id(pci_dev),
spdk_pci_device_get_device_id(pci_dev));
pdev = calloc(1, sizeof(*pdev));
if (pdev == NULL) {
return false;
}
pdev->pci_dev = pci_dev;
TAILQ_INSERT_TAIL(&g_pci_devices, pdev, tailq);
/* Claim the device to guard against conflicts with another process. */
if (spdk_pci_device_claim(pci_dev) < 0) {
return false;
}
#ifdef DEVELOPER_DEBUG_MODE
spdk_idxd_set_config(0);
#endif
return true;
}
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_idxd_device *idxd)
{
struct idxd_device *dev;
dev = calloc(1, sizeof(*dev));
if (dev == NULL) {
SPDK_ERRLOG("Failed to allocate device struct\n");
return;
}
dev->idxd = idxd;
if (g_next_dev == NULL) {
g_next_dev = dev;
}
TAILQ_INSERT_TAIL(&g_idxd_devices, dev, tailq);
}
void
accel_engine_idxd_enable_probe(uint32_t config_number)
{
if (config_number > IDXD_MAX_CONFIG_NUM) {
SPDK_ERRLOG("Invalid config number, using default of 0\n");
config_number = 0;
}
g_idxd_enable = true;
spdk_idxd_set_config(config_number);
}
static int
accel_engine_idxd_init(void)
{
if (!g_idxd_enable) {
return -EINVAL;
}
if (spdk_idxd_probe(NULL, probe_cb, attach_cb) != 0) {
SPDK_ERRLOG("spdk_idxd_probe() failed\n");
return -EINVAL;
}
g_idxd_initialized = true;
SPDK_NOTICELOG("IDXD Acceleration Engine Offload Enabled\n");
spdk_accel_hw_engine_register(&idxd_accel_engine);
spdk_io_device_register(&idxd_accel_engine, idxd_create_cb, idxd_destroy_cb,
sizeof(struct idxd_io_channel), "idxd_accel_engine");
return 0;
}
static void
accel_engine_idxd_exit(void *ctx)
{
struct idxd_device *dev;
struct pci_device *pci_dev;
if (g_idxd_initialized) {
spdk_io_device_unregister(&idxd_accel_engine, NULL);
}
while (!TAILQ_EMPTY(&g_idxd_devices)) {
dev = TAILQ_FIRST(&g_idxd_devices);
TAILQ_REMOVE(&g_idxd_devices, dev, tailq);
spdk_idxd_detach(dev->idxd);
free(dev);
}
while (!TAILQ_EMPTY(&g_pci_devices)) {
pci_dev = TAILQ_FIRST(&g_pci_devices);
TAILQ_REMOVE(&g_pci_devices, pci_dev, tailq);
spdk_pci_device_detach(pci_dev->pci_dev);
free(pci_dev);
}
spdk_accel_engine_module_finish();
}
SPDK_ACCEL_MODULE_REGISTER(accel_engine_idxd_init, accel_engine_idxd_exit,
NULL,
accel_engine_idxd_get_ctx_size)
SPDK_LOG_REGISTER_COMPONENT("accel_idxd", SPDK_LOG_ACCEL_IDXD)


@@ -0,0 +1,43 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SPDK_ACCEL_ENGINE_IDXD_H
#define SPDK_ACCEL_ENGINE_IDXD_H
#include "spdk/stdinc.h"
#define IDXD_MAX_DEVICES 16
void accel_engine_idxd_enable_probe(uint32_t config_number);
#endif /* SPDK_ACCEL_ENGINE_IDXD_H */


@@ -0,0 +1,75 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "accel_engine_idxd.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
#include "spdk/event.h"
#include "spdk/stdinc.h"
#include "spdk/env.h"
struct rpc_idxd_scan_accel_engine {
uint32_t config_number;
};
static const struct spdk_json_object_decoder rpc_idxd_scan_accel_engine_decoder[] = {
{"config_number", offsetof(struct rpc_idxd_scan_accel_engine, config_number), spdk_json_decode_uint32},
};
static void
spdk_rpc_idxd_scan_accel_engine(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_idxd_scan_accel_engine req = {};
struct spdk_json_write_ctx *w;
if (params != NULL) {
if (spdk_json_decode_object(params, rpc_idxd_scan_accel_engine_decoder,
SPDK_COUNTOF(rpc_idxd_scan_accel_engine_decoder),
&req)) {
SPDK_ERRLOG("spdk_json_decode_object() failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
"Invalid parameters");
return;
}
}
SPDK_NOTICELOG("Enabling IDXD with config #%u\n", req.config_number);
accel_engine_idxd_enable_probe(req.config_number);
w = spdk_jsonrpc_begin_result(request);
spdk_json_write_bool(w, true);
spdk_jsonrpc_end_result(request, w);
}
SPDK_RPC_REGISTER("idxd_scan_accel_engine", spdk_rpc_idxd_scan_accel_engine, SPDK_RPC_STARTUP)
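
For reference, an idxd_scan_accel_engine request would look like this
on the wire (a hand-written JSON-RPC example matching the decoder
above; the rpc.py wrapper is part of the follow-up work noted at the
top):

	{
	  "jsonrpc": "2.0",
	  "method": "idxd_scan_accel_engine",
	  "id": 1,
	  "params": {
	    "config_number": 0
	  }
	}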