numam-spdk/lib/copy/copy_engine.c
Jim Harris 0babf8ce81 bdev, copy: move all I/O paths to use I/O channels
The bdev and copy modules no longer have check_io functions;
all polling is now done via pollers registered when
I/O channels are created.

Other default resources are also removed - for example,
a qpair is no longer allocated and assigned per bdev
exposed by the nvme driver; qpairs are only allocated
via I/O channels. The same principle applies to the
aio driver.

ioat channels are no longer allocated and assigned to
lcores - they are dynamically allocated and assigned
to I/O channels when needed.  If no ioat channel is
available for an I/O channel, the copy engine framework
falls back to memcpy/memset instead.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I99435a75fe792a2b91ab08f25962dfd407d6402f
2016-09-26 14:02:07 -07:00

/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/copy_engine.h"
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <rte_config.h>
#include <rte_debug.h>
#include <rte_memcpy.h>
#include "spdk/log.h"
#include "spdk/event.h"
#include "spdk/io_channel.h"

static struct spdk_copy_engine *hw_copy_engine = NULL;
/* Memcpy engine always exists */
static struct spdk_copy_engine *mem_copy_engine = NULL;

TAILQ_HEAD(, spdk_copy_module_if) spdk_copy_module_list =
        TAILQ_HEAD_INITIALIZER(spdk_copy_module_list);
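
/*
 * Per-channel context for the framework: the engine chosen when the channel
 * was created (hardware if it could supply a channel, memcpy otherwise) and
 * that engine's own I/O channel.
 */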
struct copy_io_channel {
        struct spdk_copy_engine *engine;
        struct spdk_io_channel  *ch;
};

void
spdk_copy_engine_register(struct spdk_copy_engine *copy_engine)
{
        RTE_VERIFY(hw_copy_engine == NULL);
        hw_copy_engine = copy_engine;
}

static void
spdk_memcpy_register(struct spdk_copy_engine *copy_engine)
{
        RTE_VERIFY(mem_copy_engine == NULL);
        mem_copy_engine = copy_engine;
}

static void
copy_engine_done(void *ref, int status)
{
        struct copy_task *req = (struct copy_task *)ref;

        req->cb(req, status);
}
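
/*
 * Submit a copy on a framework channel.  The caller's completion callback is
 * stashed in the task, and the request is forwarded to the engine bound to
 * this channel; the engine reports completion through copy_engine_done().
 */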
int64_t
spdk_copy_submit(struct copy_task *copy_req, struct spdk_io_channel *ch,
                 void *dst, void *src, uint64_t nbytes, copy_completion_cb cb)
{
        struct copy_task *req = copy_req;
        struct copy_io_channel *copy_ch = spdk_io_channel_get_ctx(ch);

        req->cb = cb;
        return copy_ch->engine->copy(req->offload_ctx, copy_ch->ch, dst, src, nbytes,
                                     copy_engine_done);
}

int64_t
spdk_copy_submit_fill(struct copy_task *copy_req, struct spdk_io_channel *ch,
                      void *dst, uint8_t fill, uint64_t nbytes, copy_completion_cb cb)
{
        struct copy_task *req = copy_req;
        struct copy_io_channel *copy_ch = spdk_io_channel_get_ctx(ch);

        req->cb = cb;
        return copy_ch->engine->fill(req->offload_ctx, copy_ch->ch, dst, fill, nbytes,
                                     copy_engine_done);
}
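
/*
 * Usage sketch (illustrative only, not part of this file): the caller is
 * assumed to have obtained "ch" from spdk_copy_engine_get_io_channel() on
 * its own thread.  example_copy/example_done are hypothetical names, the
 * callback signature is assumed from copy_engine_done() above, and
 * calloc/free would come from <stdlib.h>.  The task must be sized for the
 * framework task plus the largest module context.
 */
#if 0
static void
example_done(struct copy_task *task, int status)
{
        /* The memcpy engine completes synchronously, so freeing here is safe. */
        free(task);
}

static void
example_copy(struct spdk_io_channel *ch, void *dst, void *src, uint64_t len)
{
        struct copy_task *task;

        /* Room for the framework task plus the largest module context. */
        task = calloc(1, sizeof(*task) + spdk_copy_module_get_max_ctx_size());
        if (task != NULL) {
                spdk_copy_submit(task, ch, dst, src, len, example_done);
        }
}
#endif
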
/* memcpy default copy engine */
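/*
 * cb_arg points at the task's offload_ctx member, so the containing
 * copy_task is recovered with the offsetof (container_of) idiom below.
 */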
static int64_t
mem_copy_submit(void *cb_arg, struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes,
                copy_completion_cb cb)
{
        struct copy_task *copy_req;

        rte_memcpy(dst, src, (size_t)nbytes);
        copy_req = (struct copy_task *)((uintptr_t)cb_arg -
                                        offsetof(struct copy_task, offload_ctx));
        cb(copy_req, 0);
        return nbytes;
}

static int64_t
mem_copy_fill(void *cb_arg, struct spdk_io_channel *ch, void *dst, uint8_t fill, uint64_t nbytes,
              copy_completion_cb cb)
{
        struct copy_task *copy_req;

        memset(dst, fill, nbytes);
        copy_req = (struct copy_task *)((uintptr_t)cb_arg -
                                        offsetof(struct copy_task, offload_ctx));
        cb(copy_req, 0);
        return nbytes;
}

static struct spdk_io_channel *mem_get_io_channel(uint32_t priority);

static struct spdk_copy_engine memcpy_copy_engine = {
        .copy           = mem_copy_submit,
        .fill           = mem_copy_fill,
        .get_io_channel = mem_get_io_channel,
};
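
/*
 * The memcpy engine keeps no per-channel state, so its channel create and
 * destroy callbacks are no-ops and it registers as an io_device with a
 * context size of zero (see copy_engine_mem_init() below).
 */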
static int
memcpy_create_cb(void *io_device, uint32_t priority, void *ctx_buf)
{
        return 0;
}

static void
memcpy_destroy_cb(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
mem_get_io_channel(uint32_t priority)
{
        return spdk_get_io_channel(&memcpy_copy_engine, priority);
}

static int
copy_engine_mem_get_ctx_size(void)
{
        return sizeof(struct copy_task);
}

int
spdk_copy_module_get_max_ctx_size(void)
{
        struct spdk_copy_module_if *copy_engine;
        int max_copy_module_size = 0;

        TAILQ_FOREACH(copy_engine, &spdk_copy_module_list, tailq) {
                if (copy_engine->get_ctx_size && copy_engine->get_ctx_size() > max_copy_module_size) {
                        max_copy_module_size = copy_engine->get_ctx_size();
                }
        }

        return max_copy_module_size;
}

void
spdk_copy_module_list_add(struct spdk_copy_module_if *copy_module)
{
        TAILQ_INSERT_TAIL(&spdk_copy_module_list, copy_module, tailq);
}
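
/*
 * Channel creation for the framework: prefer the hardware engine, but fall
 * back to the memcpy engine when the hardware engine is absent or cannot
 * supply a channel (e.g., no ioat channel is currently available).
 */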
static int
copy_create_cb(void *io_device, uint32_t priority, void *ctx_buf)
{
        struct copy_io_channel *copy_ch = ctx_buf;

        if (hw_copy_engine != NULL) {
                copy_ch->ch = hw_copy_engine->get_io_channel(priority);
                if (copy_ch->ch != NULL) {
                        copy_ch->engine = hw_copy_engine;
                        return 0;
                }
        }

        copy_ch->ch = mem_copy_engine->get_io_channel(priority);
        assert(copy_ch->ch != NULL);
        copy_ch->engine = mem_copy_engine;
        return 0;
}

static void
copy_destroy_cb(void *io_device, void *ctx_buf)
{
        struct copy_io_channel *copy_ch = ctx_buf;

        spdk_put_io_channel(copy_ch->ch);
}
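
/*
 * The framework registers itself as an io_device keyed off the address of
 * spdk_copy_module_list (see spdk_copy_engine_initialize() below), so this
 * returns one of the framework's own channels, each backed by a
 * copy_io_channel context.
 */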
struct spdk_io_channel *
spdk_copy_engine_get_io_channel(uint32_t priority)
{
        return spdk_get_io_channel(&spdk_copy_module_list, priority);
}

static int
copy_engine_mem_init(void)
{
        spdk_memcpy_register(&memcpy_copy_engine);
        spdk_io_device_register(&memcpy_copy_engine, memcpy_create_cb, memcpy_destroy_cb, 0);
        return 0;
}

static void
spdk_copy_engine_module_initialize(void)
{
        struct spdk_copy_module_if *copy_engine_module;

        TAILQ_FOREACH(copy_engine_module, &spdk_copy_module_list, tailq) {
                copy_engine_module->module_init();
        }
}

static void
spdk_copy_engine_module_finish(void)
{
        struct spdk_copy_module_if *copy_engine_module;

        TAILQ_FOREACH(copy_engine_module, &spdk_copy_module_list, tailq) {
                if (copy_engine_module->module_fini) {
                        copy_engine_module->module_fini();
                }
        }
}

static int
spdk_copy_engine_initialize(void)
{
        spdk_copy_engine_module_initialize();

        /*
         * We need a unique identifier for the copy engine framework, so use the
         * spdk_copy_module_list address for this purpose.
         */
        spdk_io_device_register(&spdk_copy_module_list, copy_create_cb, copy_destroy_cb,
                                sizeof(struct copy_io_channel));

        return 0;
}

static int
spdk_copy_engine_finish(void)
{
        spdk_copy_engine_module_finish();
        return 0;
}
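
/*
 * Sketch (hypothetical, not part of this file): how a hardware module such
 * as ioat would plug in.  The example_hw_* names are illustrative only.  At
 * module init the driver probes its hardware and, if found, publishes
 * itself as the hardware engine; its get_io_channel callback may return
 * NULL, which triggers the memcpy fallback in copy_create_cb above.
 */
#if 0
static struct spdk_copy_engine example_hw_copy_engine = {
        .copy           = example_hw_copy_submit,
        .fill           = example_hw_fill_submit,
        .get_io_channel = example_hw_get_io_channel,
};

static int
example_hw_init(void)
{
        /* Probe hardware here; register only if a device was found. */
        spdk_copy_engine_register(&example_hw_copy_engine);
        return 0;
}

SPDK_COPY_MODULE_REGISTER(example_hw_init, NULL, NULL, example_hw_get_ctx_size)
#endif
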
SPDK_COPY_MODULE_REGISTER(copy_engine_mem_init, NULL, NULL, copy_engine_mem_get_ctx_size)
SPDK_SUBSYSTEM_REGISTER(copy, spdk_copy_engine_initialize, spdk_copy_engine_finish, NULL)