numam-spdk/lib/bdev/raid/bdev_raid.c
Kunal Sablok 41586b0f1d bdev: add raid bdev module
Raid module:
============
- The SPDK raid bdev module is a new bdev module responsible for striping
  I/O across multiple NVMe devices and exposing the resulting raid bdev to
  the bdev layer, which enhances both performance and capacity.
- It can theoretically support 256 base devices (currently it is tested
  with a maximum of 8 base devices).
- Multiple strip sizes such as 32KB, 64KB, 128KB, 256KB and 512KB are
  supported. Most of the current testing is focused on a 64KB strip size.
- New RPC commands ("create raid bdev", "destroy raid bdev" and
  "get raid bdevs") are introduced to configure raid bdevs dynamically in
  a running SPDK system.
- Currently raid bdev configuration parameters are persisted in the SPDK
  configuration file so that they survive reboots; see the example below.
  DDF support will be introduced later.
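
An example configuration file section, following the format documented in
the module source (section names use the "RAID" prefix, StripSize is in
KB, and RaidLevel 0 is the only supported level):

[RAID1]
  Name raid1
  StripSize 64
  NumDevices 2
  RaidLevel 0
  Devices Nvme0n1 Nvme1n1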

High level testing done:
========================
- A raid bdev was created from 8 base NVMe devices via the configuration
  file and exposed to an initiator via existing methods. The initiator
  sees a single NVMe namespace with capacity equal to 8 times the minimum
  capacity among those devices. The initiator was able to run raw
  read/write workloads, file system workloads, etc. (tested with an XFS
  file system workload).
- Multiple raid bdevs were also created, exposed to the initiator and
  tested with file system and other workloads for read/write IO.
- LVS / LVOL were created over a raid bdev and exposed to the initiator.
  Testing was done with raw read/write workloads and XFS file system
  workloads.
- RPC testing was done where raid bdevs were created out of NVMe base
  devices on a running SPDK system. These raid bdevs (and LVOLs over
  raid bdevs) were then exposed to the initiator, and IO workloads were
  tested for raw read/write and an XFS file system workload.
- RPC testing was done for deleting raid bdevs, where all raid bdevs were
  deleted on a running SPDK system.
- RPC testing was done for getting raid bdevs, where the existing list of
  raid bdev names is printed (all raid bdevs, or only the online,
  configuring or offline ones).
- RPC testing was done where the relationship between raid bdevs and the
  underlying NVMe devices was returned in JSON RPC responses.

Change-Id: I10ae1266f8f2cca3c106e4df8c1c0993ddf435d8
Signed-off-by: Kunal Sablok <kunal.sablok@intel.com>
Reviewed-on: https://review.gerrithub.io/410484
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
2018-07-16 20:50:40 +00:00


/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bdev_raid.h"
#include "spdk/env.h"
#include "spdk/io_channel.h"
#include "spdk/conf.h"
#include "spdk_internal/log.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/json.h"
#include "spdk/string.h"
/* raid bdev config as read from config file */
struct raid_config g_spdk_raid_config;
/*
* List of raid bdevs in the configured state; these raid bdevs are
* registered with the bdev layer
*/
struct spdk_raid_configured_tailq g_spdk_raid_bdev_configured_list;
/* List of raid bdevs in the configuring state */
struct spdk_raid_configuring_tailq g_spdk_raid_bdev_configuring_list;
/* List of all raid bdevs */
struct spdk_raid_all_tailq g_spdk_raid_bdev_list;
/* List of all raid bdevs that are offline */
struct spdk_raid_offline_tailq g_spdk_raid_bdev_offline_list;
/* Function declarations */
static void raid_bdev_examine(struct spdk_bdev *bdev);
static int raid_bdev_init(void);
static void raid_bdev_waitq_io_process(void *ctx);
/*
* brief:
* raid_bdev_create_cb function is a cb function for raid bdev which creates the
* hierarchy from raid bdev to base bdev io channels. It will be called per core
* params:
* io_device - pointer to raid bdev io device represented by raid_bdev
* ctx_buf - pointer to context buffer for raid bdev io channel
* returns:
* 0 - success
* non zero - failure
*/
static int
raid_bdev_create_cb(void *io_device, void *ctx_buf)
{
struct raid_bdev *raid_bdev = io_device;
struct raid_bdev_io_channel *ch = ctx_buf;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_create_cb, %p\n", ch);
assert(raid_bdev != NULL);
assert(raid_bdev->state == RAID_BDEV_STATE_ONLINE);
/*
* Store raid_bdev_ctxt in each channel which is used to get the read only
* raid bdev specific information during io split logic like base bdev
* descriptors, strip size etc
*/
ch->raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
ch->base_bdevs_io_channel = calloc(ch->raid_bdev_ctxt->raid_bdev.num_base_bdevs,
sizeof(struct spdk_io_channel *));
if (!ch->base_bdevs_io_channel) {
SPDK_ERRLOG("Unable to allocate base bdevs io channel\n");
return -1;
}
for (uint32_t iter = 0; iter < ch->raid_bdev_ctxt->raid_bdev.num_base_bdevs; iter++) {
/*
* Get the spdk_io_channel for all the base bdevs. This is used during
* split logic to send the respective child bdev ios to respective base
* bdev io channel.
*/
ch->base_bdevs_io_channel[iter] = spdk_bdev_get_io_channel(
raid_bdev->base_bdev_info[iter].base_bdev_desc);
if (!ch->base_bdevs_io_channel[iter]) {
for (uint32_t iter1 = 0; iter1 < iter ; iter1++) {
spdk_put_io_channel(ch->base_bdevs_io_channel[iter1]);
}
free(ch->base_bdevs_io_channel);
SPDK_ERRLOG("Unable to create io channel for base bdev\n");
return -1;
}
}
return 0;
}
/*
* brief:
* raid_bdev_destroy_cb function is a cb function for raid bdev which deletes the
* hierarchy from raid bdev to base bdev io channels. It will be called per core
* params:
* io_device - pointer to raid bdev io device represented by raid_bdev
* ctx_buf - pointer to context buffer for raid bdev io channel
* returns:
* none
*/
static void
raid_bdev_destroy_cb(void *io_device, void *ctx_buf)
{
struct raid_bdev_io_channel *ch = ctx_buf;
struct raid_bdev *raid_bdev = io_device;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_destroy_cb\n");
assert(raid_bdev != NULL);
assert(ch != NULL);
assert(ch->base_bdevs_io_channel);
for (uint32_t iter = 0; iter < raid_bdev->num_base_bdevs; iter++) {
/* Free base bdev channels */
assert(ch->base_bdevs_io_channel[iter] != NULL);
spdk_put_io_channel(ch->base_bdevs_io_channel[iter]);
ch->base_bdevs_io_channel[iter] = NULL;
}
ch->raid_bdev_ctxt = NULL;
free(ch->base_bdevs_io_channel);
ch->base_bdevs_io_channel = NULL;
}
/*
* brief:
* raid_bdev_cleanup is used to cleanup and free raid_bdev related data
* structures.
* params:
* raid_bdev_ctxt - pointer to raid_bdev_ctxt
* returns:
* none
*/
static void
raid_bdev_cleanup(struct raid_bdev_ctxt *raid_bdev_ctxt)
{
struct raid_bdev *raid_bdev = &raid_bdev_ctxt->raid_bdev;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_cleanup, %p name %s, state %u, raid_bdev_config %p\n",
raid_bdev_ctxt,
raid_bdev_ctxt->bdev.name, raid_bdev->state, raid_bdev->raid_bdev_config);
if (raid_bdev->state == RAID_BDEV_STATE_CONFIGURING) {
TAILQ_REMOVE(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
} else if (raid_bdev->state == RAID_BDEV_STATE_OFFLINE) {
TAILQ_REMOVE(&g_spdk_raid_bdev_offline_list, raid_bdev, link_specific_list);
} else {
assert(0);
}
TAILQ_REMOVE(&g_spdk_raid_bdev_list, raid_bdev, link_global_list);
assert(raid_bdev_ctxt->bdev.name);
free(raid_bdev_ctxt->bdev.name);
raid_bdev_ctxt->bdev.name = NULL;
assert(raid_bdev->base_bdev_info);
free(raid_bdev->base_bdev_info);
raid_bdev->base_bdev_info = NULL;
if (raid_bdev->raid_bdev_config) {
raid_bdev->raid_bdev_config->raid_bdev_ctxt = NULL;
}
free(raid_bdev_ctxt);
}
/*
* brief:
* raid_bdev_destruct is the destruct function table pointer for raid bdev
* params:
* ctxt - pointer to raid_bdev_ctxt
* returns:
* 0 - success
* non zero - failure
*/
static int
raid_bdev_destruct(void *ctxt)
{
struct raid_bdev_ctxt *raid_bdev_ctxt = ctxt;
struct raid_bdev *raid_bdev = &raid_bdev_ctxt->raid_bdev;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_destruct\n");
raid_bdev->destruct_called = true;
for (uint16_t iter = 0; iter < raid_bdev->num_base_bdevs; iter++) {
/*
* Close all base bdev descriptors for which call has come from below
* layers
*/
if ((raid_bdev->base_bdev_info[iter].base_bdev_remove_scheduled == true) &&
(raid_bdev->base_bdev_info[iter].base_bdev != NULL)) {
spdk_bdev_module_release_bdev(raid_bdev->base_bdev_info[iter].base_bdev);
spdk_bdev_close(raid_bdev->base_bdev_info[iter].base_bdev_desc);
raid_bdev->base_bdev_info[iter].base_bdev_desc = NULL;
raid_bdev->base_bdev_info[iter].base_bdev = NULL;
assert(raid_bdev->num_base_bdevs_discovered);
raid_bdev->num_base_bdevs_discovered--;
}
}
if (raid_bdev->num_base_bdevs_discovered == 0) {
/* Free raid_bdev when there are no base bdevs left */
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid bdev base bdevs is 0, going to free all in destruct\n");
raid_bdev_cleanup(raid_bdev_ctxt);
}
return 0;
}
/*
* brief:
* raid_bdev_io_completion function is called by lower layers to notify raid
* module that particular bdev_io is completed.
* params:
* bdev_io - pointer to bdev io submitted to lower layers, like child io
* success - bdev_io status
* cb_arg - function callback context, like parent io pointer
* returns:
* none
*/
static void
raid_bdev_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
struct spdk_bdev_io *parent_io = cb_arg;
struct raid_bdev_io *raid_bdev_io = (struct raid_bdev_io *)parent_io->driver_ctx;
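/*
* Split accounting: the parent io starts with splits_pending equal to the
* number of child ios required. Each successful child submission moves one
* unit from splits_pending to splits_comp_outstanding, and each child
* completion decrements splits_comp_outstanding. The parent bdev io is
* completed only when both counters reach zero, i.e. every child has been
* submitted and has completed.
*/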
assert(raid_bdev_io->splits_comp_outstanding);
raid_bdev_io->splits_comp_outstanding--;
if (raid_bdev_io->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
/*
* Store the failure status if any child bdev io fails. If any child fails,
* the overall parent bdev_io is considered failed, but the parent bdev io
* status is only communicated to upper layers once all children complete
*/
raid_bdev_io->status = success;
}
/* Free child bdev io */
spdk_bdev_free_io(bdev_io);
if (!raid_bdev_io->splits_pending && !raid_bdev_io->splits_comp_outstanding) {
/*
* If all children have been submitted and all have completed, process
* parent bdev io completion and complete the parent bdev io with the
* appropriate status. If any child bdev io failed, the parent
* bdev io is considered failed.
*/
if (raid_bdev_io->status) {
spdk_bdev_io_complete(parent_io, SPDK_BDEV_IO_STATUS_SUCCESS);
} else {
spdk_bdev_io_complete(parent_io, SPDK_BDEV_IO_STATUS_FAILED);
}
}
}
/*
* brief:
* raid_bdev_send_passthru function sends the bdev_io to the underlying
* base device, bypassing the splitting logic. This is used as an optimization
* when the raid bdev has only one base device.
* params:
* ch - pointer to io channel for this io
* bdev_io - pointer to bdev_io
* returns:
* 0 - success
* non-zero - error
*/
static int
raid_bdev_send_passthru(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
struct raid_bdev_io_channel *raid_bdev_io_channel;
struct raid_bdev_io *raid_bdev_io;
struct raid_bdev *raid_bdev;
int ret;
raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
raid_bdev_io->status = SPDK_BDEV_IO_STATUS_SUCCESS;
if (raid_bdev->base_bdev_info[0].base_bdev_desc == NULL) {
SPDK_ERRLOG("base bdev desc null for pd_idx %u\n", 0);
assert(0);
}
raid_bdev_io->splits_pending = 0;
raid_bdev_io->splits_comp_outstanding = 1;
if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
ret = spdk_bdev_read_blocks(raid_bdev->base_bdev_info[0].base_bdev_desc,
raid_bdev_io_channel->base_bdevs_io_channel[0],
bdev_io->u.bdev.iovs->iov_base,
bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, raid_bdev_io_completion,
bdev_io);
} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
ret = spdk_bdev_write_blocks(raid_bdev->base_bdev_info[0].base_bdev_desc,
raid_bdev_io_channel->base_bdevs_io_channel[0],
bdev_io->u.bdev.iovs->iov_base,
bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, raid_bdev_io_completion,
bdev_io);
} else {
ret = -EINVAL;
}
if (ret != 0) {
/*
* If the child io could not be submitted to the bdev layer, queue the
* parent bdev io, along with the currently active split information, in
* the wait queue for this core. Submission will resume from this point
* only. For example, if 4 splits are required and 2 children were
* submitted, the parent io is queued to the io waitq of this core and,
* on resume, submission of the 3rd and 4th children is retried
*/
raid_bdev_io->splits_pending = 1;
raid_bdev_io->splits_comp_outstanding = 0;
raid_bdev_io->ch = ch;
return ret;
}
return 0;
}
/*
* brief:
* raid_bdev_submit_children function is used to split the parent io and submit
* the children to the bdev layer. The bdev layer redirects each child to the
* appropriate base bdev nvme module
* params:
* ch - pointer to spdk_io_channel for the raid bdev
* bdev_io - parent bdev io
* start_strip - start strip number of this io
* end_strip - end strip number of this io
* cur_strip - current strip number of this io to start processing
* buf - pointer to buffer for this io
* returns:
* 0 - success
* non zero - failure
*/
static int
raid_bdev_submit_children(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
uint64_t start_strip, uint64_t end_strip, uint64_t cur_strip, uint8_t *buf)
{
struct raid_bdev_io_channel *raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
struct raid_bdev_io *raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
struct raid_bdev *raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
uint64_t pd_strip;
uint32_t offset_in_strip;
uint64_t pd_lba;
uint64_t pd_blocks;
uint32_t pd_idx;
int ret;
for (uint64_t strip = cur_strip; strip <= end_strip; strip++) {
/*
* For each strip of parent bdev io, process for each strip and submit
* child io to bdev layer. Calculate base bdev level start lba, length
* and buffer for this child io
*/
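/*
* Worked example (assuming strip_size = 128 blocks, num_base_bdevs = 4):
* a parent io with offset_blocks = 200 and num_blocks = 100 covers strips
* 1..2. Strip 1 maps to pd_idx 1 with pd_lba = 72 and pd_blocks = 56;
* strip 2 maps to pd_idx 2 with pd_lba = 0 and pd_blocks = 44.
*/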
pd_strip = strip / raid_bdev->num_base_bdevs;
pd_idx = strip % raid_bdev->num_base_bdevs;
if (strip == start_strip) {
offset_in_strip = bdev_io->u.bdev.offset_blocks & (raid_bdev->strip_size - 1);
pd_lba = (pd_strip << raid_bdev->strip_size_shift) + offset_in_strip;
if (strip == end_strip) {
pd_blocks = bdev_io->u.bdev.num_blocks;
} else {
pd_blocks = raid_bdev->strip_size - offset_in_strip;
}
} else if (strip == end_strip) {
pd_lba = pd_strip << raid_bdev->strip_size_shift;
pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
(raid_bdev->strip_size - 1)) + 1;
} else {
pd_lba = pd_strip << raid_bdev->strip_size_shift;
pd_blocks = raid_bdev->strip_size;
}
raid_bdev_io->splits_comp_outstanding++;
assert(raid_bdev_io->splits_pending);
raid_bdev_io->splits_pending--;
if (raid_bdev->base_bdev_info[pd_idx].base_bdev_desc == NULL) {
SPDK_ERRLOG("base bdev desc null for pd_idx %u\n", pd_idx);
assert(0);
}
/*
* Submit child io to bdev layer with using base bdev descriptors, base
* bdev lba, base bdev child io length in blocks, buffer, completion
* function and function callback context
*/
if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
ret = spdk_bdev_read_blocks(raid_bdev->base_bdev_info[pd_idx].base_bdev_desc,
raid_bdev_io_channel->base_bdevs_io_channel[pd_idx],
buf, pd_lba, pd_blocks, raid_bdev_io_completion,
bdev_io);
} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
ret = spdk_bdev_write_blocks(raid_bdev->base_bdev_info[pd_idx].base_bdev_desc,
raid_bdev_io_channel->base_bdevs_io_channel[pd_idx],
buf, pd_lba, pd_blocks, raid_bdev_io_completion,
bdev_io);
} else {
SPDK_ERRLOG("Recvd not supported io type %u\n", bdev_io->type);
assert(0);
}
if (ret != 0) {
/*
* If the child io could not be submitted to the bdev layer, queue the
* parent bdev io, along with the currently active split information, in
* the wait queue for this core. Submission will resume from this point
* only. For example, if 4 splits are required and 2 children were
* submitted, the parent io is queued to the io waitq of this core and,
* on resume, submission of the 3rd and 4th children is retried
*/
raid_bdev_io->buf = buf;
raid_bdev_io->ch = ch;
raid_bdev_io->splits_comp_outstanding--;
raid_bdev_io->splits_pending++;
return ret;
}
buf += (pd_blocks << raid_bdev->blocklen_shift);
}
return 0;
}
/*
* brief:
* get_curr_base_bdev_index function calculates the base bdev index
* which should be processed next based on splits_pending parameter
* params:
* raid_bdev - pointer to raid bdev
* raid_bdev_io - pointer to parent io context
* returns:
* base bdev index
*/
static uint8_t
get_curr_base_bdev_index(struct raid_bdev *raid_bdev, struct raid_bdev_io *raid_bdev_io)
{
struct spdk_bdev_io *bdev_io;
uint64_t start_strip;
uint64_t end_strip;
uint64_t cur_strip;
bdev_io = SPDK_CONTAINEROF(raid_bdev_io, struct spdk_bdev_io, driver_ctx);
start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
raid_bdev->strip_size_shift;
cur_strip = start_strip + ((end_strip - start_strip + 1) - raid_bdev_io->splits_pending);
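/*
* Example: if the parent io covers strips 4..7 (4 splits) and
* splits_pending is 2, then cur_strip = 4 + (4 - 2) = 6, so submission
* resumes at strip 6 and the returned index is 6 % num_base_bdevs.
*/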
return (cur_strip % raid_bdev->num_base_bdevs);
}
/*
* brief:
* raid_bdev_io_terminate function terminates the execution of the IO. If
* any outstanding children are there it waits for completion, otherwise it
* immediately completes the IO with failure.
* params:
* bdev_io - pointer to parent io
* raid_bdev_io - pointer to parent io context
* returns:
* none
*/
static void
raid_bdev_io_terminate(struct spdk_bdev_io *bdev_io, struct raid_bdev_io *raid_bdev_io)
{
if (raid_bdev_io->splits_comp_outstanding == 0) {
/* If no children are outstanding, immediately fail the parent IO */
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
} else {
/* If any children are outstanding,
* wait for them to complete but don't send further IOs */
raid_bdev_io->splits_pending = 0;
raid_bdev_io->status = SPDK_BDEV_IO_STATUS_FAILED;
}
}
/*
* brief:
* raid_bdev_io_submit_fail_process function processes the IO which failed to submit.
* It will try to queue the IOs after storing the context to bdev wait queue logic.
* params:
* bdev_io - pointer to bdev_io
* raid_bdev_io - pointer to raid bdev io
* ret - return code
* returns:
* none
*/
static void
raid_bdev_io_submit_fail_process(struct raid_bdev *raid_bdev, struct spdk_bdev_io *bdev_io,
struct raid_bdev_io *raid_bdev_io, int ret)
{
struct raid_bdev_io_channel *raid_bdev_io_channel;
uint8_t pd_idx;
if (ret != -ENOMEM) {
raid_bdev_io_terminate(bdev_io, raid_bdev_io);
} else {
/* Queue the IO to bdev layer wait queue */
pd_idx = get_curr_base_bdev_index(raid_bdev, raid_bdev_io);
raid_bdev_io->waitq_entry.bdev = raid_bdev->base_bdev_info[pd_idx].base_bdev;
raid_bdev_io->waitq_entry.cb_fn = raid_bdev_waitq_io_process;
raid_bdev_io->waitq_entry.cb_arg = raid_bdev_io;
raid_bdev_io_channel = spdk_io_channel_get_ctx(raid_bdev_io->ch);
if (spdk_bdev_queue_io_wait(raid_bdev->base_bdev_info[pd_idx].base_bdev,
raid_bdev_io_channel->base_bdevs_io_channel[pd_idx],
&raid_bdev_io->waitq_entry) != 0) {
SPDK_ERRLOG("bdev io waitq error, it should not happen\n");
assert(0);
raid_bdev_io_terminate(bdev_io, raid_bdev_io);
}
}
}
/*
* brief:
* raid_bdev_waitq_io_process function is the callback function
* registered by the raid bdev module with the bdev layer when a bdev_io was unavailable.
* params:
* ctx - pointer to raid_bdev_io
* returns:
* none
*/
static void
raid_bdev_waitq_io_process(void *ctx)
{
struct raid_bdev_io *raid_bdev_io = ctx;
struct spdk_bdev_io *bdev_io;
struct raid_bdev_io_channel *raid_bdev_io_channel;
struct raid_bdev *raid_bdev;
int ret;
uint64_t start_strip;
uint64_t end_strip;
uint64_t cur_strip;
bdev_io = SPDK_CONTAINEROF(raid_bdev_io, struct spdk_bdev_io, driver_ctx);
/*
* Try to submit the children of the parent bdev io. If submission fails due
* to a resource crunch, break the loop and don't try to process other queued IOs.
*/
raid_bdev_io_channel = spdk_io_channel_get_ctx(raid_bdev_io->ch);
raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
if (raid_bdev->num_base_bdevs > 1) {
start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
raid_bdev->strip_size_shift;
cur_strip = start_strip + ((end_strip - start_strip + 1) - raid_bdev_io->splits_pending);
ret = raid_bdev_submit_children(raid_bdev_io->ch, bdev_io, start_strip, end_strip, cur_strip,
raid_bdev_io->buf);
} else {
ret = raid_bdev_send_passthru(raid_bdev_io->ch, bdev_io);
}
if (ret != 0) {
raid_bdev_io_submit_fail_process(raid_bdev, bdev_io, raid_bdev_io, ret);
}
}
/*
* brief:
* raid_bdev_submit_request function is the submit_request function pointer of
* raid bdev function table. This is used to submit the io on raid_bdev to below
* layers. If iowaitq is not empty, it will queue the parent bdev_io to the end
* of the queue.
* params:
* ch - pointer to raid bdev io channel
* bdev_io - pointer to parent bdev_io on raid bdev device
* returns:
* none
*/
static void
raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
struct raid_bdev_io_channel *raid_bdev_io_channel;
struct raid_bdev_io *raid_bdev_io;
struct raid_bdev *raid_bdev;
uint64_t start_strip = 0;
uint64_t end_strip = 0;
int ret;
switch (bdev_io->type) {
case SPDK_BDEV_IO_TYPE_READ:
case SPDK_BDEV_IO_TYPE_WRITE:
if (bdev_io->u.bdev.iovcnt != 1) {
SPDK_ERRLOG("iov vector count is not 1\n");
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
break;
}
/*
* IO parameters used during io split and io completion
*/
raid_bdev_io_channel = spdk_io_channel_get_ctx(ch);
raid_bdev = &raid_bdev_io_channel->raid_bdev_ctxt->raid_bdev;
raid_bdev_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
if (raid_bdev->num_base_bdevs > 1) {
start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
raid_bdev->strip_size_shift;
/*
* IO parameters used during io split and io completion
*/
raid_bdev_io->splits_pending = (end_strip - start_strip + 1);
raid_bdev_io->splits_comp_outstanding = 0;
raid_bdev_io->status = SPDK_BDEV_IO_STATUS_SUCCESS;
ret = raid_bdev_submit_children(ch, bdev_io, start_strip, end_strip, start_strip,
bdev_io->u.bdev.iovs->iov_base);
} else {
ret = raid_bdev_send_passthru(ch, bdev_io);
}
if (ret != 0) {
raid_bdev_io_submit_fail_process(raid_bdev, bdev_io, raid_bdev_io, ret);
}
break;
case SPDK_BDEV_IO_TYPE_FLUSH:
// TODO: support flush if the requirement arises
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
break;
default:
SPDK_ERRLOG("submit request, invalid io type %u\n", bdev_io->type);
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
break;
}
}
/*
* brief:
* raid_bdev_io_type_supported is the io_supported function for bdev function
* table which returns whether the particular io type is supported or not by
* raid bdev module
* params:
* ctx - pointer to raid bdev context
* type - io type
* returns:
* true - io_type is supported
* false - io_type is not supported
*/
static bool
raid_bdev_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
switch (io_type) {
case SPDK_BDEV_IO_TYPE_READ:
case SPDK_BDEV_IO_TYPE_WRITE:
case SPDK_BDEV_IO_TYPE_FLUSH:
return true;
default:
return false;
}
return false;
}
/*
* brief:
* raid_bdev_get_io_channel is the get_io_channel function table pointer for
* raid bdev. This is used to return the io channel for this raid bdev
* params:
* ctxt - pointer to raid_bdev_ctxt
* returns:
* pointer to io channel for raid bdev
*/
static struct spdk_io_channel *
raid_bdev_get_io_channel(void *ctxt)
{
struct raid_bdev_ctxt *raid_bdev_ctxt = ctxt;
return spdk_get_io_channel(&raid_bdev_ctxt->raid_bdev);
}
/*
* brief:
* raid_bdev_dump_info_json is the function table pointer for raid bdev
* params:
* ctx - pointer to raid_bdev_ctxt
* w - pointer to json context
* returns:
* 0 - success
* non zero - failure
*/
static int
raid_bdev_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
struct raid_bdev_ctxt *raid_bdev_ctxt = ctx;
struct raid_bdev *raid_bdev;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_dump_config_json\n");
assert(raid_bdev_ctxt != NULL);
raid_bdev = &raid_bdev_ctxt->raid_bdev;
/* Dump the raid bdev configuration related information */
spdk_json_write_name(w, "raid");
spdk_json_write_object_begin(w);
spdk_json_write_named_uint32(w, "strip_size", raid_bdev->strip_size);
spdk_json_write_named_uint32(w, "state", raid_bdev->state);
spdk_json_write_named_uint32(w, "raid_level", raid_bdev->raid_level);
spdk_json_write_named_uint32(w, "destruct_called", raid_bdev->destruct_called);
spdk_json_write_named_uint32(w, "num_base_bdevs", raid_bdev->num_base_bdevs);
spdk_json_write_named_uint32(w, "num_base_bdevs_discovered", raid_bdev->num_base_bdevs_discovered);
spdk_json_write_name(w, "base_bdevs_list");
spdk_json_write_array_begin(w);
for (uint16_t iter = 0; iter < raid_bdev->num_base_bdevs; iter++) {
if (raid_bdev->base_bdev_info[iter].base_bdev) {
spdk_json_write_string(w, raid_bdev->base_bdev_info[iter].base_bdev->name);
} else {
spdk_json_write_null(w);
}
}
spdk_json_write_array_end(w);
spdk_json_write_object_end(w);
return 0;
}
/* g_raid_bdev_fn_table is the function table for raid bdev */
static const struct spdk_bdev_fn_table g_raid_bdev_fn_table = {
.destruct = raid_bdev_destruct,
.submit_request = raid_bdev_submit_request,
.io_type_supported = raid_bdev_io_type_supported,
.get_io_channel = raid_bdev_get_io_channel,
.dump_info_json = raid_bdev_dump_info_json,
};
/*
* brief:
* raid_bdev_free frees all memory allocated for the raid bdev configuration.
* This is called on the module exit path and on init failure
* params:
* none
* returns:
* none
*/
static void
raid_bdev_free(void)
{
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_free\n");
for (uint32_t raid_bdev = 0; raid_bdev < g_spdk_raid_config.total_raid_bdev; raid_bdev++) {
if (g_spdk_raid_config.raid_bdev_config[raid_bdev].base_bdev) {
for (uint32_t iter = 0; iter < g_spdk_raid_config.raid_bdev_config[raid_bdev].num_base_bdevs;
iter++) {
free(g_spdk_raid_config.raid_bdev_config[raid_bdev].base_bdev[iter].bdev_name);
}
free(g_spdk_raid_config.raid_bdev_config[raid_bdev].base_bdev);
g_spdk_raid_config.raid_bdev_config[raid_bdev].base_bdev = NULL;
}
free(g_spdk_raid_config.raid_bdev_config[raid_bdev].name);
}
if (g_spdk_raid_config.raid_bdev_config) {
if (g_spdk_raid_config.raid_bdev_config->raid_bdev_ctxt) {
g_spdk_raid_config.raid_bdev_config->raid_bdev_ctxt->raid_bdev.raid_bdev_config = NULL;
}
free(g_spdk_raid_config.raid_bdev_config);
g_spdk_raid_config.raid_bdev_config = NULL;
g_spdk_raid_config.total_raid_bdev = 0;
}
}
/*
* brief:
* raid_bdev_parse_raid is used to parse the raid bdev from config file based on
* pre-defined raid bdev format in config file.
* Format of config file:
* [RAID1]
* Name raid1
* StripSize 64
* NumDevices 2
* RaidLevel 0
* Devices Nvme0n1 Nvme1n1
*
* [RAID2]
* Name raid2
* StripSize 64
* NumDevices 3
* RaidLevel 0
* Devices Nvme2n1 Nvme3n1 Nvme4n1
*
* params:
* conf_section - pointer to config section
* returns:
* 0 - success
* non zero - failure
*/
static int
raid_bdev_parse_raid(struct spdk_conf_section *conf_section)
{
const char *raid_name;
int strip_size;
int num_base_bdevs;
int raid_level;
const char *base_bdev_name;
uint32_t iter;
void *temp_ptr;
struct raid_bdev_config *raid_bdev_config;
raid_name = spdk_conf_section_get_val(conf_section, "Name");
if (raid_name == NULL) {
SPDK_ERRLOG("raid_name %s is null\n", raid_name);
return -1;
}
strip_size = spdk_conf_section_get_intval(conf_section, "StripSize");
if (spdk_u32_is_pow2(strip_size) == false) {
SPDK_ERRLOG("Invalid strip size %d\n", strip_size);
return -1;
}
num_base_bdevs = spdk_conf_section_get_intval(conf_section, "NumDevices");
if (num_base_bdevs <= 0) {
SPDK_ERRLOG("Invalid base device count %d\n", num_base_bdevs);
return -1;
}
raid_level = spdk_conf_section_get_intval(conf_section, "RaidLevel");
if (raid_level != 0) {
SPDK_ERRLOG("invalid raid level %d, only raid level 0 is supported\n", raid_level);
return -1;
}
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "%s %d %d %d\n", raid_name, strip_size, num_base_bdevs,
raid_level);
for (iter = 0; iter < g_spdk_raid_config.total_raid_bdev; iter++) {
if (!strcmp(g_spdk_raid_config.raid_bdev_config[iter].name, raid_name)) {
SPDK_ERRLOG("Duplicate raid bdev name found in config file %s\n", raid_name);
return -1;
}
}
temp_ptr = realloc(g_spdk_raid_config.raid_bdev_config,
sizeof(struct raid_bdev_config) * (g_spdk_raid_config.total_raid_bdev + 1));
if (temp_ptr == NULL) {
SPDK_ERRLOG("unable to allocate memory\n");
return -1;
}
g_spdk_raid_config.raid_bdev_config = temp_ptr;
raid_bdev_config = &g_spdk_raid_config.raid_bdev_config[g_spdk_raid_config.total_raid_bdev];
memset(raid_bdev_config, 0, sizeof(*raid_bdev_config));
raid_bdev_config->name = strdup(raid_name);
if (!raid_bdev_config->name) {
SPDK_ERRLOG("unable to allocate memory\n");
return -1;
}
raid_bdev_config->strip_size = strip_size;
raid_bdev_config->num_base_bdevs = num_base_bdevs;
raid_bdev_config->raid_level = raid_level;
g_spdk_raid_config.total_raid_bdev++;
raid_bdev_config->base_bdev = calloc(num_base_bdevs, sizeof(*raid_bdev_config->base_bdev));
if (raid_bdev_config->base_bdev == NULL) {
SPDK_ERRLOG("unable to allocate memory\n");
return -1;
}
for (iter = 0; true; iter++) {
base_bdev_name = spdk_conf_section_get_nmval(conf_section, "Devices", 0, iter);
if (base_bdev_name == NULL) {
break;
}
if (iter >= raid_bdev_config->num_base_bdevs) {
SPDK_ERRLOG("Number of devices mentioned is more than count\n");
return -1;
}
for (uint32_t iter2 = 0; iter2 < g_spdk_raid_config.total_raid_bdev; iter2++) {
for (uint32_t iter3 = 0; iter3 < g_spdk_raid_config.raid_bdev_config[iter2].num_base_bdevs;
iter3++) {
if (g_spdk_raid_config.raid_bdev_config[iter2].base_bdev[iter3].bdev_name != NULL) {
if (!strcmp(g_spdk_raid_config.raid_bdev_config[iter2].base_bdev[iter3].bdev_name,
base_bdev_name)) {
SPDK_ERRLOG("duplicate base bdev name %s mentioned\n", base_bdev_name);
return -1;
}
}
}
}
raid_bdev_config->base_bdev[iter].bdev_name = strdup(base_bdev_name);
if (!raid_bdev_config->base_bdev[iter].bdev_name) {
SPDK_ERRLOG("unable to allocate memory\n");
return -1;
}
}
if (iter != raid_bdev_config->num_base_bdevs) {
SPDK_ERRLOG("Number of devices mentioned is less than count\n");
return -1;
}
return 0;
}
/*
* brief:
* raid_bdev_parse_config is used to find the raid bdev config sections and parse them
* params:
* none
* returns:
* 0 - success
* non zero - failure
*/
static int
raid_bdev_parse_config(void)
{
int ret;
struct spdk_conf_section *conf_section;
conf_section = spdk_conf_first_section(NULL);
while (conf_section != NULL) {
if (spdk_conf_section_match_prefix(conf_section, "RAID")) {
ret = raid_bdev_parse_raid(conf_section);
if (ret < 0) {
SPDK_ERRLOG("Unable to parse raid bdev section\n");
return ret;
}
}
conf_section = spdk_conf_next_section(conf_section);
}
return 0;
}
/*
* brief:
* raid_bdev_exit is called on raid bdev module exit time by bdev layer
* params:
* none
* returns:
* none
*/
static void
raid_bdev_exit(void)
{
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_exit\n");
raid_bdev_free();
}
/*
* brief:
* raid_bdev_get_ctx_size is used to return the context size of bdev_io for raid
* module
* params:
* none
* returns:
* size of spdk_bdev_io context for raid
*/
static int
raid_bdev_get_ctx_size(void)
{
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_get_ctx_size\n");
return sizeof(struct raid_bdev_io);
}
/*
* brief:
* raid_bdev_can_claim_bdev is the function to check if this base_bdev can be
* claimed by raid bdev or not.
* params:
* bdev_name - represents base bdev name
* raid_bdev_config - pointer to raid bdev config parsed from config file
* base_bdev_slot - if the bdev can be claimed, this holds the correct slot for
* the base_bdev. This field is only valid if the return value of this function is true
* returns:
* true - if bdev can be claimed
* false - if bdev can't be claimed
*/
static bool
raid_bdev_can_claim_bdev(const char *bdev_name, struct raid_bdev_config **raid_bdev_config,
uint32_t *base_bdev_slot)
{
bool rv = false;
for (uint32_t iter1 = 0; iter1 < g_spdk_raid_config.total_raid_bdev && !rv; iter1++) {
for (uint32_t iter2 = 0; iter2 < g_spdk_raid_config.raid_bdev_config[iter1].num_base_bdevs;
iter2++) {
/*
* Check if the base bdev name is part of raid bdev configuration.
* If match is found then return true and the slot information where
* this base bdev should be inserted in raid bdev
*/
if (!strcmp(bdev_name, g_spdk_raid_config.raid_bdev_config[iter1].base_bdev[iter2].bdev_name)) {
*raid_bdev_config = &g_spdk_raid_config.raid_bdev_config[iter1];
*base_bdev_slot = iter2;
rv = true;
break;
}
}
}
return rv;
}
static struct spdk_bdev_module g_raid_if = {
.name = "raid",
.module_init = raid_bdev_init,
.module_fini = raid_bdev_exit,
.get_ctx_size = raid_bdev_get_ctx_size,
.examine_config = raid_bdev_examine,
.config_text = NULL,
.async_init = false,
.async_fini = false,
};
SPDK_BDEV_MODULE_REGISTER(&g_raid_if)
/*
* brief:
* raid_bdev_init is the initialization function for raid bdev module
* params:
* none
* returns:
* 0 - success
* non zero - failure
*/
static int
raid_bdev_init(void)
{
int ret;
memset(&g_spdk_raid_config, 0, sizeof(g_spdk_raid_config));
TAILQ_INIT(&g_spdk_raid_bdev_configured_list);
TAILQ_INIT(&g_spdk_raid_bdev_configuring_list);
TAILQ_INIT(&g_spdk_raid_bdev_list);
TAILQ_INIT(&g_spdk_raid_bdev_offline_list);
/* Parse config file for raids */
ret = raid_bdev_parse_config();
if (ret < 0) {
SPDK_ERRLOG("raid bdev init failed parsing\n");
raid_bdev_free();
return ret;
}
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_init completed successfully\n");
return 0;
}
/*
* brief:
* raid_bdev_remove_base_bdev function is called by the layers below when a
* base_bdev is removed. This function checks whether the base bdev is part of
* any raid bdev. If yes, it takes the necessary action on that particular raid bdev.
* params:
* ctx - pointer to base bdev pointer which got removed
* returns:
* none
*/
void
raid_bdev_remove_base_bdev(void *ctx)
{
struct spdk_bdev *base_bdev = ctx;
struct raid_bdev *raid_bdev;
struct raid_bdev *next_raid_bdev;
struct raid_bdev_ctxt *raid_bdev_ctxt;
uint16_t iter;
bool found = false;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_remove_base_bdev\n");
/* Find the raid_bdev which has claimed this base_bdev */
TAILQ_FOREACH_SAFE(raid_bdev, &g_spdk_raid_bdev_list, link_global_list, next_raid_bdev) {
for (iter = 0; iter < raid_bdev->num_base_bdevs; iter++) {
if (raid_bdev->base_bdev_info[iter].base_bdev == base_bdev) {
found = true;
break;
}
}
if (found == true) {
break;
}
}
if (found == false) {
SPDK_ERRLOG("bdev to remove '%s' not found\n", base_bdev->name);
return;
}
assert(raid_bdev != NULL);
assert(raid_bdev->base_bdev_info[iter].base_bdev);
assert(raid_bdev->base_bdev_info[iter].base_bdev_desc);
raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
raid_bdev->base_bdev_info[iter].base_bdev_remove_scheduled = true;
if (raid_bdev->destruct_called == true && raid_bdev->base_bdev_info[iter].base_bdev != NULL) {
/* As the raid bdev is already unregistered, the cleanup should be done right here */
spdk_bdev_module_release_bdev(raid_bdev->base_bdev_info[iter].base_bdev);
spdk_bdev_close(raid_bdev->base_bdev_info[iter].base_bdev_desc);
raid_bdev->base_bdev_info[iter].base_bdev_desc = NULL;
raid_bdev->base_bdev_info[iter].base_bdev = NULL;
assert(raid_bdev->num_base_bdevs_discovered);
raid_bdev->num_base_bdevs_discovered--;
if (raid_bdev->num_base_bdevs_discovered == 0) {
/* There are no base bdevs left for this raid, so free the raid device */
raid_bdev_cleanup(raid_bdev_ctxt);
return;
}
}
if (raid_bdev->state == RAID_BDEV_STATE_ONLINE) {
/*
* If raid bdev is online and registered, change the bdev state to
* configuring and unregister this raid device. Queue this raid device
* in configuring list
*/
assert(raid_bdev->num_base_bdevs == raid_bdev->num_base_bdevs_discovered);
TAILQ_REMOVE(&g_spdk_raid_bdev_configured_list, raid_bdev, link_specific_list);
raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
assert(raid_bdev->num_base_bdevs_discovered);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_offline_list, raid_bdev, link_specific_list);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid bdev state chaning from online to offline\n");
spdk_io_device_unregister(&raid_bdev_ctxt->raid_bdev, NULL);
spdk_bdev_unregister(&raid_bdev_ctxt->bdev, NULL, NULL);
}
}
/*
* brief:
* raid_bdev_add_base_device function is the actual function which either adds
* the nvme base device to an existing raid bdev or creates a new raid bdev. It
* also claims the base device and keeps the descriptor open.
* params:
* bdev - pointer to base bdev
* returns:
* 0 - success
* non zero - failure
*/
int
raid_bdev_add_base_device(struct spdk_bdev *bdev)
{
struct raid_bdev_config *raid_bdev_config = NULL;
struct raid_bdev_ctxt *raid_bdev_ctxt;
struct raid_bdev *raid_bdev;
struct spdk_bdev_desc *desc;
struct spdk_bdev *raid_bdev_gen;
uint32_t blocklen;
uint64_t min_blockcnt;
uint32_t base_bdev_slot;
bool can_claim;
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_examine %p\n", bdev);
can_claim = raid_bdev_can_claim_bdev(bdev->name, &raid_bdev_config, &base_bdev_slot);
if (!can_claim) {
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "bdev %s can't be claimed\n", bdev->name);
return -1;
}
assert(raid_bdev_config);
if (spdk_bdev_open(bdev, true, raid_bdev_remove_base_bdev, bdev, &desc)) {
SPDK_ERRLOG("Unable to create desc on bdev '%s'\n", bdev->name);
return -1;
}
if (spdk_bdev_module_claim_bdev(bdev, NULL, &g_raid_if)) {
SPDK_ERRLOG("Unable to claim this bdev as it is already claimed\n");
spdk_bdev_close(desc);
return -1;
}
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "bdev %s is claimed\n", bdev->name);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid_bdev_config->raid_bdev_ctxt %p\n",
raid_bdev_config->raid_bdev_ctxt);
if (!raid_bdev_config->raid_bdev_ctxt) {
/* Allocate raid_bdev entity if it is not already allocated */
raid_bdev_ctxt = calloc(1, sizeof(*raid_bdev_ctxt));
if (!raid_bdev_ctxt) {
SPDK_ERRLOG("Unable to allocate memory for raid bdev for bdev '%s'\n", bdev->name);
spdk_bdev_module_release_bdev(bdev);
spdk_bdev_close(desc);
return -1;
}
raid_bdev = &raid_bdev_ctxt->raid_bdev;
raid_bdev->num_base_bdevs = raid_bdev_config->num_base_bdevs;
raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs, sizeof(struct raid_base_bdev_info));
if (!raid_bdev->base_bdev_info) {
SPDK_ERRLOG("Unable able to allocate base bdev info\n");
free(raid_bdev_ctxt);
spdk_bdev_module_release_bdev(bdev);
spdk_bdev_close(desc);
return -1;
}
raid_bdev_config->raid_bdev_ctxt = raid_bdev_ctxt;
raid_bdev->strip_size = raid_bdev_config->strip_size;
raid_bdev->state = RAID_BDEV_STATE_CONFIGURING;
raid_bdev->raid_bdev_config = raid_bdev_config;
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_list, raid_bdev, link_global_list);
} else {
raid_bdev = &raid_bdev_config->raid_bdev_ctxt->raid_bdev;
}
assert(raid_bdev->state != RAID_BDEV_STATE_ONLINE);
assert(base_bdev_slot < raid_bdev->num_base_bdevs);
raid_bdev->base_bdev_info[base_bdev_slot].base_bdev = bdev;
raid_bdev->base_bdev_info[base_bdev_slot].base_bdev_desc = desc;
raid_bdev->num_base_bdevs_discovered++;
assert(raid_bdev->num_base_bdevs_discovered <= raid_bdev->num_base_bdevs);
if (raid_bdev->num_base_bdevs_discovered == raid_bdev->num_base_bdevs) {
/* Only when the raid bdev config is complete, register the raid bdev with
* the bdev layer, remove this raid bdev from the configuring list and
* insert it into the configured list
*/
blocklen = raid_bdev->base_bdev_info[0].base_bdev->blocklen;
min_blockcnt = raid_bdev->base_bdev_info[0].base_bdev->blockcnt;
for (uint32_t iter = 1; iter < raid_bdev->num_base_bdevs; iter++) {
/* Calculate minimum block count from all base bdevs */
if (raid_bdev->base_bdev_info[iter].base_bdev->blockcnt < min_blockcnt) {
min_blockcnt = raid_bdev->base_bdev_info[iter].base_bdev->blockcnt;
}
/* Check that the blocklen is the same for all base bdevs */
if (blocklen != raid_bdev->base_bdev_info[iter].base_bdev->blocklen) {
/*
* The assumption is that all the base bdevs of a raid bdev
* have the same blocklen
*/
SPDK_ERRLOG("Blocklen of various bdevs not matching\n");
raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
TAILQ_REMOVE(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_offline_list, raid_bdev, link_specific_list);
return -1;
}
}
raid_bdev_ctxt = SPDK_CONTAINEROF(raid_bdev, struct raid_bdev_ctxt, raid_bdev);
raid_bdev_gen = &raid_bdev_ctxt->bdev;
raid_bdev_gen->name = strdup(raid_bdev_config->name);
if (!raid_bdev_gen->name) {
SPDK_ERRLOG("Unable to allocate name for raid\n");
raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
TAILQ_REMOVE(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_offline_list, raid_bdev, link_specific_list);
return -1;
}
raid_bdev_gen->product_name = "Pooled Device";
raid_bdev_gen->write_cache = 0;
raid_bdev_gen->blocklen = blocklen;
raid_bdev_gen->optimal_io_boundary = 0;
raid_bdev_gen->ctxt = raid_bdev_ctxt;
raid_bdev_gen->fn_table = &g_raid_bdev_fn_table;
raid_bdev_gen->module = &g_raid_if;
raid_bdev->strip_size = (raid_bdev->strip_size * 1024) / blocklen;
raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
raid_bdev->blocklen_shift = spdk_u32log2(blocklen);
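/*
* Example: with a 64KB strip size from the config file and 512-byte
* blocks, strip_size becomes (64 * 1024) / 512 = 128 blocks,
* strip_size_shift = 7 and blocklen_shift = 9.
*/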
/*
* The RAID bdev logic is striping, so take the minimum-block-count based
* approach where the total block count of the raid bdev is the number of
* base bdevs times the minimum block count of any base bdev
*/
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "min blockcount %lu, numbasedev %u, strip size shift %u\n",
min_blockcnt,
raid_bdev->num_base_bdevs, raid_bdev->strip_size_shift);
raid_bdev_gen->blockcnt = ((min_blockcnt >> raid_bdev->strip_size_shift) <<
raid_bdev->strip_size_shift) * raid_bdev->num_base_bdevs;
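/*
* Example: with min_blockcnt = 1000000, strip_size = 128 blocks and 4 base
* bdevs, each base bdev contributes 1000000 rounded down to a strip
* boundary, i.e. 999936 blocks, giving a raid bdev blockcnt of 3999744.
*/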
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "io device register %p\n", raid_bdev);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "blockcnt %lu, blocklen %u\n", raid_bdev_gen->blockcnt,
raid_bdev_gen->blocklen);
if (raid_bdev->state == RAID_BDEV_STATE_CONFIGURING) {
raid_bdev->state = RAID_BDEV_STATE_ONLINE;
spdk_io_device_register(raid_bdev, raid_bdev_create_cb, raid_bdev_destroy_cb,
sizeof(struct raid_bdev_io_channel));
if (spdk_bdev_register(raid_bdev_gen)) {
/*
* If failed to register raid bdev to bdev layer, make raid bdev offline
* and add to offline list
*/
SPDK_ERRLOG("Unable to register pooled bdev\n");
spdk_io_device_unregister(raid_bdev, NULL);
raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
TAILQ_REMOVE(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_offline_list, raid_bdev, link_specific_list);
return -1;
}
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid bdev generic %p\n", raid_bdev_gen);
TAILQ_REMOVE(&g_spdk_raid_bdev_configuring_list, raid_bdev, link_specific_list);
TAILQ_INSERT_TAIL(&g_spdk_raid_bdev_configured_list, raid_bdev, link_specific_list);
SPDK_DEBUGLOG(SPDK_LOG_BDEV_RAID, "raid bdev is created with name %s, raid_bdev %p\n",
raid_bdev_gen->name, raid_bdev);
}
}
return 0;
}
/*
* brief:
* raid_bdev_examine function is the examine function called by the layers
* below, like the bdev_nvme layer. This function checks whether this base
* bdev can be claimed by the raid bdev or not.
* params:
* bdev - pointer to base bdev
* returns:
* none
*/
static void
raid_bdev_examine(struct spdk_bdev *bdev)
{
raid_bdev_add_base_device(bdev);
spdk_bdev_module_examine_done(&g_raid_if);
}
/* Log component for bdev raid bdev module */
SPDK_LOG_REGISTER_COMPONENT("bdev_raid", SPDK_LOG_BDEV_RAID)