module/raid: unify submit_request function signatures

Pass raid_bdev_io instead of spdk_bdev_io.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: I8ecc39abafe54a7bc5034caa126f961c444230a2
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/471084
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Artur Paszkiewicz 2019-10-10 12:01:28 +02:00 committed by Tomasz Zawadzki
parent 77b8618ecc
commit 0ae5a89dc4
3 changed files with 44 additions and 22 deletions
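
Background for the change, as a rough sketch: the raid module keeps its per-I/O state (struct raid_bdev_io) in the driver_ctx scratch area of the owning struct spdk_bdev_io, so the two can be converted in either direction. The patch itself just uses the cast and spdk_bdev_io_from_ctx() directly; the helper names below are illustrative only, not part of the patch:

#include "spdk/bdev_module.h"
#include "bdev_raid.h"	/* struct raid_bdev_io - the module's per-I/O context */

/* spdk_bdev_io -> raid_bdev_io: the module context is stored in driver_ctx. */
static inline struct raid_bdev_io *
raid_io_from_bdev_io(struct spdk_bdev_io *bdev_io)
{
	return (struct raid_bdev_io *)bdev_io->driver_ctx;
}

/* raid_bdev_io -> spdk_bdev_io: spdk_bdev_io_from_ctx() recovers the
 * enclosing spdk_bdev_io from a pointer into its driver_ctx area. */
static inline struct spdk_bdev_io *
bdev_io_from_raid_io(struct raid_bdev_io *raid_io)
{
	return spdk_bdev_io_from_ctx(raid_io);
}

With that, the submit paths can take struct raid_bdev_io * directly and derive the spdk_bdev_io only where the generic bdev API still needs it.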


@@ -346,27 +346,38 @@ raid_bdev_queue_io_wait(struct spdk_bdev_io *raid_bdev_io, uint8_t pd_idx,
 	spdk_bdev_io_complete(raid_bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 }
 
+static void
+raid_bdev_submit_reset_request(struct raid_bdev_io *raid_io);
+
+static void
+_raid_bdev_submit_reset_request(void *_bdev_io)
+{
+	struct spdk_bdev_io *bdev_io = _bdev_io;
+	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
+
+	raid_bdev_submit_reset_request(raid_io);
+}
+
 /*
  * brief:
- * _raid_bdev_submit_reset_request_next function submits the next batch of reset requests
+ * raid_bdev_submit_reset_request function submits reset requests
  * to member disks; it will submit as many as possible unless a reset fails with -ENOMEM, in
  * which case it will queue it for later submission
  * params:
- * bdev_io - pointer to parent bdev_io on raid bdev device
+ * raid_io
  * returns:
  * none
  */
 static void
-_raid_bdev_submit_reset_request_next(void *_bdev_io)
+raid_bdev_submit_reset_request(struct raid_bdev_io *raid_io)
 {
-	struct spdk_bdev_io *bdev_io = _bdev_io;
-	struct raid_bdev_io *raid_io;
+	struct spdk_bdev_io *bdev_io;
 	struct raid_bdev *raid_bdev;
 	int ret;
 	uint8_t i;
 
+	bdev_io = spdk_bdev_io_from_ctx(raid_io);
 	raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
-	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
 	raid_io->base_bdev_io_expected = raid_bdev->num_base_bdevs;
@@ -379,7 +390,7 @@ _raid_bdev_submit_reset_request_next(void *_bdev_io)
 			raid_io->base_bdev_io_submitted++;
 		} else {
 			raid_bdev_queue_io_wait(bdev_io, i,
-						_raid_bdev_submit_reset_request_next, ret);
+						_raid_bdev_submit_reset_request, ret);
 			return;
 		}
 	}
@@ -399,12 +410,14 @@ static void
 raid_bdev_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
 		     bool success)
 {
+	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
+
 	if (!success) {
 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 		return;
 	}
 
-	raid0_submit_rw_request(bdev_io);
+	raid0_submit_rw_request(raid_io);
 }
 
 /*
@@ -434,16 +447,16 @@ raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
 				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
 		break;
 	case SPDK_BDEV_IO_TYPE_WRITE:
-		raid0_submit_rw_request(bdev_io);
+		raid0_submit_rw_request(raid_io);
 		break;
 	case SPDK_BDEV_IO_TYPE_RESET:
-		_raid_bdev_submit_reset_request_next(bdev_io);
+		raid_bdev_submit_reset_request(raid_io);
 		break;
 	case SPDK_BDEV_IO_TYPE_FLUSH:
 	case SPDK_BDEV_IO_TYPE_UNMAP:
-		raid0_submit_null_payload_request(bdev_io);
+		raid0_submit_null_payload_request(raid_io);
 		break;
 	default:


@@ -266,9 +266,9 @@ __RAID_MODULE_REGISTER(__LINE__)(void) \
 }
 
 void
-raid0_submit_rw_request(struct spdk_bdev_io *bdev_io);
+raid0_submit_rw_request(struct raid_bdev_io *raid_io);
 void
-raid0_submit_null_payload_request(void *_bdev_io);
+raid0_submit_null_payload_request(struct raid_bdev_io *raid_io);
 void
 raid_bdev_base_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
 void


@@ -73,14 +73,14 @@ raid0_waitq_io_process(void *ctx);
  * raid0_submit_rw_request function is used to submit I/O to the correct
  * member disk for raid0 bdevs.
  * params:
- * bdev_io - parent bdev io
+ * raid_io
  * returns:
  * none
  */
 void
-raid0_submit_rw_request(struct spdk_bdev_io *bdev_io)
+raid0_submit_rw_request(struct raid_bdev_io *raid_io)
 {
-	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
+	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
 	struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;
 	struct raid_bdev *raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
 	uint64_t pd_strip;
@@ -155,8 +155,9 @@ static void
 raid0_waitq_io_process(void *ctx)
 {
 	struct spdk_bdev_io *bdev_io = ctx;
+	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
 
-	raid0_submit_rw_request(bdev_io);
+	raid0_submit_rw_request(raid_io);
 }
 
 /* raid0 IO range */
@@ -254,6 +255,15 @@ _raid0_split_io_range(struct raid_bdev_io_range *io_range, uint8_t disk_idx,
 	*_nblocks_in_disk = nblocks_in_disk;
 }
 
+static void
+_raid0_submit_null_payload_request(void *_bdev_io)
+{
+	struct spdk_bdev_io *bdev_io = _bdev_io;
+	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
+
+	raid0_submit_null_payload_request(raid_io);
+}
+
 /*
  * brief:
  * raid0_submit_null_payload_request function submits the next batch of
@@ -266,16 +276,15 @@ _raid0_split_io_range(struct raid_bdev_io_range *io_range, uint8_t disk_idx,
  * none
  */
 void
-raid0_submit_null_payload_request(void *_bdev_io)
+raid0_submit_null_payload_request(struct raid_bdev_io *raid_io)
 {
-	struct spdk_bdev_io *bdev_io = _bdev_io;
-	struct raid_bdev_io *raid_io;
+	struct spdk_bdev_io *bdev_io;
 	struct raid_bdev *raid_bdev;
 	struct raid_bdev_io_range io_range;
 	int ret;
 
+	bdev_io = spdk_bdev_io_from_ctx(raid_io);
 	raid_bdev = (struct raid_bdev *)bdev_io->bdev->ctxt;
-	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
 
 	_raid0_get_io_range(&io_range, raid_bdev->num_base_bdevs,
 			    raid_bdev->strip_size, raid_bdev->strip_size_shift,
@@ -320,7 +329,7 @@ raid0_submit_null_payload_request(void *_bdev_io)
 			raid_io->base_bdev_io_submitted++;
 		} else {
 			raid_bdev_queue_io_wait(bdev_io, disk_idx,
-						raid0_submit_null_payload_request, ret);
+						_raid0_submit_null_payload_request, ret);
 			return;
 		}
 	}
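
One detail worth calling out, sketched below under the assumption that raid_bdev_queue_io_wait() queues the parent spdk_bdev_io with an spdk_bdev_io_wait_cb-style callback (void (*)(void *)): the new typed submit functions cannot be queued as the retry callback directly, which is why the patch adds the thin void * adapters _raid_bdev_submit_reset_request() and _raid0_submit_null_payload_request(). The names in this sketch are illustrative, not from the patch:

#include "spdk/bdev_module.h"
#include "bdev_raid.h"	/* struct raid_bdev_io */

static void _example_submit(void *_bdev_io);

/* Typed entry point, mirroring the shape the module's submit paths now use. */
static void
example_submit(struct raid_bdev_io *raid_io)
{
	/* ...submit raid_io to the base bdevs, queueing _example_submit for
	 * retry if a base bdev returns -ENOMEM... */
	(void)raid_io;
}

/* Retry adapter: the queued argument is the parent spdk_bdev_io, so recover
 * the raid_bdev_io from driver_ctx and re-enter the typed submit path once
 * a base bdev request becomes available again. */
static void
_example_submit(void *_bdev_io)
{
	struct spdk_bdev_io *bdev_io = _bdev_io;
	struct raid_bdev_io *raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;

	example_submit(raid_io);
}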