Remove limits on the size of READ/WRITE operations.

Instead of allocating up to 16MB of RAM at once to handle a whole I/O,
allocate up to 1MB at a time, but do multiple ctl_datamove() calls and
storage I/Os if needed.
mav 2014-04-24 16:19:49 +00:00
parent dc51a2f994
commit 61397be4e9
3 changed files with 203 additions and 137 deletions
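
The heart of the change is bounding how much buffer space any single pass
may pin: the ctl_io keeps kern_total_len for the whole command, while
kern_rel_offset records how far the passes have progressed. A minimal,
self-contained C sketch of that accounting, with invented names standing
in for ctl_datamove() and the backing-store I/O (not CTL code):

#include <stdint.h>
#include <stdio.h>

#define CHUNK_MAX (1024 * 1024)    /* 1MB per pass, like CTLBLK_MAX_IO_SIZE */

/* Hypothetical stand-in for ctl_datamove() plus the storage I/O. */
static void
move_and_do_io(uint64_t off, uint64_t len)
{
    printf("datamove + I/O: offset %ju len %ju\n",
        (uintmax_t)off, (uintmax_t)len);
}

/*
 * Sketch of the new scheme: run the transfer as multiple bounded
 * passes instead of allocating buffers for the whole I/O at once.
 */
static void
submit(uint64_t total_len)
{
    uint64_t rel_offset = 0;    /* kern_rel_offset in the diff */

    while (rel_offset < total_len) {
        uint64_t len = total_len - rel_offset;

        if (len > CHUNK_MAX)
            len = CHUNK_MAX;
        move_and_do_io(rel_offset, len);
        rel_offset += len;    /* advanced in move_done/beio_cont */
    }
}

int
main(void)
{
    submit(16ULL * 1024 * 1024);    /* a 16MB I/O now takes 16 passes */
    return (0);
}

A 16MB command that previously required 16MB of wired buffers now peaks
at 1MB, at the cost of more datamove round-trips.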

sys/cam/ctl/ctl.c

@@ -9534,7 +9534,7 @@ ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
 	bl_ptr->page_code = SVPD_BLOCK_LIMITS;
 	scsi_ulto2b(sizeof(*bl_ptr), bl_ptr->page_length);
-	scsi_ulto4b((16 * 1024 * 1024) / bs, bl_ptr->max_txfer_len);
+	scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
 	scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len);
 	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
 		scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
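
In SCSI terms, assuming a 512-byte block size, the old code advertised a
MAXIMUM TRANSFER LENGTH of (16 * 1024 * 1024) / 512 = 32768 blocks in the
Block Limits VPD page. The new 0xffffffff effectively tells initiators
there is no backend-imposed cap, while OPTIMAL TRANSFER LENGTH stays at
MAXPHYS / bs (256 blocks with the stock 128KB MAXPHYS of this era),
steering well-behaved initiators toward transfers the backend can service
in a single physical I/O.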

sys/cam/ctl/ctl_backend_block.c

@@ -89,11 +89,12 @@ __FBSDID("$FreeBSD$");
 #include <cam/ctl/ctl_error.h>

 /*
- * The idea here is that we'll allocate enough S/G space to hold a 16MB
- * I/O.  If we get an I/O larger than that, we'll reject it.
+ * The idea here is that we'll allocate enough S/G space to hold a 1MB
+ * I/O.  If we get an I/O larger than that, we'll split it.
  */
-#define	CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
-#define	CTLBLK_MAX_SEGS	(CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1
+#define	CTLBLK_MAX_IO_SIZE	(1024 * 1024)
+#define	CTLBLK_MAX_SEG	MAXPHYS
+#define	CTLBLK_MAX_SEGS	MAX(CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG, 1)

 #ifdef CTLBLK_DEBUG
 #define DPRINTF(fmt, args...) \
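
The arithmetic behind the new macros, assuming the stock 128KB MAXPHYS of
the period: each scatter/gather segment is one CTLBLK_MAX_SEG = 128KB
buffer, so a 1MB pass needs MAX(1048576 / 131072, 1) = 8 segments, versus
the (16MB / 128KB) + 1 = 129 segments the old CTLBLK_MAX_SEGS reserved for
a worst-case 16MB I/O. The MAX(..., 1) guard keeps the count sane on
kernels configured with MAXPHYS larger than 1MB.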
@@ -498,14 +499,6 @@ ctl_be_block_biodone(struct bio *bio)
 		ctl_set_success(&io->scsiio);
 		ctl_complete_beio(beio);
 	} else {
-		io->scsiio.be_move_done = ctl_be_block_move_done;
-		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
-		io->scsiio.kern_data_len = beio->io_len;
-		io->scsiio.kern_total_len = beio->io_len;
-		io->scsiio.kern_rel_offset = 0;
-		io->scsiio.kern_data_resid = 0;
-		io->scsiio.kern_sg_entries = beio->num_segs;
-		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
 #ifdef CTL_TIME_IO
 		getbintime(&io->io_hdr.dma_start_bt);
 #endif
@@ -705,14 +698,6 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
 		ctl_complete_beio(beio);
 	} else {
 		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
-		io->scsiio.be_move_done = ctl_be_block_move_done;
-		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
-		io->scsiio.kern_data_len = beio->io_len;
-		io->scsiio.kern_total_len = beio->io_len;
-		io->scsiio.kern_rel_offset = 0;
-		io->scsiio.kern_data_resid = 0;
-		io->scsiio.kern_sg_entries = beio->num_segs;
-		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
 #ifdef CTL_TIME_IO
 		getbintime(&io->io_hdr.dma_start_bt);
 #endif
@@ -1012,7 +997,7 @@ ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
 		/*
 		 * Setup the S/G entry for this chunk.
 		 */
-		seglen = MIN(MAXPHYS, len_left);
+		seglen = MIN(CTLBLK_MAX_SEG, len_left);
 		seglen -= seglen % be_lun->blocksize;
 		beio->sg_segs[i].len = seglen;
 		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
@@ -1164,6 +1149,37 @@ SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t");
 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t");
 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t");

+static void
+ctl_be_block_next(struct ctl_be_block_io *beio)
+{
+	struct ctl_be_block_lun *be_lun;
+	union ctl_io *io;
+
+	io = beio->io;
+	be_lun = beio->lun;
+	ctl_free_beio(beio);
+	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
+	 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
+		ctl_done(io);
+		return;
+	}
+
+	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
+	io->io_hdr.status &= ~CTL_STATUS_MASK;
+	io->io_hdr.status |= CTL_STATUS_NONE;
+
+	mtx_lock(&be_lun->lock);
+	/*
+	 * XXX KDM make sure that links is okay to use at this point.
+	 * Otherwise, we either need to add another field to ctl_io_hdr,
+	 * or deal with resource allocation here.
+	 */
+	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
+	mtx_unlock(&be_lun->lock);
+
+	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
+}
+
 static void
 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 		      union ctl_io *io)
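
ctl_be_block_next() is the continuation glue: it runs via beio_cont when a
pass finishes, frees that pass's beio, advances kern_rel_offset, resets
the status back to CTL_STATUS_NONE, and requeues the same ctl_io on the
LUN's input queue so the worker thread dispatches the next pass. The shape
of the mechanism, reduced to a self-contained C sketch with invented names
(not CTL code; in CTL the continuation fires from I/O completion rather
than being called inline):

#include <stdint.h>
#include <stdio.h>

#define CHUNK_MAX (1024 * 1024)

struct fake_io {
    uint64_t total_len;               /* kern_total_len */
    uint64_t rel_offset;              /* kern_rel_offset */
    uint64_t chunk_len;               /* kern_data_len of current pass */
    void (*cont)(struct fake_io *);   /* beio_cont stand-in */
};

static void dispatch(struct fake_io *io);

/* Plays the role of ctl_be_block_next(): advance and redispatch. */
static void
next_chunk(struct fake_io *io)
{
    io->rel_offset += io->chunk_len;
    dispatch(io);    /* the real driver requeues instead of recursing */
}

static void
dispatch(struct fake_io *io)
{
    uint64_t left = io->total_len - io->rel_offset;

    io->chunk_len = left < CHUNK_MAX ? left : CHUNK_MAX;
    /* Chain a continuation only if another pass will be needed. */
    io->cont = (io->rel_offset + io->chunk_len < io->total_len) ?
        next_chunk : NULL;
    printf("chunk at %ju len %ju\n", (uintmax_t)io->rel_offset,
        (uintmax_t)io->chunk_len);
    if (io->cont != NULL)
        io->cont(io);
    else
        printf("done\n");
}

int
main(void)
{
    struct fake_io io = { .total_len = 3 * CHUNK_MAX + 512 };

    dispatch(&io);    /* three 1MB passes plus a 512-byte tail */
    return (0);
}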
@@ -1171,7 +1187,7 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 	struct ctl_be_block_io *beio;
 	struct ctl_be_block_softc *softc;
 	struct ctl_lba_len lbalen;
-	uint64_t len_left, io_size_bytes;
+	uint64_t len_left, lbaoff;
 	int i;

 	softc = be_lun->softc;
@@ -1184,29 +1200,6 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
 	}

-	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
-	       sizeof(lbalen));
-
-	io_size_bytes = lbalen.len * be_lun->blocksize;
-
-	/*
-	 * XXX KDM this is temporary, until we implement chaining of beio
-	 * structures and multiple datamove calls to move all the data in
-	 * or out.
-	 */
-	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
-		printf("%s: IO length %ju > max io size %u\n", __func__,
-		       io_size_bytes, CTLBLK_MAX_IO_SIZE);
-		ctl_set_invalid_field(&io->scsiio,
-				      /*sks_valid*/ 0,
-				      /*command*/ 1,
-				      /*field*/ 0,
-				      /*bit_valid*/ 0,
-				      /*bit*/ 0);
-		ctl_done(io);
-		return;
-	}
-
 	beio = ctl_alloc_beio(softc);
 	beio->io = io;
 	beio->lun = be_lun;
@@ -1253,20 +1246,25 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 		beio->ds_trans_type = DEVSTAT_WRITE;
 	}

-	beio->io_len = lbalen.len * be_lun->blocksize;
-	beio->io_offset = lbalen.lba * be_lun->blocksize;
-
-	DPRINTF("%s at LBA %jx len %u\n",
+	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
+	       sizeof(lbalen));
+	DPRINTF("%s at LBA %jx len %u @%ju\n",
 	       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
-	       (uintmax_t)lbalen.lba, lbalen.len);
-
-	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
-	     len_left > 0; i++) {
+	       (uintmax_t)lbalen.lba, lbalen.len, lbaoff);
+	lbaoff = io->scsiio.kern_rel_offset / be_lun->blocksize;
+	beio->io_offset = (lbalen.lba + lbaoff) * be_lun->blocksize;
+	beio->io_len = MIN((lbalen.len - lbaoff) * be_lun->blocksize,
+	    CTLBLK_MAX_IO_SIZE);
+	beio->io_len -= beio->io_len % be_lun->blocksize;
+
+	for (i = 0, len_left = beio->io_len; len_left > 0; i++) {
+		KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)",
+		    i, CTLBLK_MAX_SEGS));
+
 		/*
 		 * Setup the S/G entry for this chunk.
 		 */
-		beio->sg_segs[i].len = min(MAXPHYS, len_left);
+		beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left);
 		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

 		DPRINTF("segment %d addr %p len %zd\n", i,
@@ -1275,6 +1273,15 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 		beio->num_segs++;
 		len_left -= beio->sg_segs[i].len;
 	}
+	if (io->scsiio.kern_rel_offset + beio->io_len <
+	    io->scsiio.kern_total_len)
+		beio->beio_cont = ctl_be_block_next;
+	io->scsiio.be_move_done = ctl_be_block_move_done;
+	io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
+	io->scsiio.kern_data_len = beio->io_len;
+	io->scsiio.kern_data_resid = 0;
+	io->scsiio.kern_sg_entries = beio->num_segs;
+	io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;

 	/*
 	 * For the read case, we need to read the data into our buffers and
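
To see the per-pass arithmetic, take a hypothetical 4MB WRITE at LBA
0x1000 with a 512-byte block size: on the first pass kern_rel_offset is 0,
so lbaoff = 0, io_offset = 0x1000 * 512, and io_len = MIN(4MB, 1MB) = 1MB.
The completed pass advances kern_rel_offset to 1MB, so the requeued I/O
computes lbaoff = 2048, io_offset = (0x1000 + 2048) * 512, and another
1MB io_len; four passes cover the whole command. Because beio_cont is
only set while kern_rel_offset + io_len is still short of kern_total_len,
the final pass completes the ctl_io normally.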
@@ -1286,14 +1293,6 @@ ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
 		be_lun->dispatch(be_lun, beio);
 	} else {
 		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
-		io->scsiio.be_move_done = ctl_be_block_move_done;
-		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
-		io->scsiio.kern_data_len = beio->io_len;
-		io->scsiio.kern_total_len = beio->io_len;
-		io->scsiio.kern_rel_offset = 0;
-		io->scsiio.kern_data_resid = 0;
-		io->scsiio.kern_sg_entries = beio->num_segs;
-		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
 #ifdef CTL_TIME_IO
 		getbintime(&io->io_hdr.dma_start_bt);
 #endif
@@ -1384,6 +1383,7 @@ ctl_be_block_worker(void *context, int pending)
 static int
 ctl_be_block_submit(union ctl_io *io)
 {
+	struct ctl_lba_len lbalen;
 	struct ctl_be_block_lun *be_lun;
 	struct ctl_be_lun *ctl_be_lun;
 	int retval;
@@ -1402,6 +1402,11 @@ ctl_be_block_submit(union ctl_io *io)
 	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
 	       "%#x) encountered", io->io_hdr.io_type));

+	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
+	       sizeof(lbalen));
+	io->scsiio.kern_total_len = lbalen.len * be_lun->blocksize;
+	io->scsiio.kern_rel_offset = 0;
+
 	mtx_lock(&be_lun->lock);
 	/*
 	 * XXX KDM make sure that links is okay to use at this point.
@@ -1838,7 +1843,7 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
 	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
 	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);

-	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
+	be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG,
 	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
 	if (be_lun->lun_zone == NULL) {

sys/cam/ctl/ctl_backend_ramdisk.c

@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/malloc.h>
+#include <sys/taskqueue.h>
 #include <sys/time.h>
 #include <sys/queue.h>
 #include <sys/conf.h>
@@ -73,12 +74,17 @@ typedef enum {
 } ctl_be_ramdisk_lun_flags;

 struct ctl_be_ramdisk_lun {
+	char lunname[32];
 	uint64_t size_bytes;
 	uint64_t size_blocks;
 	struct ctl_be_ramdisk_softc *softc;
 	ctl_be_ramdisk_lun_flags flags;
 	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
 	struct ctl_be_lun ctl_be_lun;
+	struct taskqueue *io_taskqueue;
+	struct task io_task;
+	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
+	struct mtx lock;
 };

 struct ctl_be_ramdisk_softc {
@@ -100,6 +106,7 @@ int ctl_backend_ramdisk_init(void);
 void ctl_backend_ramdisk_shutdown(void);
 static int ctl_backend_ramdisk_move_done(union ctl_io *io);
 static int ctl_backend_ramdisk_submit(union ctl_io *io);
+static void ctl_backend_ramdisk_continue(union ctl_io *io);
 static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
 				     caddr_t addr, int flag, struct thread *td);
 static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
@@ -108,6 +115,7 @@ static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
 				      struct ctl_lun_req *req, int do_wait);
 static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
 				      struct ctl_lun_req *req);
+static void ctl_backend_ramdisk_worker(void *context, int pending);
 static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
 static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
 						  ctl_lun_config_status status);
@@ -145,7 +153,7 @@ ctl_backend_ramdisk_init(void)
 	mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF);
 	STAILQ_INIT(&softc->lun_list);
-	softc->rd_size = 4 * 1024 * 1024;
+	softc->rd_size = 1024 * 1024;
 #ifdef CTL_RAMDISK_PAGES
 	softc->num_pages = softc->rd_size / PAGE_SIZE;
 	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
@@ -211,16 +219,39 @@ ctl_backend_ramdisk_shutdown(void)
 static int
 ctl_backend_ramdisk_move_done(union ctl_io *io)
 {
+	struct ctl_be_lun *ctl_be_lun;
+	struct ctl_be_ramdisk_lun *be_lun;
 #ifdef CTL_TIME_IO
 	struct bintime cur_bt;
 #endif

 	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
+	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
+		CTL_PRIV_BACKEND_LUN].ptr;
+	be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
+#ifdef CTL_TIME_IO
+	getbintime(&cur_bt);
+	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
+	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
+	io->io_hdr.num_dmas++;
+#endif
+	if (io->scsiio.kern_sg_entries > 0)
+		free(io->scsiio.kern_data_ptr, M_RAMDISK);
+	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
 	if ((io->io_hdr.port_status == 0)
 	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
-	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
+	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
+		if (io->scsiio.kern_rel_offset < io->scsiio.kern_total_len) {
+			mtx_lock(&be_lun->lock);
+			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
+			    &io->io_hdr, links);
+			mtx_unlock(&be_lun->lock);
+			taskqueue_enqueue(be_lun->io_taskqueue,
+			    &be_lun->io_task);
+			return (0);
+		}
 		io->io_hdr.status = CTL_SUCCESS;
-	else if ((io->io_hdr.port_status != 0)
+	} else if ((io->io_hdr.port_status != 0)
 	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
 	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
 		/*
@@ -236,15 +267,6 @@ ctl_backend_ramdisk_move_done(union ctl_io *io)
 						 /*retry_count*/
 						 io->io_hdr.port_status);
 	}
-#ifdef CTL_TIME_IO
-	getbintime(&cur_bt);
-	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
-	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
-	io->io_hdr.num_dmas++;
-#endif
-	if (io->scsiio.kern_sg_entries > 0)
-		free(io->scsiio.kern_data_ptr, M_RAMDISK);
-
 	ctl_done(io);
 	return(0);
 }
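
The reworked move_done is where the chaining decision happens for the
ramdisk backend: DMA timing stats are charged and the per-pass S/G list
freed up front, kern_rel_offset is advanced by the amount just moved, and
only then does the function choose between queueing the next pass on
cont_queue (leaving the ctl_io alive) and falling through to ctl_done().
Returning 0 after taskqueue_enqueue() is what keeps CTL from treating the
still-running I/O as finished.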
@@ -253,93 +275,100 @@ static int
 ctl_backend_ramdisk_submit(union ctl_io *io)
 {
 	struct ctl_lba_len lbalen;
-#ifdef CTL_RAMDISK_PAGES
-	struct ctl_sg_entry *sg_entries;
-	int len_filled;
-	int i;
-#endif
-	int num_sg_entries, len;
 	struct ctl_be_ramdisk_softc *softc;
 	struct ctl_be_lun *ctl_be_lun;
 	struct ctl_be_ramdisk_lun *be_lun;

 	softc = &rd_softc;
 	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
 		CTL_PRIV_BACKEND_LUN].ptr;
 	be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

 	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
 	       sizeof(lbalen));
-
-	len = lbalen.len * ctl_be_lun->blocksize;
-
-	/*
-	 * Kick out the request if it's bigger than we can handle.
-	 */
-	if (len > softc->rd_size) {
-		ctl_set_internal_failure(&io->scsiio,
-					 /*sks_valid*/ 0,
-					 /*retry_count*/ 0);
-		ctl_done(io);
-		return (CTL_RETVAL_COMPLETE);
-	}
-
-	/*
-	 * Kick out the request if it's larger than the device size that
-	 * the user requested.
-	 */
-	if (((lbalen.lba * ctl_be_lun->blocksize) + len) > be_lun->size_bytes) {
-		ctl_set_lba_out_of_range(&io->scsiio);
-		ctl_done(io);
-		return (CTL_RETVAL_COMPLETE);
-	}
+	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
+	io->scsiio.kern_total_len = lbalen.len * ctl_be_lun->blocksize;
+	io->scsiio.kern_rel_offset = 0;
+	io->scsiio.kern_data_resid = 0;
+	ctl_backend_ramdisk_continue(io);

 	return (CTL_RETVAL_COMPLETE);
 }

+static void
+ctl_backend_ramdisk_continue(union ctl_io *io)
+{
+	struct ctl_be_ramdisk_softc *softc;
+	int len, len_filled, sg_filled;
 #ifdef CTL_RAMDISK_PAGES
-	num_sg_entries = len >> PAGE_SHIFT;
-	if ((len & (PAGE_SIZE - 1)) != 0)
-		num_sg_entries++;
+	struct ctl_sg_entry *sg_entries;
+	int i;
 #endif
-
-	if (num_sg_entries > 1) {
+
+	softc = &rd_softc;
+	len = io->scsiio.kern_total_len - io->scsiio.kern_rel_offset;
+#ifdef CTL_RAMDISK_PAGES
+	sg_filled = min(btoc(len), softc->num_pages);
+	if (sg_filled > 1) {
 		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
-						  num_sg_entries, M_RAMDISK,
+						  sg_filled, M_RAMDISK,
 						  M_WAITOK);
 		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
-		for (i = 0, len_filled = 0; i < num_sg_entries;
-		     i++, len_filled += PAGE_SIZE) {
+		for (i = 0, len_filled = 0; i < sg_filled; i++) {
 			sg_entries[i].addr = softc->ramdisk_pages[i];
 			sg_entries[i].len = ctl_min(PAGE_SIZE,
 						    len - len_filled);
+			len_filled += sg_entries[i].len;
 		}
+		io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
 	} else {
-#endif /* CTL_RAMDISK_PAGES */
-		/*
-		 * If this is less than 1 page, don't bother allocating a
-		 * scatter/gather list for it.  This saves time/overhead.
-		 */
-		num_sg_entries = 0;
-#ifdef CTL_RAMDISK_PAGES
+		sg_filled = 0;
+		len_filled = len;
 		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
-#else
-		io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
-#endif
-#ifdef CTL_RAMDISK_PAGES
 	}
-#endif
+#else
+	sg_filled = 0;
+	len_filled = min(len, softc->rd_size);
+	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
+#endif /* CTL_RAMDISK_PAGES */

-	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
-	io->scsiio.kern_data_len = len;
-	io->scsiio.kern_total_len = len;
-	io->scsiio.kern_rel_offset = 0;
-	io->scsiio.kern_data_resid = 0;
-	io->scsiio.kern_sg_entries = num_sg_entries;
-	io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
+	io->scsiio.kern_data_len = len_filled;
+	io->scsiio.kern_sg_entries = sg_filled;
+	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
 #ifdef CTL_TIME_IO
 	getbintime(&io->io_hdr.dma_start_bt);
 #endif
 	ctl_datamove(io);
-
-	return (CTL_RETVAL_COMPLETE);
 }
+
+static void
+ctl_backend_ramdisk_worker(void *context, int pending)
+{
+	struct ctl_be_ramdisk_softc *softc;
+	struct ctl_be_ramdisk_lun *be_lun;
+	union ctl_io *io;
+
+	be_lun = (struct ctl_be_ramdisk_lun *)context;
+	softc = be_lun->softc;
+
+	mtx_lock(&be_lun->lock);
+	for (;;) {
+		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
+		if (io != NULL) {
+			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
+				      ctl_io_hdr, links);
+			mtx_unlock(&be_lun->lock);
+			ctl_backend_ramdisk_continue(io);
+			mtx_lock(&be_lun->lock);
+			continue;
+		}
+
+		/*
+		 * If we get here, there is no work left in the queues, so
+		 * just break out and let the task queue go to sleep.
+		 */
+		break;
+	}
+	mtx_unlock(&be_lun->lock);
+}

 static int
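
The worker/taskqueue pair exists because move_done runs in the datamove
completion path, where the backend should not start the next pass
synchronously (it could recurse or run in a context unsuitable for the
work); instead the I/O is parked on cont_queue and a dedicated thread
picks it up. A toy userspace analogue of that producer/consumer shape,
using POSIX threads and invented names (a sketch, not the kernel API):

#include <pthread.h>
#include <stdio.h>

/*
 * Toy analogue of cont_queue + io_taskqueue: one worker thread drains
 * a counter of queued continuations.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int queued;
static int done;

static void
continue_io(int n)    /* plays ctl_backend_ramdisk_continue() */
{
    printf("continuing I/O, pass %d\n", n);
}

static void *
worker(void *arg)
{
    int pass = 0;

    (void)arg;
    pthread_mutex_lock(&lock);
    for (;;) {
        while (queued == 0 && !done)
            pthread_cond_wait(&cv, &lock);
        if (queued == 0 && done)
            break;
        queued--;
        pthread_mutex_unlock(&lock);    /* drop the lock to do the work */
        continue_io(pass++);
        pthread_mutex_lock(&lock);
    }
    pthread_mutex_unlock(&lock);
    return (NULL);
}

int
main(void)
{
    pthread_t tid;
    int i;

    pthread_create(&tid, NULL, worker, NULL);
    for (i = 0; i < 4; i++) {    /* four queued continuations */
        pthread_mutex_lock(&lock);
        queued++;                /* taskqueue_enqueue() analogue */
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&lock);
    }
    pthread_mutex_lock(&lock);
    done = 1;
    pthread_cond_broadcast(&cv);
    pthread_mutex_unlock(&lock);
    pthread_join(tid, NULL);
    return (0);
}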
@@ -470,8 +499,12 @@ ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
 	mtx_unlock(&softc->lock);

-	if (retval == 0)
+	if (retval == 0) {
+		taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
+		taskqueue_free(be_lun->io_taskqueue);
+		mtx_destroy(&be_lun->lock);
 		free(be_lun, M_RAMDISK);
+	}

 	req->status = CTL_LUN_OK;
@@ -509,6 +542,7 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
 			 sizeof(*be_lun));
 		goto bailout_error;
 	}
+	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
 	STAILQ_INIT(&be_lun->ctl_be_lun.options);
 	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
@@ -611,6 +645,27 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
 			  sizeof(params->device_id)));
 	}

+	STAILQ_INIT(&be_lun->cont_queue);
+	mtx_init(&be_lun->lock, "CTL ramdisk", NULL, MTX_DEF);
+	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
+	    be_lun);
+
+	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
+	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
+	if (be_lun->io_taskqueue == NULL) {
+		snprintf(req->error_str, sizeof(req->error_str),
+			 "%s: Unable to create taskqueue", __func__);
+		goto bailout_error;
+	}
+
+	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
+					 /*num threads*/1,
+					 /*priority*/PWAIT,
+					 /*thread name*/
+					 "%s taskq", be_lun->lunname);
+	if (retval != 0)
+		goto bailout_error;
+
 	mtx_lock(&softc->lock);
 	softc->num_luns++;
 	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
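
Note the lifecycle pairing introduced here: cont_queue, the per-LUN mutex,
the task, and the taskqueue are all created in ctl_backend_ramdisk_create(),
and the matching teardown appears twice, in ctl_backend_ramdisk_rm()
(taskqueue_drain() before taskqueue_free(), so a queued continuation cannot
touch a freed LUN) and in the bailout_error path below, which must tolerate
a partially constructed be_lun.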
@@ -669,7 +724,13 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
 bailout_error:
 	req->status = CTL_LUN_ERROR;
-	free(be_lun, M_RAMDISK);
+	if (be_lun != NULL) {
+		if (be_lun->io_taskqueue != NULL) {
+			taskqueue_free(be_lun->io_taskqueue);
+		}
+		mtx_destroy(&be_lun->lock);
+		free(be_lun, M_RAMDISK);
+	}

 	return (retval);
 }