Replace home-grown CTL IO allocator with UMA.
The old allocator created significant lock contention while protecting its lists of preallocated I/Os, whereas UMA provides much better SMP scalability. The downside of UMA is the lack of a reliable preallocation mechanism that could guarantee successful allocation in non-sleepable contexts. But careful code review showed that only the CAM target frontend really has that requirement. Fix that by making that frontend preallocate and statically bind a CTL I/O to every ATIO/INOT it preallocates anyway, which avoids allocations in the hot I/O path. The other frontends either may sleep in their allocation context or can properly handle allocation errors.

On a 40-core server with 6 ZVOL-backed LUNs and 7 iSCSI client connections this change increases peak performance from ~700K to >1M IOPS! Yay! :)

MFC after:	1 month
Sponsored by:	iXsystems, Inc.
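For readers skimming the diff, here is a minimal standalone sketch of the UMA pattern this change adopts. It is not the committed code: `struct example_io` and the `example_*` names are placeholders standing in for `union ctl_io` and the real `ctl_alloc_io()`/`ctl_alloc_io_nowait()`/`ctl_free_io()` shown in the diff below.

```c
#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct example_io { char payload[512]; };	/* stand-in for union ctl_io */

static uma_zone_t io_zone;

static void
example_zone_init(void)
{
	/*
	 * One master zone backs all I/Os; UMA's per-CPU caches remove the
	 * single pool mutex that the old list-based allocator contended on.
	 */
	io_zone = uma_zcreate("example IO", sizeof(struct example_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static struct example_io *
example_alloc(int can_sleep)
{
	/*
	 * Sleepable callers pass M_WAITOK and never see NULL; non-sleepable
	 * callers pass M_NOWAIT and must either handle failure or, like the
	 * CAM target frontend below, preallocate one I/O per ATIO/INOT.
	 */
	return (uma_zalloc(io_zone, can_sleep ? M_WAITOK : M_NOWAIT));
}

static void
example_free(struct example_io *io)
{
	uma_zfree(io_zone, io);
}
```

Per-port pools then become cheap secondary zones of the master zone (see `uma_zsecond_create()` in the new `ctl_pool_create()` below), so a pool reduces to a name plus a zone pointer instead of a mutex-protected free list.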
commit 1251a76b12 (parent 9efc7e72bb)
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -64,6 +64,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/smp.h>
 #include <sys/endian.h>
 #include <sys/sysctl.h>
+#include <vm/uma.h>
 
 #include <cam/cam.h>
 #include <cam/scsi/scsi_all.h>
@@ -644,7 +645,7 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
 #if 0
         printf("Serialize\n");
 #endif
-        io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
+        io = ctl_alloc_io_nowait(ctl_softc->othersc_pool);
         if (io == NULL) {
             printf("ctl_isc_event_handler: can't allocate "
                    "ctl_io!\n");
@@ -889,8 +890,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
         /* Handle resets sent from the other side */
         case CTL_MSG_MANAGE_TASKS: {
             struct ctl_taskio *taskio;
-            taskio = (struct ctl_taskio *)ctl_alloc_io(
-                (void *)ctl_softc->othersc_pool);
+            taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
+                ctl_softc->othersc_pool);
             if (taskio == NULL) {
                 printf("ctl_isc_event_handler: can't allocate "
                        "ctl_io!\n");
@@ -918,8 +919,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
         }
         /* Persistent Reserve action which needs attention */
         case CTL_MSG_PERS_ACTION:
-            presio = (struct ctl_prio *)ctl_alloc_io(
-                (void *)ctl_softc->othersc_pool);
+            presio = (struct ctl_prio *)ctl_alloc_io_nowait(
+                ctl_softc->othersc_pool);
             if (presio == NULL) {
                 printf("ctl_isc_event_handler: can't allocate "
                        "ctl_io!\n");
@@ -1003,7 +1004,7 @@ static int
 ctl_init(void)
 {
     struct ctl_softc *softc;
-    struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
+    void *other_pool;
     struct ctl_port *port;
     int i, error, retval;
     //int isc_retval;
@@ -1049,7 +1050,8 @@ ctl_init(void)
         "Report no lun possible for invalid LUNs");
 
     mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
     mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
+    softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
+        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
     softc->open_count = 0;
 
     /*
@@ -1086,36 +1088,15 @@ ctl_init(void)
     STAILQ_INIT(&softc->fe_list);
     STAILQ_INIT(&softc->port_list);
     STAILQ_INIT(&softc->be_list);
-    STAILQ_INIT(&softc->io_pools);
     ctl_tpc_init(softc);
 
-    if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
-                        &internal_pool)!= 0){
-        printf("ctl: can't allocate %d entry internal pool, "
-               "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
-        return (ENOMEM);
-    }
-
-    if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
-                        CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
-        printf("ctl: can't allocate %d entry emergency pool, "
-               "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
-        ctl_pool_free(internal_pool);
-        return (ENOMEM);
-    }
-
-    if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
+    if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
                         &other_pool) != 0)
     {
         printf("ctl: can't allocate %d entry other SC pool, "
               "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
-        ctl_pool_free(internal_pool);
-        ctl_pool_free(emergency_pool);
         return (ENOMEM);
     }
 
-    softc->internal_pool = internal_pool;
-    softc->emergency_pool = emergency_pool;
     softc->othersc_pool = other_pool;
 
     if (worker_threads <= 0)
@@ -1137,8 +1118,6 @@ ctl_init(void)
             &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
         if (error != 0) {
             printf("error creating CTL work thread!\n");
-            ctl_pool_free(internal_pool);
-            ctl_pool_free(emergency_pool);
             ctl_pool_free(other_pool);
             return (error);
         }
@@ -1147,8 +1126,6 @@ ctl_init(void)
         &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
     if (error != 0) {
         printf("error creating CTL lun thread!\n");
-        ctl_pool_free(internal_pool);
-        ctl_pool_free(emergency_pool);
         ctl_pool_free(other_pool);
         return (error);
     }
@@ -1156,8 +1133,6 @@ ctl_init(void)
         &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
     if (error != 0) {
         printf("error creating CTL threshold thread!\n");
-        ctl_pool_free(internal_pool);
-        ctl_pool_free(emergency_pool);
         ctl_pool_free(other_pool);
         return (error);
     }
@@ -1210,7 +1185,6 @@ ctl_shutdown(void)
 {
     struct ctl_softc *softc;
     struct ctl_lun *lun, *next_lun;
-    struct ctl_io_pool *pool;
 
     softc = (struct ctl_softc *)control_softc;
 
@@ -1231,24 +1205,13 @@ ctl_shutdown(void)
 
     ctl_frontend_deregister(&ioctl_frontend);
 
-    /*
-     * This will rip the rug out from under any FETDs or anyone else
-     * that has a pool allocated.  Since we increment our module
-     * refcount any time someone outside the main CTL module allocates
-     * a pool, we shouldn't have any problems here.  The user won't be
-     * able to unload the CTL module until client modules have
-     * successfully unloaded.
-     */
-    while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
-        ctl_pool_free(pool);
-
 #if 0
     ctl_shutdown_thread(softc->work_thread);
     mtx_destroy(&softc->queue_lock);
 #endif
 
     ctl_tpc_shutdown(softc);
     mtx_destroy(&softc->pool_lock);
+    uma_zdestroy(softc->io_zone);
     mtx_destroy(&softc->ctl_lock);
 
     destroy_dev(softc->dev);
@@ -2371,21 +2334,15 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
         }
 
         io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);
-        if (io == NULL) {
-            printf("ctl_ioctl: can't allocate ctl_io!\n");
-            retval = ENOSPC;
-            break;
-        }
-
         /*
          * Need to save the pool reference so it doesn't get
          * spammed by the user's ctl_io.
         */
         pool_tmp = io->io_hdr.pool;
 
         memcpy(io, (void *)addr, sizeof(*io));
 
         io->io_hdr.pool = pool_tmp;
 
         /*
          * No status yet, so make sure the status is set properly.
         */
@@ -3729,285 +3686,95 @@ ctl_kfree_io(union ctl_io *io)
 #endif /* unused */
 
 /*
- * ctl_softc, pool_type, total_ctl_io are passed in.
+ * ctl_softc, pool_name, total_ctl_io are passed in.
  * npool is passed out.
  */
 int
-ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
-                uint32_t total_ctl_io, struct ctl_io_pool **npool)
+ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
+                uint32_t total_ctl_io, void **npool)
 {
-    uint32_t i;
-    union ctl_io *cur_io, *next_io;
+#ifdef IO_POOLS
     struct ctl_io_pool *pool;
-    int retval;
-
-    retval = 0;
 
     pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
                                         M_NOWAIT | M_ZERO);
-    if (pool == NULL) {
-        retval = ENOMEM;
-        goto bailout;
-    }
+    if (pool == NULL)
+        return (ENOMEM);
 
-    pool->type = pool_type;
+    snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
     pool->ctl_softc = ctl_softc;
-
-    mtx_lock(&ctl_softc->pool_lock);
-    pool->id = ctl_softc->cur_pool_id++;
-    mtx_unlock(&ctl_softc->pool_lock);
-
-    pool->flags = CTL_POOL_FLAG_NONE;
-    pool->refcount = 1;     /* Reference for validity. */
-    STAILQ_INIT(&pool->free_queue);
-
-    /*
-     * XXX KDM other options here:
-     * - allocate a page at a time
-     * - allocate one big chunk of memory.
-     * Page allocation might work well, but would take a little more
-     * tracking.
-     */
-    for (i = 0; i < total_ctl_io; i++) {
-        cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO,
-                                        M_NOWAIT);
-        if (cur_io == NULL) {
-            retval = ENOMEM;
-            break;
-        }
-        cur_io->io_hdr.pool = pool;
-        STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
-        pool->total_ctl_io++;
-        pool->free_ctl_io++;
-    }
-
-    if (retval != 0) {
-        for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
-             cur_io != NULL; cur_io = next_io) {
-            next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
-                                                  links);
-            STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
-                          ctl_io_hdr, links);
-            free(cur_io, M_CTLIO);
-        }
-
-        free(pool, M_CTL);
-        goto bailout;
-    }
-    mtx_lock(&ctl_softc->pool_lock);
-    ctl_softc->num_pools++;
-    STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
-    /*
-     * Increment our usage count if this is an external consumer, so we
-     * can't get unloaded until the external consumer (most likely a
-     * FETD) unloads and frees his pool.
-     *
-     * XXX KDM will this increment the caller's module use count, or
-     * mine?
-     */
-#if 0
-    if ((pool_type != CTL_POOL_EMERGENCY)
-     && (pool_type != CTL_POOL_INTERNAL)
-     && (pool_type != CTL_POOL_4OTHERSC))
-        MOD_INC_USE_COUNT;
-#endif
-
-    mtx_unlock(&ctl_softc->pool_lock);
+    pool->zone = uma_zsecond_create(pool->name, NULL,
+        NULL, NULL, NULL, ctl_softc->io_zone);
+    /* uma_prealloc(pool->zone, total_ctl_io); */
 
     *npool = pool;
-
-bailout:
-
-    return (retval);
-}
-
-static int
-ctl_pool_acquire(struct ctl_io_pool *pool)
-{
-
-    mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
-
-    if (pool->flags & CTL_POOL_FLAG_INVALID)
-        return (EINVAL);
-
-    pool->refcount++;
-
-    return (0);
-}
-
-static void
-ctl_pool_release(struct ctl_io_pool *pool)
-{
-    struct ctl_softc *ctl_softc = pool->ctl_softc;
-    union ctl_io *io;
-
-    mtx_assert(&ctl_softc->pool_lock, MA_OWNED);
-
-    if (--pool->refcount != 0)
-        return;
-
-    while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
-        STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
-                      links);
-        free(io, M_CTLIO);
-    }
-
-    STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
-    ctl_softc->num_pools--;
-
-    /*
-     * XXX KDM will this decrement the caller's usage count or mine?
-     */
-#if 0
-    if ((pool->type != CTL_POOL_EMERGENCY)
-     && (pool->type != CTL_POOL_INTERNAL)
-     && (pool->type != CTL_POOL_4OTHERSC))
-        MOD_DEC_USE_COUNT;
-#endif
-
-    free(pool, M_CTL);
+#else
+    *npool = ctl_softc->io_zone;
+#endif
+    return (0);
 }
 
 void
 ctl_pool_free(struct ctl_io_pool *pool)
 {
-    struct ctl_softc *ctl_softc;
 
     if (pool == NULL)
         return;
 
-    ctl_softc = pool->ctl_softc;
-    mtx_lock(&ctl_softc->pool_lock);
-    pool->flags |= CTL_POOL_FLAG_INVALID;
-    ctl_pool_release(pool);
-    mtx_unlock(&ctl_softc->pool_lock);
+#ifdef IO_POOLS
+    uma_zdestroy(pool->zone);
+    free(pool, M_CTL);
+#endif
 }
 
-/*
- * This routine does not block (except for spinlocks of course).
- * It tries to allocate a ctl_io union from the caller's pool as quickly as
- * possible.
- */
 union ctl_io *
 ctl_alloc_io(void *pool_ref)
 {
     union ctl_io *io;
-    struct ctl_softc *ctl_softc;
-    struct ctl_io_pool *pool, *npool;
-    struct ctl_io_pool *emergency_pool;
-
-    pool = (struct ctl_io_pool *)pool_ref;
-
-    if (pool == NULL) {
-        printf("%s: pool is NULL\n", __func__);
-        return (NULL);
-    }
-
-    emergency_pool = NULL;
-
-    ctl_softc = pool->ctl_softc;
-
-    mtx_lock(&ctl_softc->pool_lock);
-    /*
-     * First, try to get the io structure from the user's pool.
-     */
-    if (ctl_pool_acquire(pool) == 0) {
-        io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
-        if (io != NULL) {
-            STAILQ_REMOVE_HEAD(&pool->free_queue, links);
-            pool->total_allocated++;
-            pool->free_ctl_io--;
-            mtx_unlock(&ctl_softc->pool_lock);
-            return (io);
-        } else
-            ctl_pool_release(pool);
-    }
-    /*
-     * If he doesn't have any io structures left, search for an
-     * emergency pool and grab one from there.
-     */
-    STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
-        if (npool->type != CTL_POOL_EMERGENCY)
-            continue;
-
-        if (ctl_pool_acquire(npool) != 0)
-            continue;
-
-        emergency_pool = npool;
-
-        io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
-        if (io != NULL) {
-            STAILQ_REMOVE_HEAD(&npool->free_queue, links);
-            npool->total_allocated++;
-            npool->free_ctl_io--;
-            mtx_unlock(&ctl_softc->pool_lock);
-            return (io);
-        } else
-            ctl_pool_release(npool);
-    }
-
-    /* Drop the spinlock before we malloc */
-    mtx_unlock(&ctl_softc->pool_lock);
-
-    /*
-     * The emergency pool (if it exists) didn't have one, so try an
-     * atomic (i.e. nonblocking) malloc and see if we get lucky.
-     */
-    io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT);
-    if (io != NULL) {
-        /*
-         * If the emergency pool exists but is empty, add this
-         * ctl_io to its list when it gets freed.
-         */
-        if (emergency_pool != NULL) {
-            mtx_lock(&ctl_softc->pool_lock);
-            if (ctl_pool_acquire(emergency_pool) == 0) {
-                io->io_hdr.pool = emergency_pool;
-                emergency_pool->total_ctl_io++;
-                /*
-                 * Need to bump this, otherwise
-                 * total_allocated and total_freed won't
-                 * match when we no longer have anything
-                 * outstanding.
-                 */
-                emergency_pool->total_allocated++;
-            }
-            mtx_unlock(&ctl_softc->pool_lock);
-        } else
-            io->io_hdr.pool = NULL;
-    }
+#ifdef IO_POOLS
+    struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
 
+    io = uma_zalloc(pool->zone, M_WAITOK);
+#else
+    io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
+#endif
+    if (io != NULL)
+        io->io_hdr.pool = pool_ref;
+    return (io);
+}
+
+union ctl_io *
+ctl_alloc_io_nowait(void *pool_ref)
+{
+    union ctl_io *io;
+#ifdef IO_POOLS
+    struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
+
+    io = uma_zalloc(pool->zone, M_NOWAIT);
+#else
+    io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
+#endif
+    if (io != NULL)
+        io->io_hdr.pool = pool_ref;
     return (io);
 }
 
 void
 ctl_free_io(union ctl_io *io)
 {
+#ifdef IO_POOLS
+    struct ctl_io_pool *pool;
+#endif
+
     if (io == NULL)
         return;
 
-    /*
-     * If this ctl_io has a pool, return it to that pool.
-     */
-    if (io->io_hdr.pool != NULL) {
-        struct ctl_io_pool *pool;
-
-        pool = (struct ctl_io_pool *)io->io_hdr.pool;
-        mtx_lock(&pool->ctl_softc->pool_lock);
-        io->io_hdr.io_type = 0xff;
-        STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
-        pool->total_freed++;
-        pool->free_ctl_io++;
-        ctl_pool_release(pool);
-        mtx_unlock(&pool->ctl_softc->pool_lock);
-    } else {
-        /*
-         * Otherwise, just free it.  We probably malloced it and
-         * the emergency pool wasn't available.
-         */
-        free(io, M_CTLIO);
-    }
+#ifdef IO_POOLS
+    pool = (struct ctl_io_pool *)io->io_hdr.pool;
+    uma_zfree(pool->zone, io);
+#else
+    uma_zfree((uma_zone_t)io->io_hdr.pool, io);
+#endif
 }
@@ -4022,9 +3789,7 @@ ctl_zero_io(union ctl_io *io)
      * May need to preserve linked list pointers at some point too.
      */
     pool_ref = io->io_hdr.pool;
-
     memset(io, 0, sizeof(*io));
-
     io->io_hdr.pool = pool_ref;
 }
@@ -5657,16 +5422,10 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
             union ctl_io *new_io;
 
             new_io = ctl_alloc_io(ctsio->io_hdr.pool);
-            if (new_io == NULL) {
-                ctl_set_busy(ctsio);
-                ctl_done((union ctl_io *)ctsio);
-            } else {
-                ctl_copy_io((union ctl_io *)ctsio,
-                            new_io);
-                retval = lun->backend->config_write(new_io);
-                ctl_set_success(ctsio);
-                ctl_done((union ctl_io *)ctsio);
-            }
+            ctl_copy_io((union ctl_io *)ctsio, new_io);
+            retval = lun->backend->config_write(new_io);
+            ctl_set_success(ctsio);
+            ctl_done((union ctl_io *)ctsio);
         } else {
             retval = lun->backend->config_write(
                 (union ctl_io *)ctsio);
--- a/sys/cam/ctl/ctl_frontend.c
+++ b/sys/cam/ctl/ctl_frontend.c
@@ -138,7 +138,7 @@ ctl_frontend_find(char *frontend_name)
 int
 ctl_port_register(struct ctl_port *port)
 {
-    struct ctl_io_pool *pool;
+    void *pool;
     int port_num;
     int retval;
 
@@ -176,7 +176,7 @@ ctl_port_register(struct ctl_port *port)
      * pending sense queue on the next command, whether or not it is
      * a REQUEST SENSE.
      */
-    retval = ctl_pool_create(control_softc, CTL_POOL_FETD,
+    retval = ctl_pool_create(control_softc, port->port_name,
                              port->num_requested_ctl_io + 20, &pool);
     if (retval != 0) {
         free(port->wwpn_iid, M_CTL);
--- a/sys/cam/ctl/ctl_frontend_cam_sim.c
+++ b/sys/cam/ctl/ctl_frontend_cam_sim.c
@@ -545,7 +545,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
             return;
         }
 
-        io = ctl_alloc_io(softc->port.ctl_pool_ref);
+        io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
         if (io == NULL) {
             printf("%s: can't allocate ctl_io\n", __func__);
             ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
@@ -642,7 +642,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
             return;
         }
 
-        io = ctl_alloc_io(softc->port.ctl_pool_ref);
+        io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
         if (io == NULL) {
             ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
             xpt_freeze_devq(ccb->ccb_h.path, 1);
@@ -737,7 +737,7 @@ cfcs_action(struct cam_sim *sim, union ccb *ccb)
             return;
         }
 
-        io = ctl_alloc_io(softc->port.ctl_pool_ref);
+        io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
         if (io == NULL) {
             ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
             xpt_freeze_devq(ccb->ccb_h.path, 1);
--- a/sys/cam/ctl/ctl_frontend_internal.c
+++ b/sys/cam/ctl/ctl_frontend_internal.c
@@ -761,11 +761,6 @@ cfi_done(union ctl_io *io)
         struct cfi_lun_io *new_lun_io;
 
         new_io = ctl_alloc_io(softc->port.ctl_pool_ref);
-        if (new_io == NULL) {
-            printf("%s: unable to allocate ctl_io for "
-                   "error recovery\n", __func__);
-            goto done;
-        }
         ctl_zero_io(new_io);
 
         new_io->io_hdr.io_type = CTL_IO_TASK;
@@ -967,12 +962,6 @@ cfi_lun_probe(struct cfi_lun *lun, int have_lock)
         union ctl_io *io;
 
         io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
-        if (io == NULL) {
-            printf("%s: unable to alloc ctl_io for target %ju "
-                   "lun %d probe\n", __func__,
-                   (uintmax_t)lun->target_id.id, lun->lun_id);
-            return;
-        }
         ctl_scsi_inquiry(io,
                          /*data_ptr*/(uint8_t *)&lun->inq_data,
                          /*data_len*/ sizeof(lun->inq_data),
@@ -1014,12 +1003,6 @@ cfi_lun_probe(struct cfi_lun *lun, int have_lock)
         union ctl_io *io;
 
         io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
-        if (io == NULL) {
-            printf("%s: unable to alloc ctl_io for target %ju "
-                   "lun %d probe\n", __func__,
-                   (uintmax_t)lun->target_id.id, lun->lun_id);
-            return;
-        }
 
         dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
                          M_CTL_CFI, M_NOWAIT);
@@ -1394,7 +1377,7 @@ cfi_action(struct cfi_metatask *metatask)
             if (SID_TYPE(&lun->inq_data) != T_DIRECT)
                 continue;
             da_luns++;
-            io = ctl_alloc_io(softc->port.ctl_pool_ref);
+            io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
             if (io != NULL) {
                 ios_allocated++;
                 STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
@@ -1548,7 +1531,7 @@ cfi_action(struct cfi_metatask *metatask)
         }
 
-        io = ctl_alloc_io(softc->port.ctl_pool_ref);
+        io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
         if (io == NULL) {
             metatask->status = CFI_MT_ERROR;
             metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
--- a/sys/cam/ctl/ctl_frontend_iscsi.c
+++ b/sys/cam/ctl/ctl_frontend_iscsi.c
@@ -542,13 +542,6 @@ cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request)
         return;
     }
     io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
-    if (io == NULL) {
-        CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io; "
-            "dropping connection");
-        icl_pdu_free(request);
-        cfiscsi_session_terminate(cs);
-        return;
-    }
     ctl_zero_io(io);
     io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
     io->io_hdr.io_type = CTL_IO_SCSI;
@@ -606,13 +599,6 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
     cs = PDU_SESSION(request);
     bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs;
     io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
-    if (io == NULL) {
-        CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io;"
-            "dropping connection");
-        icl_pdu_free(request);
-        cfiscsi_session_terminate(cs);
-        return;
-    }
     ctl_zero_io(io);
     io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request;
     io->io_hdr.io_type = CTL_IO_TASK;
@@ -1063,10 +1049,6 @@ cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs)
     if (cs->cs_target == NULL)
         return;     /* No target yet, so nothing to do. */
     io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref);
-    if (io == NULL) {
-        CFISCSI_SESSION_WARN(cs, "can't allocate ctl_io");
-        return;
-    }
     ctl_zero_io(io);
     io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs;
     io->io_hdr.io_type = CTL_IO_TASK;
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -511,6 +511,7 @@ union ctl_io {
 #ifdef _KERNEL
 
 union ctl_io *ctl_alloc_io(void *pool_ref);
+union ctl_io *ctl_alloc_io_nowait(void *pool_ref);
 void ctl_free_io(union ctl_io *io);
 void ctl_zero_io(union ctl_io *io);
 void ctl_copy_io(union ctl_io *src, union ctl_io *dest);
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -71,34 +71,13 @@ struct ctl_fe_ioctl_params {
     ctl_fe_ioctl_state  state;
 };
 
-#define CTL_POOL_ENTRIES_INTERNAL   200
-#define CTL_POOL_ENTRIES_EMERGENCY  300
 #define CTL_POOL_ENTRIES_OTHER_SC   200
 
-typedef enum {
-    CTL_POOL_INTERNAL,
-    CTL_POOL_FETD,
-    CTL_POOL_EMERGENCY,
-    CTL_POOL_4OTHERSC
-} ctl_pool_type;
-
-typedef enum {
-    CTL_POOL_FLAG_NONE      = 0x00,
-    CTL_POOL_FLAG_INVALID   = 0x01
-} ctl_pool_flags;
-
 struct ctl_io_pool {
-    ctl_pool_type               type;
-    ctl_pool_flags              flags;
-    uint32_t                    id;
+    char                        name[64];
     struct ctl_softc            *ctl_softc;
-    uint32_t                    refcount;
-    uint64_t                    total_allocated;
-    uint64_t                    total_freed;
-    int32_t                     total_ctl_io;
-    int32_t                     free_ctl_io;
-    STAILQ_HEAD(, ctl_io_hdr)   free_queue;
-    STAILQ_ENTRY(ctl_io_pool)   links;
+    struct uma_zone             *zone;
 };
 
 typedef enum {
@@ -475,9 +454,7 @@ struct ctl_softc {
     struct sysctl_ctx_list sysctl_ctx;
     struct sysctl_oid *sysctl_tree;
     struct ctl_ioctl_info ioctl_info;
-    struct ctl_io_pool *internal_pool;
-    struct ctl_io_pool *emergency_pool;
-    struct ctl_io_pool *othersc_pool;
+    void *othersc_pool;
     struct proc *ctl_proc;
     int targ_online;
     uint32_t ctl_lun_mask[(CTL_MAX_LUNS + 31) / 32];
@@ -492,10 +469,8 @@ struct ctl_softc {
     struct ctl_port *ctl_ports[CTL_MAX_PORTS];
     uint32_t num_backends;
     STAILQ_HEAD(, ctl_backend_driver) be_list;
     struct mtx pool_lock;
-    uint32_t num_pools;
-    uint32_t cur_pool_id;
-    STAILQ_HEAD(, ctl_io_pool) io_pools;
+    struct uma_zone *io_zone;
     struct ctl_thread threads[CTL_MAX_THREADS];
     TAILQ_HEAD(tpc_tokens, tpc_token) tpc_tokens;
     struct callout tpc_timeout;
@@ -508,8 +483,8 @@ extern const struct ctl_cmd_entry ctl_cmd_table[256];
 uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
 uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
 uint32_t ctl_port_idx(int port_num);
-int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
-                    uint32_t total_ctl_io, struct ctl_io_pool **npool);
+int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
+                    uint32_t total_ctl_io, void **npool);
 void ctl_pool_free(struct ctl_io_pool *pool);
 int ctl_scsi_release(struct ctl_scsiio *ctsio);
 int ctl_scsi_reserve(struct ctl_scsiio *ctsio);
--- a/sys/cam/ctl/ctl_tpc.c
+++ b/sys/cam/ctl/ctl_tpc.c
@@ -812,7 +812,6 @@ tpc_process_b2b(struct tpc_list *list)
     uint32_t srcblock, dstblock;
 
     if (list->stage == 1) {
 complete:
         while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
             TAILQ_REMOVE(&list->allio, tior, links);
             ctl_free_io(tior->io);
@@ -886,10 +885,6 @@ complete:
         tior->list = list;
         TAILQ_INSERT_TAIL(&list->allio, tior, links);
         tior->io = tpcl_alloc_io();
-        if (tior->io == NULL) {
-            list->error = 1;
-            goto complete;
-        }
         ctl_scsi_read_write(tior->io,
                             /*data_ptr*/ &list->buf[donebytes],
                             /*data_len*/ roundbytes,
@@ -909,10 +904,6 @@ complete:
         tiow->list = list;
         TAILQ_INSERT_TAIL(&list->allio, tiow, links);
         tiow->io = tpcl_alloc_io();
-        if (tiow->io == NULL) {
-            list->error = 1;
-            goto complete;
-        }
         ctl_scsi_read_write(tiow->io,
                             /*data_ptr*/ &list->buf[donebytes],
                             /*data_len*/ roundbytes,
@@ -951,7 +942,6 @@ tpc_process_verify(struct tpc_list *list)
     uint64_t sl;
 
     if (list->stage == 1) {
 complete:
         while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
             TAILQ_REMOVE(&list->allio, tio, links);
             ctl_free_io(tio->io);
@@ -990,10 +980,6 @@ complete:
     tio->list = list;
     TAILQ_INSERT_TAIL(&list->allio, tio, links);
     tio->io = tpcl_alloc_io();
-    if (tio->io == NULL) {
-        list->error = 1;
-        goto complete;
-    }
     ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
     tio->io->io_hdr.retries = 3;
     tio->lun = sl;
@@ -1013,7 +999,6 @@ tpc_process_register_key(struct tpc_list *list)
     int datalen;
 
     if (list->stage == 1) {
 complete:
         while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
             TAILQ_REMOVE(&list->allio, tio, links);
             ctl_free_io(tio->io);
@@ -1050,10 +1035,6 @@ complete:
     tio->list = list;
     TAILQ_INSERT_TAIL(&list->allio, tio, links);
     tio->io = tpcl_alloc_io();
-    if (tio->io == NULL) {
-        list->error = 1;
-        goto complete;
-    }
     datalen = sizeof(struct scsi_per_res_out_parms);
     list->buf = malloc(datalen, M_CTL, M_WAITOK);
     ctl_scsi_persistent_res_out(tio->io,
@@ -1112,7 +1093,6 @@ tpc_process_wut(struct tpc_list *list)
     uint32_t srcblock, dstblock;
 
     if (list->stage > 0) {
 complete:
         /* Cleanup after previous rounds. */
         while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
             TAILQ_REMOVE(&list->allio, tio, links);
@@ -1184,10 +1164,6 @@ complete:
         tior->list = list;
         TAILQ_INSERT_TAIL(&list->allio, tior, links);
         tior->io = tpcl_alloc_io();
-        if (tior->io == NULL) {
-            list->error = 1;
-            goto complete;
-        }
         ctl_scsi_read_write(tior->io,
                             /*data_ptr*/ &list->buf[donebytes],
                             /*data_len*/ roundbytes,
@@ -1207,10 +1183,6 @@ complete:
         tiow->list = list;
         TAILQ_INSERT_TAIL(&list->allio, tiow, links);
         tiow->io = tpcl_alloc_io();
-        if (tiow->io == NULL) {
-            list->error = 1;
-            goto complete;
-        }
         ctl_scsi_read_write(tiow->io,
                             /*data_ptr*/ &list->buf[donebytes],
                             /*data_len*/ roundbytes,
@@ -1289,10 +1261,6 @@ complete:
         tiow->list = list;
         TAILQ_INSERT_TAIL(&list->allio, tiow, links);
         tiow->io = tpcl_alloc_io();
-        if (tiow->io == NULL) {
-            list->error = 1;
-            goto complete;
-        }
         ctl_scsi_write_same(tiow->io,
                             /*data_ptr*/ list->buf,
                             /*data_len*/ dstblock,
--- a/sys/cam/ctl/scsi_ctl.c
+++ b/sys/cam/ctl/scsi_ctl.c
@@ -72,10 +72,6 @@ __FBSDID("$FreeBSD$");
 #include <cam/ctl/ctl_util.h>
 #include <cam/ctl/ctl_error.h>
 
-typedef enum {
-    CTLFE_CCB_DEFAULT = 0x00
-} ctlfe_ccb_types;
-
 struct ctlfe_softc {
     struct ctl_port port;
     path_id_t path_id;
@@ -189,9 +185,7 @@ SYSCTL_INT(_kern_cam_ctl, OID_AUTO, dma_enabled, CTLFLAG_RW,
        &ctlfe_dma_enabled, 0, "DMA enabled");
 MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");
 
-#define ccb_type    ppriv_field0
-
 /* This is only used in the ATIO */
-#define io_ptr      ppriv_ptr1
+#define io_ptr      ppriv_ptr0
 
 /* This is only used in the CTIO */
 #define ccb_atio    ppriv_ptr1
@@ -546,6 +540,7 @@ ctlferegister(struct cam_periph *periph, void *arg)
 
     for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
         union ccb *new_ccb;
+        union ctl_io *new_io;
 
         new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
                                       M_ZERO|M_NOWAIT);
@@ -553,6 +548,14 @@ ctlferegister(struct cam_periph *periph, void *arg)
             status = CAM_RESRC_UNAVAIL;
             break;
         }
+        new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
+        if (new_io == NULL) {
+            free(new_ccb, M_CTLFE);
+            status = CAM_RESRC_UNAVAIL;
+            break;
+        }
+        new_ccb->ccb_h.io_ptr = new_io;
+
         xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
         new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
         new_ccb->ccb_h.cbfcnp = ctlfedone;
@@ -561,6 +564,7 @@ ctlferegister(struct cam_periph *periph, void *arg)
         softc->atios_sent++;
         status = new_ccb->ccb_h.status;
         if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
+            ctl_free_io(new_io);
             free(new_ccb, M_CTLFE);
             break;
         }
@@ -581,6 +585,7 @@ ctlferegister(struct cam_periph *periph, void *arg)
 
     for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
         union ccb *new_ccb;
+        union ctl_io *new_io;
 
         new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
                                       M_ZERO|M_NOWAIT);
@@ -588,6 +593,13 @@ ctlferegister(struct cam_periph *periph, void *arg)
             status = CAM_RESRC_UNAVAIL;
             break;
         }
+        new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
+        if (new_io == NULL) {
+            free(new_ccb, M_CTLFE);
+            status = CAM_RESRC_UNAVAIL;
+            break;
+        }
+        new_ccb->ccb_h.io_ptr = new_io;
 
         xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
         new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
@@ -766,8 +778,6 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
 
     softc->ccbs_alloced++;
 
-    start_ccb->ccb_h.ccb_type = CTLFE_CCB_DEFAULT;
-
     ccb_h = TAILQ_FIRST(&softc->work_queue);
     if (ccb_h == NULL) {
         softc->ccbs_freed++;
@@ -812,7 +822,6 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
             xpt_print(periph->path, "%s: aborted "
                       "command 0x%04x discarded\n",
                       __func__, io->scsiio.tag_num);
-            ctl_free_io(io);
             /*
              * For a wildcard attachment, commands can
              * come in with a specific target/lun.  Reset
@@ -1038,6 +1047,7 @@ ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
         break;
     }
 
+    ctl_free_io(ccb->ccb_h.io_ptr);
     free(ccb, M_CTLFE);
 
     KASSERT(softc->atios_returned <= softc->atios_sent, ("%s: "
@@ -1139,8 +1149,8 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
     KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
         ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
 #ifdef CTLFE_DEBUG
-    printf("%s: entered, func_code = %#x, type = %#lx\n", __func__,
-           done_ccb->ccb_h.func_code, done_ccb->ccb_h.ccb_type);
+    printf("%s: entered, func_code = %#x\n", __func__,
+           done_ccb->ccb_h.func_code);
 #endif
 
     softc = (struct ctlfe_lun_softc *)periph->softc;
@@ -1180,27 +1190,8 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
          * Allocate a ctl_io, pass it to CTL, and wait for the
          * datamove or done.
         */
-        io = ctl_alloc_io(bus_softc->port.ctl_pool_ref);
-        if (io == NULL) {
-            atio->ccb_h.flags &= ~CAM_DIR_MASK;
-            atio->ccb_h.flags |= CAM_DIR_NONE;
-
-            printf("%s: ctl_alloc_io failed!\n", __func__);
-
-            /*
-             * XXX KDM need to set SCSI_STATUS_BUSY, but there
-             * is no field in the ATIO structure to do that,
-             * and we aren't able to allocate a ctl_io here.
-             * What to do?
-             */
-            atio->sense_len = 0;
-            done_ccb->ccb_h.io_ptr = NULL;
-            TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
-                              periph_links.tqe);
-            xpt_schedule(periph, /*priority*/ 1);
-            break;
-        }
         mtx_unlock(mtx);
+        io = done_ccb->ccb_h.io_ptr;
         ctl_zero_io(io);
 
         /* Save pointers on both sides */
@@ -1296,7 +1287,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
             */
             softc->ccbs_freed++;
             xpt_release_ccb(done_ccb);
-            ctl_free_io(io);
             if (ctlfe_adjust_cdb(atio, srr_off) == 0) {
                 done_ccb = (union ccb *)atio;
                 goto resubmit;
@@ -1330,7 +1320,6 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
         if (done_ccb->ccb_h.flags & CAM_SEND_STATUS) {
             softc->ccbs_freed++;
             xpt_release_ccb(done_ccb);
-            ctl_free_io(io);
             /*
              * For a wildcard attachment, commands can come in
              * with a specific target/lun.  Reset the target
@@ -1473,7 +1462,7 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
         union ctl_io *io;
         struct ccb_immediate_notify *inot;
         cam_status status;
-        int frozen;
+        int frozen, send_ctl_io;
 
         inot = &done_ccb->cin1;
 
@@ -1485,112 +1474,99 @@ ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
             "seq %#x\n", __func__, inot->ccb_h.status,
             inot->tag_id, inot->seq_id);
 
-        io = ctl_alloc_io(bus_softc->port.ctl_pool_ref);
-        if (io != NULL) {
-            int send_ctl_io;
-
-            send_ctl_io = 1;
-
-            ctl_zero_io(io);
-            io->io_hdr.io_type = CTL_IO_TASK;
-            io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
-            inot->ccb_h.io_ptr = io;
-            io->io_hdr.nexus.initid.id = inot->initiator_id;
-            io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
-            io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
-            io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
-            /* XXX KDM should this be the tag_id? */
-            io->taskio.tag_num = inot->seq_id;
-
-            status = inot->ccb_h.status & CAM_STATUS_MASK;
-            switch (status) {
-            case CAM_SCSI_BUS_RESET:
-                io->taskio.task_action = CTL_TASK_BUS_RESET;
-                break;
-            case CAM_BDR_SENT:
-                io->taskio.task_action = CTL_TASK_TARGET_RESET;
-                break;
-            case CAM_MESSAGE_RECV:
-                switch (inot->arg) {
-                case MSG_ABORT_TASK_SET:
-                    io->taskio.task_action =
-                        CTL_TASK_ABORT_TASK_SET;
-                    break;
-                case MSG_TARGET_RESET:
-                    io->taskio.task_action =
-                        CTL_TASK_TARGET_RESET;
-                    break;
-                case MSG_ABORT_TASK:
-                    io->taskio.task_action =
-                        CTL_TASK_ABORT_TASK;
-                    break;
-                case MSG_LOGICAL_UNIT_RESET:
-                    io->taskio.task_action =
-                        CTL_TASK_LUN_RESET;
-                    break;
-                case MSG_CLEAR_TASK_SET:
-                    io->taskio.task_action =
-                        CTL_TASK_CLEAR_TASK_SET;
-                    break;
-                case MSG_CLEAR_ACA:
-                    io->taskio.task_action =
-                        CTL_TASK_CLEAR_ACA;
-                    break;
-                case MSG_NOOP:
-                    send_ctl_io = 0;
-                    break;
-                default:
-                    xpt_print(periph->path, "%s: "
                          "unsupported message 0x%x\n",
                          __func__, inot->arg);
-                    send_ctl_io = 0;
-                    break;
-                }
-                break;
-            case CAM_REQ_ABORTED:
-                /*
-                 * This request was sent back by the driver.
-                 * XXX KDM what do we do here?
-                 */
-                send_ctl_io = 0;
-                break;
-            case CAM_REQ_INVALID:
-            case CAM_PROVIDE_FAIL:
-            default:
-                /*
-                 * We should only get here if we're talking
-                 * to a talking to a SIM that is target
-                 * capable but supports the old API.  In
-                 * that case, we need to just free the CCB.
-                 * If we actually send a notify acknowledge,
-                 * it will send that back with an error as
-                 * well.
-                 */
-
-                if ((status != CAM_REQ_INVALID)
-                 && (status != CAM_PROVIDE_FAIL))
-                    xpt_print(periph->path, "%s: "
                          "unsupported CAM status "
                          "0x%x\n", __func__, status);
-
-                ctl_free_io(io);
-                ctlfe_free_ccb(periph, done_ccb);
-
-                goto out;
-            }
-            if (send_ctl_io != 0) {
-                ctl_queue(io);
-            } else {
-                ctl_free_io(io);
-                done_ccb->ccb_h.status = CAM_REQ_INPROG;
-                done_ccb->ccb_h.func_code =
                    XPT_NOTIFY_ACKNOWLEDGE;
-                xpt_action(done_ccb);
-            }
-        } else {
-            xpt_print(periph->path, "%s: could not allocate "
-                      "ctl_io for immediate notify!\n", __func__);
-            /* requeue this to the adapter */
-            done_ccb->ccb_h.status = CAM_REQ_INPROG;
-            done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
-            xpt_action(done_ccb);
+        io = done_ccb->ccb_h.io_ptr;
+        ctl_zero_io(io);
+
+        send_ctl_io = 1;
+
+        io->io_hdr.io_type = CTL_IO_TASK;
+        io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr =done_ccb;
+        inot->ccb_h.io_ptr = io;
+        io->io_hdr.nexus.initid.id = inot->initiator_id;
+        io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
+        io->io_hdr.nexus.targ_target.id = inot->ccb_h.target_id;
+        io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun;
+        /* XXX KDM should this be the tag_id? */
+        io->taskio.tag_num = inot->seq_id;
+
+        status = inot->ccb_h.status & CAM_STATUS_MASK;
+        switch (status) {
+        case CAM_SCSI_BUS_RESET:
+            io->taskio.task_action = CTL_TASK_BUS_RESET;
+            break;
+        case CAM_BDR_SENT:
+            io->taskio.task_action = CTL_TASK_TARGET_RESET;
+            break;
+        case CAM_MESSAGE_RECV:
+            switch (inot->arg) {
+            case MSG_ABORT_TASK_SET:
+                io->taskio.task_action =
+                    CTL_TASK_ABORT_TASK_SET;
+                break;
+            case MSG_TARGET_RESET:
+                io->taskio.task_action =
+                    CTL_TASK_TARGET_RESET;
+                break;
+            case MSG_ABORT_TASK:
+                io->taskio.task_action =
+                    CTL_TASK_ABORT_TASK;
+                break;
+            case MSG_LOGICAL_UNIT_RESET:
+                io->taskio.task_action =
+                    CTL_TASK_LUN_RESET;
+                break;
+            case MSG_CLEAR_TASK_SET:
+                io->taskio.task_action =
+                    CTL_TASK_CLEAR_TASK_SET;
+                break;
+            case MSG_CLEAR_ACA:
+                io->taskio.task_action =
+                    CTL_TASK_CLEAR_ACA;
+                break;
+            case MSG_NOOP:
+                send_ctl_io = 0;
+                break;
+            default:
+                xpt_print(periph->path,
+                    "%s: unsupported message 0x%x\n",
+                    __func__, inot->arg);
+                send_ctl_io = 0;
+                break;
+            }
+            break;
+        case CAM_REQ_ABORTED:
+            /*
+             * This request was sent back by the driver.
+             * XXX KDM what do we do here?
+             */
+            send_ctl_io = 0;
+            break;
+        case CAM_REQ_INVALID:
+        case CAM_PROVIDE_FAIL:
+        default:
+            /*
+             * We should only get here if we're talking
+             * to a talking to a SIM that is target
+             * capable but supports the old API.  In
+             * that case, we need to just free the CCB.
+             * If we actually send a notify acknowledge,
+             * it will send that back with an error as
+             * well.
+             */
+            if ((status != CAM_REQ_INVALID)
+             && (status != CAM_PROVIDE_FAIL))
+                xpt_print(periph->path,
+                    "%s: unsupported CAM status 0x%x\n",
+                    __func__, status);
+            ctlfe_free_ccb(periph, done_ccb);
+            goto out;
+        }
+        if (send_ctl_io != 0) {
+            ctl_queue(io);
+        } else {
+            done_ccb->ccb_h.status = CAM_REQ_INPROG;
+            done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
+            xpt_action(done_ccb);
@@ -2166,7 +2142,6 @@ ctlfe_datamove_done(union ctl_io *io)
             ccb->ccb_h.status = CAM_REQ_INPROG;
             ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
             xpt_action(ccb);
-            ctl_free_io(io);
         } else {
             if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
                 io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;