Introduce separate mutex lock to protect CTL I/O pools, slightly

reducing global CTL lock scope and congestion.

While there, simplify CTL I/O pools KPI, hiding implementation details.
This commit is contained in:
Alexander Motin 2013-11-11 08:27:20 +00:00
parent d2201d13a7
commit 8c6d5f8282
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=257946
3 changed files with 66 additions and 103 deletions

View File

@ -360,7 +360,6 @@ static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
int can_wait); int can_wait);
static void ctl_kfree_io(union ctl_io *io); static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */ #endif /* unused */
static void ctl_free_io_internal(union ctl_io *io, int have_lock);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
struct ctl_be_lun *be_lun, struct ctl_id target_id); struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun); static int ctl_free_lun(struct ctl_lun *lun);
@ -998,6 +997,7 @@ ctl_init(void)
"Report no lun possible for invalid LUNs"); "Report no lun possible for invalid LUNs");
mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
softc->open_count = 0; softc->open_count = 0;
/* /*
@ -1057,7 +1057,7 @@ ctl_init(void)
CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) { CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
printf("ctl: can't allocate %d entry emergency pool, " printf("ctl: can't allocate %d entry emergency pool, "
"exiting\n", CTL_POOL_ENTRIES_EMERGENCY); "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
ctl_pool_free(softc, internal_pool); ctl_pool_free(internal_pool);
return (ENOMEM); return (ENOMEM);
} }
@ -1066,8 +1066,8 @@ ctl_init(void)
{ {
printf("ctl: can't allocate %d entry other SC pool, " printf("ctl: can't allocate %d entry other SC pool, "
"exiting\n", CTL_POOL_ENTRIES_OTHER_SC); "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
ctl_pool_free(softc, internal_pool); ctl_pool_free(internal_pool);
ctl_pool_free(softc, emergency_pool); ctl_pool_free(emergency_pool);
return (ENOMEM); return (ENOMEM);
} }
@ -1075,12 +1075,6 @@ ctl_init(void)
softc->emergency_pool = emergency_pool; softc->emergency_pool = emergency_pool;
softc->othersc_pool = other_pool; softc->othersc_pool = other_pool;
mtx_lock(&softc->ctl_lock);
ctl_pool_acquire(internal_pool);
ctl_pool_acquire(emergency_pool);
ctl_pool_acquire(other_pool);
mtx_unlock(&softc->ctl_lock);
/* /*
* We used to allocate a processor LUN here. The new scheme is to * We used to allocate a processor LUN here. The new scheme is to
* just let the user allocate LUNs as he sees fit. * just let the user allocate LUNs as he sees fit.
@ -1097,10 +1091,10 @@ ctl_init(void)
printf("error creating CTL work thread!\n"); printf("error creating CTL work thread!\n");
mtx_lock(&softc->ctl_lock); mtx_lock(&softc->ctl_lock);
ctl_free_lun(lun); ctl_free_lun(lun);
ctl_pool_free(softc, internal_pool);
ctl_pool_free(softc, emergency_pool);
ctl_pool_free(softc, other_pool);
mtx_unlock(&softc->ctl_lock); mtx_unlock(&softc->ctl_lock);
ctl_pool_free(internal_pool);
ctl_pool_free(emergency_pool);
ctl_pool_free(other_pool);
return (error); return (error);
} }
if (bootverbose) if (bootverbose)
@ -1154,7 +1148,7 @@ ctl_shutdown(void)
{ {
struct ctl_softc *softc; struct ctl_softc *softc;
struct ctl_lun *lun, *next_lun; struct ctl_lun *lun, *next_lun;
struct ctl_io_pool *pool, *next_pool; struct ctl_io_pool *pool;
softc = (struct ctl_softc *)control_softc; softc = (struct ctl_softc *)control_softc;
@ -1171,6 +1165,8 @@ ctl_shutdown(void)
ctl_free_lun(lun); ctl_free_lun(lun);
} }
mtx_unlock(&softc->ctl_lock);
/* /*
* This will rip the rug out from under any FETDs or anyone else * This will rip the rug out from under any FETDs or anyone else
* that has a pool allocated. Since we increment our module * that has a pool allocated. Since we increment our module
@ -1179,18 +1175,14 @@ ctl_shutdown(void)
* able to unload the CTL module until client modules have * able to unload the CTL module until client modules have
* successfully unloaded. * successfully unloaded.
*/ */
for (pool = STAILQ_FIRST(&softc->io_pools); pool != NULL; while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
pool = next_pool) { ctl_pool_free(pool);
next_pool = STAILQ_NEXT(pool, links);
ctl_pool_free(softc, pool);
}
mtx_unlock(&softc->ctl_lock);
#if 0 #if 0
ctl_shutdown_thread(softc->work_thread); ctl_shutdown_thread(softc->work_thread);
#endif #endif
mtx_destroy(&softc->pool_lock);
mtx_destroy(&softc->ctl_lock); mtx_destroy(&softc->ctl_lock);
destroy_dev(softc->dev); destroy_dev(softc->dev);
@ -3367,11 +3359,12 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
pool->type = pool_type; pool->type = pool_type;
pool->ctl_softc = ctl_softc; pool->ctl_softc = ctl_softc;
mtx_lock(&ctl_softc->ctl_lock); mtx_lock(&ctl_softc->pool_lock);
pool->id = ctl_softc->cur_pool_id++; pool->id = ctl_softc->cur_pool_id++;
mtx_unlock(&ctl_softc->ctl_lock); mtx_unlock(&ctl_softc->pool_lock);
pool->flags = CTL_POOL_FLAG_NONE; pool->flags = CTL_POOL_FLAG_NONE;
pool->refcount = 1; /* Reference for validity. */
STAILQ_INIT(&pool->free_queue); STAILQ_INIT(&pool->free_queue);
/* /*
@ -3407,7 +3400,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
free(pool, M_CTL); free(pool, M_CTL);
goto bailout; goto bailout;
} }
mtx_lock(&ctl_softc->ctl_lock); mtx_lock(&ctl_softc->pool_lock);
ctl_softc->num_pools++; ctl_softc->num_pools++;
STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
/* /*
@ -3426,7 +3419,7 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
MOD_INC_USE_COUNT; MOD_INC_USE_COUNT;
#endif #endif
mtx_unlock(&ctl_softc->ctl_lock); mtx_unlock(&ctl_softc->pool_lock);
*npool = pool; *npool = pool;
@ -3435,14 +3428,11 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
return (retval); return (retval);
} }
int static int
ctl_pool_acquire(struct ctl_io_pool *pool) ctl_pool_acquire(struct ctl_io_pool *pool)
{ {
mtx_assert(&control_softc->ctl_lock, MA_OWNED); mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
if (pool == NULL)
return (-EINVAL);
if (pool->flags & CTL_POOL_FLAG_INVALID) if (pool->flags & CTL_POOL_FLAG_INVALID)
return (-EINVAL); return (-EINVAL);
@ -3452,51 +3442,21 @@ ctl_pool_acquire(struct ctl_io_pool *pool)
return (0); return (0);
} }
int static void
ctl_pool_invalidate(struct ctl_io_pool *pool)
{
mtx_assert(&control_softc->ctl_lock, MA_OWNED);
if (pool == NULL)
return (-EINVAL);
pool->flags |= CTL_POOL_FLAG_INVALID;
return (0);
}
int
ctl_pool_release(struct ctl_io_pool *pool) ctl_pool_release(struct ctl_io_pool *pool)
{ {
struct ctl_softc *ctl_softc = pool->ctl_softc;
union ctl_io *io;
mtx_assert(&control_softc->ctl_lock, MA_OWNED); mtx_assert(&ctl_softc->pool_lock, MA_OWNED);
if (pool == NULL) if (--pool->refcount != 0)
return (-EINVAL); return;
if ((--pool->refcount == 0) while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
&& (pool->flags & CTL_POOL_FLAG_INVALID)) { STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
ctl_pool_free(pool->ctl_softc, pool);
}
return (0);
}
void
ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool)
{
union ctl_io *cur_io, *next_io;
mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
cur_io != NULL; cur_io = next_io) {
next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
links); links);
STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, ctl_io_hdr, free(io, M_CTL);
links);
free(cur_io, M_CTL);
} }
STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
@ -3515,6 +3475,21 @@ ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool)
free(pool, M_CTL); free(pool, M_CTL);
} }
void
ctl_pool_free(struct ctl_io_pool *pool)
{
struct ctl_softc *ctl_softc;
if (pool == NULL)
return;
ctl_softc = pool->ctl_softc;
mtx_lock(&ctl_softc->pool_lock);
pool->flags |= CTL_POOL_FLAG_INVALID;
ctl_pool_release(pool);
mtx_unlock(&ctl_softc->pool_lock);
}
/* /*
* This routine does not block (except for spinlocks of course). * This routine does not block (except for spinlocks of course).
* It tries to allocate a ctl_io union from the caller's pool as quickly as * It tries to allocate a ctl_io union from the caller's pool as quickly as
@ -3539,7 +3514,7 @@ ctl_alloc_io(void *pool_ref)
ctl_softc = pool->ctl_softc; ctl_softc = pool->ctl_softc;
mtx_lock(&ctl_softc->ctl_lock); mtx_lock(&ctl_softc->pool_lock);
/* /*
* First, try to get the io structure from the user's pool. * First, try to get the io structure from the user's pool.
*/ */
@ -3549,7 +3524,7 @@ ctl_alloc_io(void *pool_ref)
STAILQ_REMOVE_HEAD(&pool->free_queue, links); STAILQ_REMOVE_HEAD(&pool->free_queue, links);
pool->total_allocated++; pool->total_allocated++;
pool->free_ctl_io--; pool->free_ctl_io--;
mtx_unlock(&ctl_softc->ctl_lock); mtx_unlock(&ctl_softc->pool_lock);
return (io); return (io);
} else } else
ctl_pool_release(pool); ctl_pool_release(pool);
@ -3572,14 +3547,14 @@ ctl_alloc_io(void *pool_ref)
STAILQ_REMOVE_HEAD(&npool->free_queue, links); STAILQ_REMOVE_HEAD(&npool->free_queue, links);
npool->total_allocated++; npool->total_allocated++;
npool->free_ctl_io--; npool->free_ctl_io--;
mtx_unlock(&ctl_softc->ctl_lock); mtx_unlock(&ctl_softc->pool_lock);
return (io); return (io);
} else } else
ctl_pool_release(npool); ctl_pool_release(npool);
} }
/* Drop the spinlock before we malloc */ /* Drop the spinlock before we malloc */
mtx_unlock(&ctl_softc->ctl_lock); mtx_unlock(&ctl_softc->pool_lock);
/* /*
* The emergency pool (if it exists) didn't have one, so try an * The emergency pool (if it exists) didn't have one, so try an
@ -3592,7 +3567,7 @@ ctl_alloc_io(void *pool_ref)
* ctl_io to its list when it gets freed. * ctl_io to its list when it gets freed.
*/ */
if (emergency_pool != NULL) { if (emergency_pool != NULL) {
mtx_lock(&ctl_softc->ctl_lock); mtx_lock(&ctl_softc->pool_lock);
if (ctl_pool_acquire(emergency_pool) == 0) { if (ctl_pool_acquire(emergency_pool) == 0) {
io->io_hdr.pool = emergency_pool; io->io_hdr.pool = emergency_pool;
emergency_pool->total_ctl_io++; emergency_pool->total_ctl_io++;
@ -3604,7 +3579,7 @@ ctl_alloc_io(void *pool_ref)
*/ */
emergency_pool->total_allocated++; emergency_pool->total_allocated++;
} }
mtx_unlock(&ctl_softc->ctl_lock); mtx_unlock(&ctl_softc->pool_lock);
} else } else
io->io_hdr.pool = NULL; io->io_hdr.pool = NULL;
} }
@ -3612,8 +3587,8 @@ ctl_alloc_io(void *pool_ref)
return (io); return (io);
} }
static void void
ctl_free_io_internal(union ctl_io *io, int have_lock) ctl_free_io(union ctl_io *io)
{ {
if (io == NULL) if (io == NULL)
return; return;
@ -3634,8 +3609,7 @@ ctl_free_io_internal(union ctl_io *io, int have_lock)
pool = (struct ctl_io_pool *)io->io_hdr.pool; pool = (struct ctl_io_pool *)io->io_hdr.pool;
if (have_lock == 0) mtx_lock(&pool->ctl_softc->pool_lock);
mtx_lock(&pool->ctl_softc->ctl_lock);
#if 0 #if 0
save_flags(xflags); save_flags(xflags);
@ -3672,8 +3646,7 @@ ctl_free_io_internal(union ctl_io *io, int have_lock)
pool->total_freed++; pool->total_freed++;
pool->free_ctl_io++; pool->free_ctl_io++;
ctl_pool_release(pool); ctl_pool_release(pool);
if (have_lock == 0) mtx_unlock(&pool->ctl_softc->pool_lock);
mtx_unlock(&pool->ctl_softc->ctl_lock);
} else { } else {
/* /*
* Otherwise, just free it. We probably malloced it and * Otherwise, just free it. We probably malloced it and
@ -3684,12 +3657,6 @@ ctl_free_io_internal(union ctl_io *io, int have_lock)
} }
void
ctl_free_io(union ctl_io *io)
{
ctl_free_io_internal(io, /*have_lock*/ 0);
}
void void
ctl_zero_io(union ctl_io *io) ctl_zero_io(union ctl_io *io)
{ {
@ -4496,7 +4463,7 @@ ctl_free_lun(struct ctl_lun *lun)
io = next_io) { io = next_io) {
next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links); next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
ctl_free_io_internal(io, /*have_lock*/ 1); ctl_free_io(io);
} }
softc->num_luns--; softc->num_luns--;
@ -10211,7 +10178,7 @@ ctl_failover(void)
TAILQ_REMOVE(&lun->ooa_queue, TAILQ_REMOVE(&lun->ooa_queue,
&io->io_hdr, ooa_links); &io->io_hdr, ooa_links);
ctl_free_io_internal(io, 1); ctl_free_io(io);
} }
} }
@ -10227,7 +10194,7 @@ ctl_failover(void)
&io->io_hdr, &io->io_hdr,
ooa_links); ooa_links);
ctl_free_io_internal(io, 1); ctl_free_io(io);
} }
} }
ctl_check_blocked(lun); ctl_check_blocked(lun);
@ -11118,7 +11085,7 @@ ctl_run_task_queue(struct ctl_softc *ctl_softc)
io->taskio.tag_num : io->scsiio.tag_num); io->taskio.tag_num : io->scsiio.tag_num);
STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr, STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
ctl_io_hdr, links); ctl_io_hdr, links);
ctl_free_io_internal(io, 1); ctl_free_io(io);
break; break;
} }
} }
@ -11215,7 +11182,7 @@ ctl_handle_isc(union ctl_io *io)
break; break;
} }
if (free_io) if (free_io)
ctl_free_io_internal(io, 0); ctl_free_io(io);
} }
@ -12363,7 +12330,7 @@ ctl_process_done(union ctl_io *io, int have_lock)
case CTL_IO_TASK: case CTL_IO_TASK:
ctl_io_error_print(io, NULL); ctl_io_error_print(io, NULL);
if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
ctl_free_io_internal(io, /*have_lock*/ 0); ctl_free_io(io);
else else
fe_done(io); fe_done(io);
return (CTL_RETVAL_COMPLETE); return (CTL_RETVAL_COMPLETE);
@ -12682,7 +12649,7 @@ ctl_process_done(union ctl_io *io, int have_lock)
/* XXX do something here */ /* XXX do something here */
} }
ctl_free_io_internal(io, /*have_lock*/ 0); ctl_free_io(io);
} else } else
fe_done(io); fe_done(io);

View File

@ -114,7 +114,6 @@ ctl_frontend_register(struct ctl_frontend *fe, int master_shelf)
fe->targ_port = port_num + (master_shelf!=0 ? 0 : CTL_MAX_PORTS); fe->targ_port = port_num + (master_shelf!=0 ? 0 : CTL_MAX_PORTS);
fe->max_initiators = CTL_MAX_INIT_PER_PORT; fe->max_initiators = CTL_MAX_INIT_PER_PORT;
STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links); STAILQ_INSERT_TAIL(&control_softc->fe_list, fe, links);
ctl_pool_acquire(pool);
control_softc->ctl_ports[port_num] = fe; control_softc->ctl_ports[port_num] = fe;
mtx_unlock(&control_softc->ctl_lock); mtx_unlock(&control_softc->ctl_lock);
@ -141,10 +140,6 @@ ctl_frontend_deregister(struct ctl_frontend *fe)
} }
mtx_lock(&control_softc->ctl_lock); mtx_lock(&control_softc->ctl_lock);
ctl_pool_invalidate(pool);
ctl_pool_release(pool);
STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links); STAILQ_REMOVE(&control_softc->fe_list, fe, ctl_frontend, links);
control_softc->num_frontends--; control_softc->num_frontends--;
port_num = (fe->targ_port < CTL_MAX_PORTS) ? fe->targ_port : port_num = (fe->targ_port < CTL_MAX_PORTS) ? fe->targ_port :
@ -152,6 +147,9 @@ ctl_frontend_deregister(struct ctl_frontend *fe)
ctl_clear_mask(&control_softc->ctl_port_mask, port_num); ctl_clear_mask(&control_softc->ctl_port_mask, port_num);
control_softc->ctl_ports[port_num] = NULL; control_softc->ctl_ports[port_num] = NULL;
mtx_unlock(&control_softc->ctl_lock); mtx_unlock(&control_softc->ctl_lock);
ctl_pool_free(pool);
bailout: bailout:
return (retval); return (retval);
} }

View File

@ -448,6 +448,7 @@ struct ctl_softc {
struct ctl_frontend *ctl_ports[CTL_MAX_PORTS]; struct ctl_frontend *ctl_ports[CTL_MAX_PORTS];
uint32_t num_backends; uint32_t num_backends;
STAILQ_HEAD(, ctl_backend_driver) be_list; STAILQ_HEAD(, ctl_backend_driver) be_list;
struct mtx pool_lock;
uint32_t num_pools; uint32_t num_pools;
uint32_t cur_pool_id; uint32_t cur_pool_id;
STAILQ_HEAD(, ctl_io_pool) io_pools; STAILQ_HEAD(, ctl_io_pool) io_pools;
@ -462,10 +463,7 @@ extern struct ctl_cmd_entry ctl_cmd_table[];
uint32_t ctl_get_initindex(struct ctl_nexus *nexus); uint32_t ctl_get_initindex(struct ctl_nexus *nexus);
int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, int ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
uint32_t total_ctl_io, struct ctl_io_pool **npool); uint32_t total_ctl_io, struct ctl_io_pool **npool);
int ctl_pool_acquire(struct ctl_io_pool *pool); void ctl_pool_free(struct ctl_io_pool *pool);
int ctl_pool_invalidate(struct ctl_io_pool *pool);
int ctl_pool_release(struct ctl_io_pool *pool);
void ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool);
int ctl_scsi_release(struct ctl_scsiio *ctsio); int ctl_scsi_release(struct ctl_scsiio *ctsio);
int ctl_scsi_reserve(struct ctl_scsiio *ctsio); int ctl_scsi_reserve(struct ctl_scsiio *ctsio);
int ctl_start_stop(struct ctl_scsiio *ctsio); int ctl_start_stop(struct ctl_scsiio *ctsio);