Add missing *_destroy/*_fini calls

The proposed debugging enhancements in zfsonlinux/spl#587
identified the following missing *_destroy/*_fini calls.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Gvozden Neskovic <neskovic@gmail.com>
Closes #5428
Gvozden Neskovic authored on 2016-11-26 21:30:44 +01:00; committed by Brian Behlendorf
parent 8fa5250f5d
commit c17486b217
13 changed files with 68 additions and 11 deletions
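
Every hunk below applies the same discipline: a lock or condition variable that is set up with mutex_init()/cv_init() gets the matching mutex_destroy()/cv_destroy() on the corresponding destroy/fini path, before the enclosing object is freed. A minimal sketch of that pairing, assuming the SPL's kmutex_t/kcondvar_t primitives; the example_state type and functions are hypothetical, not part of this change:

#include <sys/mutex.h>
#include <sys/condvar.h>

typedef struct example_state {
	kmutex_t	es_lock;	/* protects hypothetical state */
	kcondvar_t	es_cv;		/* signalled when that state changes */
} example_state_t;

static void
example_state_init(example_state_t *es)
{
	mutex_init(&es->es_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&es->es_cv, NULL, CV_DEFAULT, NULL);
}

static void
example_state_fini(example_state_t *es)
{
	/* Tear down in reverse order of initialization. */
	cv_destroy(&es->es_cv);
	mutex_destroy(&es->es_lock);
}

Destroying in reverse order of initialization keeps the fini path symmetric with the init path; the hunks below add exactly this kind of teardown to the ICP tables and scheduler, the rate limiter, objsets, deadlists, pools, vdevs, ZAPs and the zfs_create_fs() error path.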


@@ -729,6 +729,8 @@ run_sweep(void)
 		    (ulong_t)tried_comb);
 	}
 
+	mutex_destroy(&sem_mtx);
+
 	return (sweep_state == SWEEP_ERROR ? SWEEP_ERROR : 0);
 }


@@ -33,5 +33,6 @@ typedef struct {
 int zfs_ratelimit(zfs_ratelimit_t *rl);
 void zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int burst,
     unsigned int interval);
+void zfs_ratelimit_fini(zfs_ratelimit_t *rl);
 
 #endif /* _SYS_ZFS_RATELIMIT_H */


@@ -140,7 +140,21 @@ kcf_mech_hash_find(char *mechname)
 void
 kcf_destroy_mech_tabs(void)
 {
-	if (kcf_mech_hash) mod_hash_destroy_hash(kcf_mech_hash);
+	int i, max;
+	kcf_ops_class_t class;
+	kcf_mech_entry_t *me_tab;
+
+	if (kcf_mech_hash)
+		mod_hash_destroy_hash(kcf_mech_hash);
+
+	mutex_destroy(&kcf_mech_tabs_lock);
+
+	for (class = KCF_FIRST_OPSCLASS; class <= KCF_LAST_OPSCLASS; class++) {
+		max = kcf_mech_tabs_tab[class].met_size;
+		me_tab = kcf_mech_tabs_tab[class].met_tab;
+		for (i = 0; i < max; i++)
+			mutex_destroy(&(me_tab[i].me_mutex));
+	}
 }
 
 /*


@@ -67,6 +67,8 @@ static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
 void
 kcf_prov_tab_destroy(void)
 {
+	mutex_destroy(&prov_tab_mutex);
+
 	if (prov_tab)
 		kmem_free(prov_tab, prov_tab_max *
 		    sizeof (kcf_provider_desc_t *));
@@ -485,6 +487,10 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
 	if (desc->pd_sched_info.ks_taskq != NULL)
 		taskq_destroy(desc->pd_sched_info.ks_taskq);
 
 	mutex_destroy(&desc->pd_lock);
+	cv_destroy(&desc->pd_resume_cv);
+	cv_destroy(&desc->pd_remove_cv);
+
 	kmem_free(desc, sizeof (kcf_provider_desc_t));
 }


@@ -1056,17 +1056,28 @@ kcf_sched_destroy(void)
 	if (kcf_misc_kstat)
 		kstat_delete(kcf_misc_kstat);
 
-	if (kcfpool)
-		kmem_free(kcfpool, sizeof (kcf_pool_t));
+	if (kcfpool) {
+		mutex_destroy(&kcfpool->kp_thread_lock);
+		cv_destroy(&kcfpool->kp_nothr_cv);
+		mutex_destroy(&kcfpool->kp_user_lock);
+		cv_destroy(&kcfpool->kp_user_cv);
 
-	for (i = 0; i < REQID_TABLES; i++) {
-		if (kcf_reqid_table[i])
-			kmem_free(kcf_reqid_table[i],
-			    sizeof (kcf_reqid_table_t));
+		kmem_free(kcfpool, sizeof (kcf_pool_t));
 	}
 
-	if (gswq)
+	for (i = 0; i < REQID_TABLES; i++) {
+		if (kcf_reqid_table[i]) {
+			mutex_destroy(&(kcf_reqid_table[i]->rt_lock));
+			kmem_free(kcf_reqid_table[i],
+			    sizeof (kcf_reqid_table_t));
+		}
+	}
+
+	if (gswq) {
+		mutex_destroy(&gswq->gs_lock);
+		cv_destroy(&gswq->gs_cv);
 		kmem_free(gswq, sizeof (kcf_global_swq_t));
+	}
 
 	if (kcf_context_cache)
 		kmem_cache_destroy(kcf_context_cache);
@@ -1074,6 +1085,9 @@ kcf_sched_destroy(void)
 		kmem_cache_destroy(kcf_areq_cache);
 	if (kcf_sreq_cache)
 		kmem_cache_destroy(kcf_sreq_cache);
+
+	mutex_destroy(&ntfy_list_lock);
+	cv_destroy(&ntfy_list_cv);
 }
 
 /*


@@ -225,6 +225,17 @@ zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int burst,
 	mutex_init(&rl->lock, NULL, MUTEX_DEFAULT, NULL);
 }
 
+/*
+ * Finalize rate limit struct
+ *
+ * rl: zfs_ratelimit_t struct
+ */
+void
+zfs_ratelimit_fini(zfs_ratelimit_t *rl)
+{
+	mutex_destroy(&rl->lock);
+}
+
 /*
  * Re-implementation of the kernel's __ratelimit() function
  *
@@ -275,5 +286,6 @@ EXPORT_SYMBOL(zfs_zpl_version_map);
 EXPORT_SYMBOL(zfs_spa_version_map);
 EXPORT_SYMBOL(zfs_history_event_names);
 EXPORT_SYMBOL(zfs_ratelimit_init);
+EXPORT_SYMBOL(zfs_ratelimit_fini);
 EXPORT_SYMBOL(zfs_ratelimit);
 #endif
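
Together with the declaration added to sys/zfs_ratelimit.h above, this gives zfs_ratelimit_t a complete init/use/fini lifecycle; the vdev_free() hunk further down adds the matching zfs_ratelimit_fini() calls for vdev_delay_rl and vdev_checksum_rl. A hedged usage sketch only: the caller below is invented, the burst/interval values are illustrative, and the return-value check follows the __ratelimit() convention referenced in the comment above.

#include <sys/zfs_ratelimit.h>

/* Hypothetical caller, not taken from the ZFS sources. */
static void
example_ratelimit_user(void)
{
	zfs_ratelimit_t rl;

	zfs_ratelimit_init(&rl, 10, 5);	/* illustrative burst/interval values */

	if (zfs_ratelimit(&rl)) {
		/* Within the limit: go ahead and emit the event. */
	}

	zfs_ratelimit_fini(&rl);	/* new here: destroys rl->lock */
}

Since zfs_ratelimit_fini() destroys rl->lock, it has to run after the last zfs_ratelimit() call on that struct.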


@@ -846,6 +846,7 @@ dmu_objset_evict_done(objset_t *os)
 	mutex_destroy(&os->os_userused_lock);
 	mutex_destroy(&os->os_obj_lock);
 	mutex_destroy(&os->os_user_ptr_lock);
+	mutex_destroy(&os->os_upgrade_lock);
 	for (int i = 0; i < TXG_SIZE; i++) {
 		multilist_destroy(os->os_dirty_dnodes[i]);
 	}


@@ -120,6 +120,7 @@ dsl_deadlist_close(dsl_deadlist_t *dl)
 	dsl_deadlist_entry_t *dle;
 
 	dl->dl_os = NULL;
+	mutex_destroy(&dl->dl_lock);
 
 	if (dl->dl_oldfmt) {
 		dl->dl_oldfmt = B_FALSE;
@@ -136,7 +137,6 @@ dsl_deadlist_close(dsl_deadlist_t *dl)
 		avl_destroy(&dl->dl_tree);
 	}
 	dmu_buf_rele(dl->dl_dbuf, dl);
-	mutex_destroy(&dl->dl_lock);
 	dl->dl_dbuf = NULL;
 	dl->dl_phys = NULL;
 }


@@ -348,6 +348,7 @@ dsl_pool_close(dsl_pool_t *dp)
 	rrw_destroy(&dp->dp_config_rwlock);
 	mutex_destroy(&dp->dp_lock);
 	cv_destroy(&dp->dp_spaceavail_cv);
+	taskq_destroy(dp->dp_iput_taskq);
 
 	if (dp->dp_blkstats)
 		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));


@@ -710,6 +710,9 @@ vdev_free(vdev_t *vd)
 	mutex_destroy(&vd->vdev_stat_lock);
 	mutex_destroy(&vd->vdev_probe_lock);
 
+	zfs_ratelimit_fini(&vd->vdev_delay_rl);
+	zfs_ratelimit_fini(&vd->vdev_checksum_rl);
+
 	if (vd == spa->spa_root_vdev)
 		spa->spa_root_vdev = NULL;


@@ -84,7 +84,7 @@ fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
 	zap->zap_dbu.dbu_evict_func_sync = zap_evict_sync;
 	zap->zap_dbu.dbu_evict_func_async = NULL;
 
-	mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
+	mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT, 0);
 	zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;
 
 	zp = zap_f_phys(zap);


@@ -404,7 +404,8 @@ mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
 	zap->zap_dbuf = db;
 
 	if (zap_block_type != ZBT_MICRO) {
-		mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
+		mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT,
+		    0);
 		zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
 		if (zap_block_type != ZBT_HEADER || zap_magic != ZAP_MAGIC) {
 			winner = NULL;	/* No actual winner here... */


@@ -1913,6 +1913,8 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
 		mutex_destroy(&zfsvfs->z_hold_locks[i]);
 	}
 	mutex_destroy(&zfsvfs->z_znodes_lock);
+	vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
+	vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
 	kmem_free(sb, sizeof (struct super_block));