Remove all spin_is_locked calls

On systems with CONFIG_SMP turned off, spin_is_locked always returns
false causing these assertions to fail. Remove them as suggested in
zfsonlinux/zfs#6558.

Reviewed-by: George Melikov <mail@gmelikov.ru>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: James Cowgill <james.cowgill@mips.com>
Closes #665
This commit is contained in:
James Cowgill 2017-10-30 18:16:56 +00:00 committed by Brian Behlendorf
parent 8be3688999
commit 35a44fcb8d
4 changed files with 0 additions and 20 deletions

View File

@@ -382,7 +382,6 @@ spl_slab_free(spl_kmem_slab_t *sks,
	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);
-	ASSERT(spin_is_locked(&skc->skc_lock));
	/*
	 * Update slab/objects counters in the cache, then remove the
@@ -583,7 +582,6 @@ __spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);
-	ASSERT(spin_is_locked(&skc->skc_lock));
	for (i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);
@@ -1125,7 +1123,6 @@ spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);
-	ASSERT(spin_is_locked(&skc->skc_lock));
	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
@@ -1396,7 +1393,6 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
	spl_kmem_obj_t *sko = NULL;
	ASSERT(skc->skc_magic == SKC_MAGIC);
-	ASSERT(spin_is_locked(&skc->skc_lock));
	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);

View File

@@ -103,7 +103,6 @@ task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
	int count = 0;
	ASSERT(tq);
-	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
@@ -168,7 +167,6 @@ task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
-	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));
@@ -185,7 +183,6 @@ task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
-	ASSERT(spin_is_locked(&tq->tq_lock));
	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);
@@ -259,7 +256,6 @@ taskq_lowest_id(taskq_t *tq)
	taskq_thread_t *tqt;
	ASSERT(tq);
-	ASSERT(spin_is_locked(&tq->tq_lock));
	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
@@ -297,7 +293,6 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
	ASSERT(tq);
	ASSERT(tqt);
-	ASSERT(spin_is_locked(&tq->tq_lock));
	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
@@ -320,8 +315,6 @@ taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
	struct list_head *l;
	taskq_ent_t *t;
-	ASSERT(spin_is_locked(&tq->tq_lock));
	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);
@@ -348,8 +341,6 @@ taskq_find(taskq_t *tq, taskqid_t id)
	struct list_head *l;
	taskq_ent_t *t;
-	ASSERT(spin_is_locked(&tq->tq_lock));
	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);
@@ -751,8 +742,6 @@ taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;
-	ASSERT(spin_is_locked(&tq->tq_lock));
	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
@@ -817,8 +806,6 @@ taskq_thread_spawn(taskq_t *tq)
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
-	ASSERT(spin_is_locked(&tq->tq_lock));
	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

View File

@@ -315,7 +315,6 @@ tsd_hash_add_pid(tsd_hash_table_t *table, pid_t pid)
static void
tsd_hash_del(tsd_hash_table_t *table, tsd_hash_entry_t *entry)
{
-	ASSERT(spin_is_locked(&table->ht_lock));
	hlist_del(&entry->he_list);
	list_del_init(&entry->he_key_list);
	list_del_init(&entry->he_pid_list);

View File

@@ -427,8 +427,6 @@ file_find(int fd, struct task_struct *task)
{
	file_t *fp;
-	ASSERT(spin_is_locked(&vn_file_lock));
	list_for_each_entry(fp, &vn_file_list, f_list) {
		if (fd == fp->f_fd && fp->f_task == task) {
			ASSERT(atomic_read(&fp->f_ref) != 0);