Remove all the checks on curthread != NULL, with the exception of some MD
trap checks (e.g. printtrap()).

Generally this check is not needed anymore, as there is no legitimate case
where curthread can be NULL once the pcpu 0 area has been properly
initialized.

Reviewed by:	bde, jhb
MFC after:	1 week
Author:	Attilio Rao
Date:	2012-09-13 22:26:22 +00:00
Parent:	60149b5cce
Commit:	0a15e5d30d
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=240475
8 changed files with 4 additions and 24 deletions
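
Every file below applies the same one-line simplification: the defensive NULL
test on curthread is dropped and only the assignment, where one is needed, is
kept. The guarantee this relies on is that MD startup installs thread0 as the
boot CPU's curthread while setting up the pcpu 0 area, long before any of these
paths can run; the MD trap checks called out above remain defensive since a
trap may fire before, or in spite of, that setup. A minimal sketch of both
halves follows — the setup lines are illustrative of the MD boot sequence, not
a verbatim excerpt from any particular architecture:

	/* Early MD boot (illustrative; exact call sites differ per arch):
	 * once the pcpu 0 area holds thread0, curthread can never be NULL. */
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Hence the pattern removed throughout this commit ... */
	if ((td = curthread) == NULL)
		return (1);

	/* ... collapses to a plain assignment: */
	td = curthread;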

sys/dev/hwpmc/hwpmc_arm.c

@@ -75,12 +75,10 @@ pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
 	KASSERT(TRAPF_USERMODE(tf) == 0,("[arm,%d] not a kernel backtrace",
 	    __LINE__));
+	td = curthread;
 	pc = PMC_TRAPFRAME_TO_PC(tf);
 	*cc++ = pc;
-	if ((td = curthread) == NULL)
-		return (1);
 	if (maxsamples <= 1)
 		return (1);
@@ -126,12 +124,10 @@ pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
 	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
 	    __LINE__, (void *) tf));
+	td = curthread;
 	pc = PMC_TRAPFRAME_TO_PC(tf);
 	*cc++ = pc;
-	if ((td = curthread) == NULL)
-		return (1);
 	if (maxsamples <= 1)
 		return (1);

sys/dev/hwpmc/hwpmc_x86.c

@@ -161,6 +161,7 @@ pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
 	KASSERT(TRAPF_USERMODE(tf) == 0,("[x86,%d] not a kernel backtrace",
 	    __LINE__));
+	td = curthread;
 	pc = PMC_TRAPFRAME_TO_PC(tf);
 	fp = PMC_TRAPFRAME_TO_FP(tf);
 	sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);
@@ -168,9 +169,6 @@ pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
 	*cc++ = pc;
 	r = fp + sizeof(uintptr_t);	/* points to return address */
-	if ((td = curthread) == NULL)
-		return (1);
 	if (nframes <= 1)
 		return (1);

sys/kern/kern_condvar.c

@@ -50,7 +50,7 @@ __FBSDID("$FreeBSD$");
  * Common sanity checks for cv_wait* functions.
  */
 #define	CV_ASSERT(cvp, lock, td) do {					\
-	KASSERT((td) != NULL, ("%s: curthread NULL", __func__));	\
+	KASSERT((td) != NULL, ("%s: td NULL", __func__));		\
 	KASSERT(TD_IS_RUNNING(td), ("%s: not TDS_RUNNING", __func__));	\
 	KASSERT((cvp) != NULL, ("%s: cvp NULL", __func__));		\
 	KASSERT((lock) != NULL, ("%s: lock NULL", __func__));		\

sys/kern/kern_mutex.c

@@ -200,7 +200,6 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
 	    curthread, m->lock_object.lo_name, file, line));
@@ -225,7 +224,6 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
@@ -248,7 +246,6 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -272,7 +269,6 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -303,7 +299,6 @@ mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return (1);
-	MPASS(curthread != NULL);
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
 	    curthread, m->lock_object.lo_name, file, line));

sys/kern/kern_rwlock.c

@@ -241,7 +241,6 @@ _rw_wlock(struct rwlock *rw, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
 	    curthread, rw->lock_object.lo_name, file, line));
@@ -292,7 +291,6 @@ _rw_wunlock(struct rwlock *rw, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
 	_rw_assert(rw, RA_WLOCKED, file, line);

sys/kern/kern_sx.c

@@ -249,7 +249,6 @@ _sx_slock(struct sx *sx, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return (0);
-	MPASS(curthread != NULL);
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
 	    curthread, sx->lock_object.lo_name, file, line));
@@ -303,7 +302,6 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return (0);
-	MPASS(curthread != NULL);
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
 	    curthread, sx->lock_object.lo_name, file, line));
@@ -330,7 +328,6 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return (1);
-	MPASS(curthread != NULL);
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
 	    curthread, sx->lock_object.lo_name, file, line));
@@ -361,7 +358,6 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
 	_sx_assert(sx, SA_SLOCKED, file, line);
@@ -378,7 +374,6 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return;
-	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
 	_sx_assert(sx, SA_XLOCKED, file, line);

sys/kern/kern_thread.c

@@ -622,7 +622,6 @@ thread_single(int mode)
 	p = td->td_proc;
 	mtx_assert(&Giant, MA_NOTOWNED);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	KASSERT((td != NULL), ("curthread is NULL"));
 	if ((p->p_flag & P_HADTHREADS) == 0)
 		return (0);

sys/kern/vfs_subr.c

@@ -3416,7 +3416,6 @@ vfs_unmountall(void)
 	struct thread *td;
 	int error;
-	KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
 	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
 	td = curthread;