Fixup r244240: mp_ncpus will be 1 also in the !SMP and smp_disabled=1
case. There is no point in optimizing the code further by using a TRUE literal for a path that does heavyweight stuff anyway (like lock acquisition), at the price of obfuscated code. Use the appropriate check where necessary and remove a macro. Sponsored by: EMC / Isilon storage division MFC after: 3 days
This commit is contained in:
parent
552f8bf56d
commit
27fa8d59ff
@ -4724,12 +4724,6 @@ mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
|
||||
*mvp = NULL;
|
||||
}
|
||||
|
||||
#ifdef SMP
|
||||
#define ALWAYS_YIELD (mp_ncpus == 1)
|
||||
#else
|
||||
#define ALWAYS_YIELD 1
|
||||
#endif
|
||||
|
||||
static struct vnode *
|
||||
mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
|
||||
{
|
||||
@ -4746,7 +4740,7 @@ mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
|
||||
continue;
|
||||
}
|
||||
if (!VI_TRYLOCK(vp)) {
|
||||
if (ALWAYS_YIELD || should_yield()) {
|
||||
if (mp_ncpus == 1 || should_yield()) {
|
||||
TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
|
||||
mtx_unlock(&vnode_free_list_mtx);
|
||||
kern_yield(PRI_USER);
|
||||
@ -4777,7 +4771,6 @@ mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
|
||||
KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
|
||||
return (vp);
|
||||
}
|
||||
#undef ALWAYS_YIELD
|
||||
|
||||
struct vnode *
|
||||
__mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
|
||||
|
Loading…
Reference in New Issue
Block a user