Based on discussions on the svn-src mailing list, rework r218195:

 - entirely eliminate some calls to uio_yield() as being unnecessary,
   such as in a sysctl handler.

 - move should_yield() and maybe_yield() to kern_synch.c and move the
   prototypes from sys/uio.h to sys/proc.h.

 - add a slightly more generic kern_yield() that can replace the
   functionality of uio_yield().

 - replace source uses of uio_yield() with the functional equivalent,
   or in some cases do not change the thread priority when switching
   (see the call-site sketch below).

 - fix a logic inversion bug in vlrureclaim(), pointed out by bde@.

 - instead of using the per-cpu last switched ticks, use a per-thread
   variable for should_yield().  With PREEMPTION, the only reasonable
   use of this is to determine if a lock has been held a long time and
   relinquish it.  Without PREEMPTION, this is essentially the same as
   the per-cpu variable.
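
As a quick orientation for the diff below, here is a minimal sketch of the
intended call-site pattern; the loop and its helpers are hypothetical, and
only should_yield(), maybe_yield() and kern_yield() come from this change:

    /*
     * Hypothetical long-running kernel loop.  should_yield() becomes true
     * once the thread has run for hogticks ticks since its last voluntary
     * context switch.
     */
    while (more_work_to_do()) {         /* hypothetical condition */
            process_one_item();         /* hypothetical helper */
            if (should_yield())
                    kern_yield(-1);     /* yield, keep the current priority */
    }

    /* Equivalent of the old uio_yield(): drop to user priority, then yield. */
    kern_yield(curthread->td_user_pri);

    /* Shorthand: yield at user priority only when should_yield() is true. */
    maybe_yield();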
Matthew D Fleming 2011-02-08 00:16:36 +00:00
parent 2306fc3f4c
commit e7ceb1e99b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=218424
12 changed files with 48 additions and 39 deletions

@@ -1466,7 +1466,6 @@ sysctl_siots(SYSCTL_HANDLER_ARGS)
error = SYSCTL_OUT(req, buf, len);
if (error != 0)
return (error);
uio_yield();
}
return (0);
}

@@ -413,9 +413,10 @@ mi_switch(int flags, struct thread *newtd)
*/
if (kdb_active)
kdb_switch();
if (flags & SW_VOL)
if (flags & SW_VOL) {
td->td_ru.ru_nvcsw++;
else
td->td_swvoltick = ticks;
} else
td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
@@ -538,6 +539,36 @@ synch_setup(void *dummy)
loadav(NULL);
}
int
should_yield(void)
{
return (ticks - curthread->td_swvoltick >= hogticks);
}
void
maybe_yield(void)
{
if (should_yield())
kern_yield(curthread->td_user_pri);
}
void
kern_yield(int prio)
{
struct thread *td;
td = curthread;
DROP_GIANT();
thread_lock(td);
if (prio >= 0)
sched_prio(td, prio);
mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
thread_unlock(td);
PICKUP_GIANT();
}
/*
* General purpose yield system call.
*/

@@ -1568,7 +1568,7 @@ userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
SYSCTL_XUNLOCK();
if (error != EAGAIN)
break;
uio_yield();
kern_yield(curthread->td_user_pri);
}
CURVNET_RESTORE();

@@ -352,33 +352,11 @@ ureadc(int c, struct uio *uio)
return (0);
}
int
should_yield(void)
{
return (ticks - PCPU_GET(switchticks) >= hogticks);
}
void
maybe_yield(void)
{
if (should_yield())
uio_yield();
}
void
uio_yield(void)
{
struct thread *td;
td = curthread;
DROP_GIANT();
thread_lock(td);
sched_prio(td, td->td_user_pri);
mi_switch(SW_INVOL | SWT_RELINQUISH, NULL);
thread_unlock(td);
PICKUP_GIANT();
kern_yield(curthread->td_user_pri);
}
int

@@ -2234,7 +2234,7 @@ buf_daemon()
while (numdirtybuffers > lodirtybuffers) {
if (buf_do_flush(NULL) == 0)
break;
uio_yield();
kern_yield(-1);
}
lodirtybuffers = lodirtysave;

@@ -1661,7 +1661,7 @@ __mnt_vnode_next(struct vnode **mvp, struct mount *mp)
KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
if (should_yield()) {
MNT_IUNLOCK(mp);
uio_yield();
kern_yield(-1);
MNT_ILOCK(mp);
}
vp = TAILQ_NEXT(*mvp, v_nmntvnodes);

@@ -707,15 +707,15 @@ vlrureclaim(struct mount *mp)
vdropl(vp);
done++;
next_iter_mntunlocked:
if (should_yield())
if (!should_yield())
goto relock_mnt;
goto yield;
next_iter:
if (should_yield())
if (!should_yield())
continue;
MNT_IUNLOCK(mp);
yield:
uio_yield();
kern_yield(-1);
relock_mnt:
MNT_ILOCK(mp);
}
@@ -828,7 +828,7 @@ vnlru_proc(void)
vnlru_nowhere++;
tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
} else
uio_yield();
kern_yield(-1);
}
}

@@ -444,7 +444,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
* Package up an I/O request on a vnode into a uio and do it. The I/O
* request is split up into smaller chunks and we try to avoid saturating
* the buffer cache while potentially holding a vnode locked, so we
* check bwillwrite() before calling vn_rdwr(). We also call uio_yield()
* check bwillwrite() before calling vn_rdwr(). We also call kern_yield()
* to give other processes a chance to lock the vnode (either other processes
* core'ing the same binary, or unrelated processes scanning the directory).
*/
@@ -491,7 +491,7 @@ vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
break;
offset += chunk;
base = (char *)base + chunk;
uio_yield();
kern_yield(curthread->td_user_pri);
} while (len);
if (aresid)
*aresid = len + iaresid;

@@ -2250,7 +2250,6 @@ sysctl_siots(SYSCTL_HANDLER_ARGS)
error = SYSCTL_OUT(req, buf, len);
if (error != 0)
return (error);
uio_yield();
}
return (0);
}

@@ -242,6 +242,7 @@ struct thread {
u_int td_estcpu; /* (t) estimated cpu utilization */
int td_slptick; /* (t) Time at sleep. */
int td_blktick; /* (t) Time spent blocked. */
int td_swvoltick; /* (t) Time at last SW_VOL switch. */
struct rusage td_ru; /* (t) rusage information. */
struct rusage_ext td_rux; /* (t) Internal rusage information. */
uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */
@@ -822,9 +823,11 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
struct trapframe *);
void fork_return(struct thread *, struct trapframe *);
int inferior(struct proc *p);
void kern_yield(int);
void kick_proc0(void);
int leavepgrp(struct proc *p);
int maybe_preempt(struct thread *td);
void maybe_yield(void);
void mi_switch(int flags, struct thread *newtd);
int p_candebug(struct thread *td, struct proc *p);
int p_cansee(struct thread *td, struct proc *p);
@@ -847,6 +850,7 @@ void sess_hold(struct session *);
void sess_release(struct session *);
int setrunnable(struct thread *);
void setsugid(struct proc *p);
int should_yield(void);
int sigonstack(size_t sp);
void sleepinit(void);
void stopevent(struct proc *, u_int, u_int);

@@ -95,8 +95,6 @@ int copyinstrfrom(const void * __restrict src, void * __restrict dst,
size_t len, size_t * __restrict copied, int seg);
int copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop);
void uio_yield(void);
void maybe_yield(void);
int should_yield(void);
int uiomove(void *cp, int n, struct uio *uio);
int uiomove_frombuf(void *buf, int buflen, struct uio *uio);
int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,

@@ -1380,7 +1380,7 @@ softdep_process_worklist(mp, full)
*/
if (should_yield()) {
FREE_LOCK(&lk);
uio_yield();
kern_yield(-1);
bwillwrite();
ACQUIRE_LOCK(&lk);
}