Put the general logic for being a CPU hog into a new function
should_yield().  Use this in various places.  Encapsulate the common
case of check-and-yield into a new function maybe_yield().

Change several checks for a magic number of iterations to use
should_yield() instead.

MFC after:	1 week
commit 08b163fa51
parent 01ab52c021
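In short, the open-coded hog test (ticks - PCPU_GET(switchticks) >= hogticks)
becomes should_yield(), and the repeated check-plus-uio_yield() pair becomes
maybe_yield(); the ureadc()/uio_yield() hunk below adds both. What follows is
a minimal, self-contained userland sketch of the pattern, for illustration
only: ticks, switchticks, hogticks, and do_yield() here are stand-ins for the
kernel's ticks counter, PCPU_GET(switchticks), hogticks, and uio_yield(), not
real APIs.

/*
 * Userland model of should_yield()/maybe_yield().  All globals and
 * do_yield() are stand-ins, not kernel APIs.
 */
#include <sched.h>
#include <stdio.h>

static int ticks;		/* stand-in: clock ticks since boot */
static int switchticks;		/* stand-in: ticks at last context switch */
static int hogticks = 2;	/* stand-in: hog threshold */

static int
should_yield(void)
{

	/* The one test this commit centralizes. */
	return (ticks - switchticks >= hogticks);
}

static void
do_yield(void)
{

	sched_yield();		/* pretend we were switched out */
	switchticks = ticks;
}

static void
maybe_yield(void)
{

	/* The common check-and-yield case. */
	if (should_yield())
		do_yield();
}

int
main(void)
{
	int i;

	/*
	 * A long-running loop calls maybe_yield() instead of open-coding
	 * the hogticks comparison or counting iterations.
	 */
	for (i = 0; i < 10; i++) {
		ticks++;	/* simulate the clock advancing */
		maybe_yield();
	}
	printf("yielded whenever we held the CPU for >= %d ticks\n", hogticks);
	return (0);
}

The design point: call sites stop carrying the magic comparison, and code
that cannot yield at an arbitrary spot can still poll should_yield() alone,
as vlrureclaim() does below.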
@@ -88,8 +88,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		    page_offset;
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -94,8 +94,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		cp = (char*)sf_buf_kva(sf) + page_offset;
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -90,8 +90,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		cp = (char *)sf_buf_kva(sf) + page_offset;
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -89,8 +89,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		    page_offset;
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -1622,8 +1622,7 @@ compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len,
 		}
 		inbuf += chunk_len;
 		len -= chunk_len;
-		if (ticks - PCPU_GET(switchticks) >= hogticks)
-			uio_yield();
+		maybe_yield();
 	}
 
 	return (error);
@@ -158,8 +158,7 @@ uiomove(void *cp, int n, struct uio *uio)
 		switch (uio->uio_segflg) {
 
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -283,11 +282,8 @@ uiomoveco(void *cp, int n, struct uio *uio, int disposable)
 		switch (uio->uio_segflg) {
 
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
-
+			maybe_yield();
 			error = userspaceco(cp, cnt, uio, disposable);
-
 			if (error)
 				return (error);
 			break;
@@ -356,6 +352,21 @@ ureadc(int c, struct uio *uio)
 	return (0);
 }
 
+int
+should_yield(void)
+{
+
+	return (ticks - PCPU_GET(switchticks) >= hogticks);
+}
+
+void
+maybe_yield(void)
+{
+
+	if (should_yield())
+		uio_yield();
+}
+
 void
 uio_yield(void)
 {
@@ -1659,9 +1659,8 @@ __mnt_vnode_next(struct vnode **mvp, struct mount *mp)
 	mtx_assert(MNT_MTX(mp), MA_OWNED);
 
 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
-	if ((*mvp)->v_yield++ == 500) {
+	if (should_yield()) {
 		MNT_IUNLOCK(mp);
-		(*mvp)->v_yield = 0;
 		uio_yield();
 		MNT_ILOCK(mp);
 	}
@@ -707,11 +707,11 @@ vlrureclaim(struct mount *mp)
 		vdropl(vp);
 		done++;
 next_iter_mntunlocked:
-		if ((count % 256) != 0)
+		if (!should_yield())
 			goto relock_mnt;
 		goto yield;
 next_iter:
-		if ((count % 256) != 0)
+		if (!should_yield())
 			continue;
 		MNT_IUNLOCK(mp);
 yield:
@@ -107,8 +107,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		}
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -97,8 +97,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -103,8 +103,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		}
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -94,8 +94,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		cp = (char *)TLB_PHYS_TO_DIRECT(pa) + page_offset;
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
-			if (ticks - PCPU_GET(switchticks) >= hogticks)
-				uio_yield();
+			maybe_yield();
 			if (uio->uio_rw == UIO_READ)
 				error = copyout(cp, iov->iov_base, cnt);
 			else
@@ -95,6 +95,8 @@ int copyinstrfrom(const void * __restrict src, void * __restrict dst,
 	    size_t len, size_t * __restrict copied, int seg);
 int	copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop);
 void	uio_yield(void);
+void	maybe_yield(void);
+int	should_yield(void);
 int	uiomove(void *cp, int n, struct uio *uio);
 int	uiomove_frombuf(void *buf, int buflen, struct uio *uio);
 int	uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,
@@ -121,7 +121,6 @@ struct vnode {
 		struct socket *vu_socket;	/* v unix domain net (VSOCK) */
 		struct cdev *vu_cdev;		/* v device (VCHR, VBLK) */
 		struct fifoinfo *vu_fifoinfo;	/* v fifo (VFIFO) */
-		int vu_yield;			/* yield count (VMARKER) */
 	} v_un;
 
 	/*
@@ -177,7 +176,6 @@ struct vnode {
 #define	v_socket	v_un.vu_socket
 #define	v_rdev		v_un.vu_cdev
 #define	v_fifoinfo	v_un.vu_fifoinfo
-#define	v_yield		v_un.vu_yield
 
 /* XXX: These are temporary to avoid a source sweep at this time */
 #define	v_object	v_bufobj.bo_object
@@ -243,8 +243,7 @@ ffs_rawread_readahead(struct vnode *vp,
 	if (vmapbuf(bp) < 0)
 		return EFAULT;
 
-	if (ticks - PCPU_GET(switchticks) >= hogticks)
-		uio_yield();
+	maybe_yield();
 	bzero(bp->b_data, bp->b_bufsize);
 
 	/* Mark operation completed (similar to bufdone()) */
@@ -1342,7 +1342,7 @@ softdep_process_worklist(mp, full)
 	int full;
 {
 	struct thread *td = curthread;
-	int cnt, matchcnt, loopcount;
+	int cnt, matchcnt;
 	struct ufsmount *ump;
 	long starttime;
 
@@ -1354,7 +1354,6 @@ softdep_process_worklist(mp, full)
 	matchcnt = 0;
 	ump = VFSTOUFS(mp);
 	ACQUIRE_LOCK(&lk);
-	loopcount = 1;
 	starttime = time_second;
 	softdep_process_journal(mp, full?MNT_WAIT:0);
 	while (ump->softdep_on_worklist > 0) {
@@ -1379,7 +1378,7 @@ softdep_process_worklist(mp, full)
 		 * We do not generally want to stop for buffer space, but if
 		 * we are really being a buffer hog, we will stop and wait.
 		 */
-		if (loopcount++ % 128 == 0) {
+		if (should_yield()) {
 			FREE_LOCK(&lk);
 			uio_yield();
 			bwillwrite();
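Note that the __mnt_vnode_next(), vlrureclaim(), and softdep_process_worklist()
hunks above do more than substitute uio_yield(): they drop per-loop iteration
counters (v_yield++ == 500, count % 256, loopcount++ % 128) in favor of the
time-based test. A schematic before/after, not code from this commit
(iterations and do_yield() are hypothetical):

	/* Before: yield after a magic number of loop iterations. */
	if (++iterations % 256 == 0)
		do_yield();

	/*
	 * After: yield only when this thread has actually held the CPU
	 * for at least hogticks clock ticks, however cheap or expensive
	 * each iteration happens to be.
	 */
	if (should_yield())
		do_yield();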