From 08b163fa51d57ffe78bbdcc7a7912243dbb8b853 Mon Sep 17 00:00:00 2001 From: Matthew D Fleming Date: Wed, 2 Feb 2011 16:35:10 +0000 Subject: [PATCH] Put the general logic for being a CPU hog into a new function should_yield(). Use this in various places. Encapsulate the common case of check-and-yield into a new function maybe_yield(). Change several checks for a magic number of iterations to use should_yield() instead. MFC after: 1 week --- sys/amd64/amd64/uio_machdep.c | 3 +-- sys/arm/arm/uio_machdep.c | 3 +-- sys/i386/i386/uio_machdep.c | 3 +-- sys/ia64/ia64/uio_machdep.c | 3 +-- sys/kern/imgact_elf.c | 3 +-- sys/kern/subr_uio.c | 23 +++++++++++++++++------ sys/kern/vfs_mount.c | 3 +-- sys/kern/vfs_subr.c | 4 ++-- sys/mips/mips/uio_machdep.c | 3 +-- sys/powerpc/powerpc/uio_machdep.c | 3 +-- sys/sparc64/sparc64/uio_machdep.c | 3 +-- sys/sun4v/sun4v/uio_machdep.c | 3 +-- sys/sys/uio.h | 2 ++ sys/sys/vnode.h | 2 -- sys/ufs/ffs/ffs_rawread.c | 3 +-- sys/ufs/ffs/ffs_softdep.c | 5 ++--- 16 files changed, 34 insertions(+), 35 deletions(-) diff --git a/sys/amd64/amd64/uio_machdep.c b/sys/amd64/amd64/uio_machdep.c index d3897cf14d9a..2d24c7cf6260 100644 --- a/sys/amd64/amd64/uio_machdep.c +++ b/sys/amd64/amd64/uio_machdep.c @@ -88,8 +88,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) page_offset; switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/arm/arm/uio_machdep.c b/sys/arm/arm/uio_machdep.c index bc52a6e5dc4e..0a8e4adddc1d 100644 --- a/sys/arm/arm/uio_machdep.c +++ b/sys/arm/arm/uio_machdep.c @@ -94,8 +94,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) cp = (char*)sf_buf_kva(sf) + page_offset; switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw 
== UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/i386/i386/uio_machdep.c b/sys/i386/i386/uio_machdep.c index 3558ec01377a..c095a440f308 100644 --- a/sys/i386/i386/uio_machdep.c +++ b/sys/i386/i386/uio_machdep.c @@ -90,8 +90,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) cp = (char *)sf_buf_kva(sf) + page_offset; switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/ia64/ia64/uio_machdep.c b/sys/ia64/ia64/uio_machdep.c index 30f6250b0e2e..c087a804aaff 100644 --- a/sys/ia64/ia64/uio_machdep.c +++ b/sys/ia64/ia64/uio_machdep.c @@ -89,8 +89,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) page_offset; switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index 38029b0dad58..80bb7000a635 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -1622,8 +1622,7 @@ compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len, } inbuf += chunk_len; len -= chunk_len; - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); } return (error); diff --git a/sys/kern/subr_uio.c b/sys/kern/subr_uio.c index 9b93005ea913..96f9331a4a91 100644 --- a/sys/kern/subr_uio.c +++ b/sys/kern/subr_uio.c @@ -158,8 +158,7 @@ uiomove(void *cp, int n, struct uio *uio) switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else @@ -283,11 +282,8 @@ uiomoveco(void *cp, int n, struct uio *uio, int disposable) switch (uio->uio_segflg) { case UIO_USERSPACE: 
- if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); - + maybe_yield(); error = userspaceco(cp, cnt, uio, disposable); - if (error) return (error); break; @@ -356,6 +352,21 @@ ureadc(int c, struct uio *uio) return (0); } +int +should_yield(void) +{ + + return (ticks - PCPU_GET(switchticks) >= hogticks); +} + +void +maybe_yield(void) +{ + + if (should_yield()) + uio_yield(); +} + void uio_yield(void) { diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c index 000a5446ac2b..4ca514b3233d 100644 --- a/sys/kern/vfs_mount.c +++ b/sys/kern/vfs_mount.c @@ -1659,9 +1659,8 @@ __mnt_vnode_next(struct vnode **mvp, struct mount *mp) mtx_assert(MNT_MTX(mp), MA_OWNED); KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); - if ((*mvp)->v_yield++ == 500) { + if (should_yield()) { MNT_IUNLOCK(mp); - (*mvp)->v_yield = 0; uio_yield(); MNT_ILOCK(mp); } diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index 58061b4e250c..b4db0a1dbe14 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -707,11 +707,11 @@ vlrureclaim(struct mount *mp) vdropl(vp); done++; next_iter_mntunlocked: - if ((count % 256) != 0) + if (!should_yield()) goto relock_mnt; goto yield; next_iter: - if ((count % 256) != 0) + if (!should_yield()) continue; MNT_IUNLOCK(mp); yield: diff --git a/sys/mips/mips/uio_machdep.c b/sys/mips/mips/uio_machdep.c index a550ef0739cf..10aa1d6a7631 100644 --- a/sys/mips/mips/uio_machdep.c +++ b/sys/mips/mips/uio_machdep.c @@ -107,8 +107,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) } switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/powerpc/powerpc/uio_machdep.c b/sys/powerpc/powerpc/uio_machdep.c index 6d171145f2c9..ee4f2e48234a 100644 --- a/sys/powerpc/powerpc/uio_machdep.c +++ b/sys/powerpc/powerpc/uio_machdep.c @@ -97,8 +97,7 @@
uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/sparc64/sparc64/uio_machdep.c b/sys/sparc64/sparc64/uio_machdep.c index 434713fb84f7..007a8b0ad02d 100644 --- a/sys/sparc64/sparc64/uio_machdep.c +++ b/sys/sparc64/sparc64/uio_machdep.c @@ -103,8 +103,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) } switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/sun4v/sun4v/uio_machdep.c b/sys/sun4v/sun4v/uio_machdep.c index bee80d68fc67..513991279c17 100644 --- a/sys/sun4v/sun4v/uio_machdep.c +++ b/sys/sun4v/sun4v/uio_machdep.c @@ -94,8 +94,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) cp = (char *)TLB_PHYS_TO_DIRECT(pa) + page_offset; switch (uio->uio_segflg) { case UIO_USERSPACE: - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); if (uio->uio_rw == UIO_READ) error = copyout(cp, iov->iov_base, cnt); else diff --git a/sys/sys/uio.h b/sys/sys/uio.h index 1de8880b41ed..60af2b7d7880 100644 --- a/sys/sys/uio.h +++ b/sys/sys/uio.h @@ -95,6 +95,8 @@ int copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len, size_t * __restrict copied, int seg); int copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop); void uio_yield(void); +void maybe_yield(void); +int should_yield(void); int uiomove(void *cp, int n, struct uio *uio); int uiomove_frombuf(void *buf, int buflen, struct uio *uio); int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n, diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h index 86ff8b636b5d..e7ff2f4cf664 100644 --- 
a/sys/sys/vnode.h +++ b/sys/sys/vnode.h @@ -121,7 +121,6 @@ struct vnode { struct socket *vu_socket; /* v unix domain net (VSOCK) */ struct cdev *vu_cdev; /* v device (VCHR, VBLK) */ struct fifoinfo *vu_fifoinfo; /* v fifo (VFIFO) */ - int vu_yield; /* yield count (VMARKER) */ } v_un; /* @@ -177,7 +176,6 @@ struct vnode { #define v_socket v_un.vu_socket #define v_rdev v_un.vu_cdev #define v_fifoinfo v_un.vu_fifoinfo -#define v_yield v_un.vu_yield /* XXX: These are temporary to avoid a source sweep at this time */ #define v_object v_bufobj.bo_object diff --git a/sys/ufs/ffs/ffs_rawread.c b/sys/ufs/ffs/ffs_rawread.c index 574d89c57d33..6c30d144cc19 100644 --- a/sys/ufs/ffs/ffs_rawread.c +++ b/sys/ufs/ffs/ffs_rawread.c @@ -243,8 +243,7 @@ ffs_rawread_readahead(struct vnode *vp, if (vmapbuf(bp) < 0) return EFAULT; - if (ticks - PCPU_GET(switchticks) >= hogticks) - uio_yield(); + maybe_yield(); bzero(bp->b_data, bp->b_bufsize); /* Mark operation completed (similar to bufdone()) */ diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c index 60d46008701b..9bc21d3ce5ab 100644 --- a/sys/ufs/ffs/ffs_softdep.c +++ b/sys/ufs/ffs/ffs_softdep.c @@ -1342,7 +1342,7 @@ softdep_process_worklist(mp, full) int full; { struct thread *td = curthread; - int cnt, matchcnt, loopcount; + int cnt, matchcnt; struct ufsmount *ump; long starttime; @@ -1354,7 +1354,6 @@ softdep_process_worklist(mp, full) matchcnt = 0; ump = VFSTOUFS(mp); ACQUIRE_LOCK(&lk); - loopcount = 1; starttime = time_second; softdep_process_journal(mp, full?MNT_WAIT:0); while (ump->softdep_on_worklist > 0) { @@ -1379,7 +1378,7 @@ softdep_process_worklist(mp, full) * We do not generally want to stop for buffer space, but if * we are really being a buffer hog, we will stop and wait. */ - if (loopcount++ % 128 == 0) { + if (should_yield()) { FREE_LOCK(&lk); uio_yield(); bwillwrite();