Improved scheduling in uiomove(), etc.  resched_wanted() is true too
often for it to be a good criterion for switching kernel cpu hogs --
it is true after most wakeups.  Use the criterion "has been running
for >= 2 quanta" instead.
This commit is contained in:
bde 1999-02-22 16:57:48 +00:00
parent 4e6745d688
commit d51135c0c3
3 changed files with 15 additions and 8 deletions

@@ -36,11 +36,12 @@
* SUCH DAMAGE.
*
* @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
* $Id: kern_subr.c,v 1.24 1999/01/10 01:58:24 eivind Exp $
* $Id: kern_subr.c,v 1.25 1999/02/02 12:11:01 bde Exp $
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
@@ -52,8 +53,6 @@
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/cpu.h>
static void uio_yield __P((void));
int
@@ -86,7 +85,7 @@ uiomove(cp, n, uio)
case UIO_USERSPACE:
case UIO_USERISPACE:
if (resched_wanted())
if (ticks - switchticks >= hogticks)
uio_yield();
if (uio->uio_rw == UIO_READ)
error = copyout(cp, iov->iov_base, cnt);
@@ -146,7 +145,7 @@ uiomoveco(cp, n, uio, obj)
case UIO_USERSPACE:
case UIO_USERISPACE:
if (resched_wanted())
if (ticks - switchticks >= hogticks)
uio_yield();
if (uio->uio_rw == UIO_READ) {
if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
@@ -223,7 +222,7 @@ uioread(n, uio, obj, nread)
cnt &= ~PAGE_MASK;
if (resched_wanted())
if (ticks - switchticks >= hogticks)
uio_yield();
error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
uio->uio_offset, cnt,
@@ -408,6 +407,7 @@ uio_yield()
int s;
p = curproc;
p->p_priority = p->p_usrpri;
s = splhigh();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
* $Id: kern_synch.c,v 1.71 1999/01/08 17:31:10 eivind Exp $
* $Id: kern_synch.c,v 1.72 1999/01/10 01:58:24 eivind Exp $
*/
#include "opt_ktrace.h"
@@ -66,6 +66,7 @@ static void rqinit __P((void *));
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)
u_char curpriority; /* usrpri of curproc */
int hogticks;
int lbolt; /* once a second sleep address */
static void endtsleep __P((void *));
@@ -94,6 +95,7 @@ sysctl_kern_quantum SYSCTL_HANDLER_ARGS
error = EINVAL;
}
}
hogticks = 2 * (hz / quantum);
return (error);
}
@@ -360,6 +362,7 @@ sleepinit()
{
int i;
hogticks = 2 * (hz / quantum);
for (i = 0; i < TABLESIZE; i++)
TAILQ_INIT(&slpque[i]);
}
@@ -828,6 +831,8 @@ mi_switch()
p->p_switchtime = switchtime;
else
microuptime(&p->p_switchtime);
switchticks = ticks;
splx(x);
}

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)proc.h 8.15 (Berkeley) 5/19/95
* $Id: proc.h,v 1.68 1999/02/01 13:23:18 newton Exp $
* $Id: proc.h,v 1.69 1999/02/03 08:21:44 bde Exp $
*/
#ifndef _SYS_PROC_H_
@@ -326,8 +326,10 @@ extern u_long pgrphash;
extern struct proc *curproc; /* Current running proc. */
extern struct proc proc0; /* Process slot for swapper. */
extern int hogticks; /* Limit on kernel cpu hogs. */
extern int nprocs, maxproc; /* Current and max number of procs. */
extern int maxprocperuid; /* Max procs per uid. */
extern int switchticks; /* `ticks' at last context switch. */
extern struct timeval switchtime; /* Uptime at last context switch */
LIST_HEAD(proclist, proc);