diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index 77d73cf2d9a6..195a9349f1a5 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -406,70 +406,6 @@ grow_stack(p, sp)
 }
 
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)    ((v) * 2 / 3)
-#define ZIDLE_HI(v)    ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps. We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 51072e76d803..e5e5ea666389 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -550,67 +550,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-SYSCTL_DECL(_vm_stats_misc);
-
-static int cnt_prezero;
-
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
-        cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)    ((v) * 2 / 3)
-#define ZIDLE_HI(v)    ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps. We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
-                return(0);
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                return(0);
-
-        if (mtx_trylock(&Giant)) {
-                zero_state = 0;
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/conf/files b/sys/conf/files
index 06f46e262889..adb512cbd805 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1254,6 +1254,7 @@ vm/vm_object.c		standard
 vm/vm_page.c		standard
 vm/vm_pageq.c		standard
 vm/vm_contig.c		standard
+vm/vm_zeroidle.c	standard
 vm/vm_pageout.c		standard
 vm/vm_pager.c		standard
 vm/vm_swap.c		standard
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 51072e76d803..e5e5ea666389 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -550,67 +550,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-SYSCTL_DECL(_vm_stats_misc);
-
-static int cnt_prezero;
-
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
-        cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)    ((v) * 2 / 3)
-#define ZIDLE_HI(v)    ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps. We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
-                return(0);
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                return(0);
-
-        if (mtx_trylock(&Giant)) {
-                zero_state = 0;
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index c38f983ae1f0..35a45b97a716 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -444,71 +444,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)    ((v) * 2 / 3)
-#define ZIDLE_HI(v)    ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps. We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 98bbd275e0fc..6d50e278826f 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -316,77 +316,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)    ((v) * 2 / 3)
-#define ZIDLE_HI(v)    ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps. We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-#if 0
-                        rel_mplock();
-#endif
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-#if 0
-                        get_mplock();
-#endif
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index 98bbd275e0fc..6d50e278826f 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -316,77 +316,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)    ((v) * 2 / 3)
-#define ZIDLE_HI(v)    ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps. We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-#if 0
-                        rel_mplock();
-#endif
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-#if 0
-                        get_mplock();
-#endif
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
new file mode 100644
index 000000000000..c1361ded8616
--- /dev/null
+++ b/sys/vm/vm_zeroidle.c
@@ -0,0 +1,117 @@
+/*-
+ * Copyright (c) 1994 John Dyson
+ * Copyright (c) 2001 Matt Dillon
+ *
+ * All rights reserved.  Terms for use and redistribution
+ * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
+ *
+ *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
+ *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ * $FreeBSD$
+ */
+
+#include "opt_npx.h"
+#ifdef PC98
+#include "opt_pc98.h"
+#endif
+#include "opt_reset.h"
+#include "opt_isa.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/vmmeter.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/pcb_ext.h>
+#include <machine/vm86.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <sys/user.h>
+
+#ifdef PC98
+#include <pc98/pc98/pc98_machdep.h>
+#else
+#include <i386/isa/isa.h>
+#endif
+
+SYSCTL_DECL(_vm_stats_misc);
+
+static int cnt_prezero;
+
+SYSCTL_INT(_vm_stats_misc, OID_AUTO,
+        cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
+
+/*
+ * Implement the pre-zeroed page mechanism.
+ * This routine is called from the idle loop.
+ */
+
+#define ZIDLE_LO(v)    ((v) * 2 / 3)
+#define ZIDLE_HI(v)    ((v) * 4 / 5)
+
+int
+vm_page_zero_idle(void)
+{
+        static int free_rover;
+        static int zero_state;
+        vm_page_t m;
+
+        /*
+         * Attempt to maintain approximately 1/2 of our free pages in a
+         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
+         * generally zeroing a page when the system is near steady-state.
+         * Otherwise we might get 'flutter' during disk I/O / IPC or
+         * fast sleeps. We also do not want to be continuously zeroing
+         * pages because doing so may flush our L1 and L2 caches too much.
+         */
+
+        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+                return(0);
+        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+                return(0);
+
+        if (mtx_trylock(&Giant)) {
+                zero_state = 0;
+                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
+                if (m != NULL && (m->flags & PG_ZERO) == 0) {
+                        vm_page_queues[m->queue].lcnt--;
+                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
+                        m->queue = PQ_NONE;
+                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
+                        vm_page_flag_set(m, PG_ZERO);
+                        m->queue = PQ_FREE + m->pc;
+                        vm_page_queues[m->queue].lcnt++;
+                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
+                            pageq);
+                        ++vm_page_zero_count;
+                        ++cnt_prezero;
+                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+                                zero_state = 1;
+                }
+                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
+                mtx_unlock(&Giant);
+                return (1);
+        }
+        return(0);
+}
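Reviewer note on the threshold logic: ZIDLE_LO and ZIDLE_HI form a two-state
hysteresis. The idle loop fills the pre-zeroed pool until it holds 4/5 of the
free pages, then stops and does not resume until consumption drains the pool
below 2/3, so the code does not oscillate ("flutter") around a single
threshold at steady state. The standalone C sketch below simulates that state
machine with plain counters; zero_idle_step and its simulated arguments are
hypothetical stand-ins for the kernel state and are not part of this patch.

#include <stdio.h>

#define ZIDLE_LO(v)     ((v) * 2 / 3)   /* resume-zeroing threshold */
#define ZIDLE_HI(v)     ((v) * 4 / 5)   /* stop-zeroing threshold */

static int zero_state;          /* 1 after the pool has been filled to HI */

/*
 * One idle-loop pass: pre-zero a (simulated) page if the thresholds
 * allow it.  Returns 1 if a page was zeroed, 0 if the loop should idle.
 */
static int
zero_idle_step(int *zero_count, int free_count)
{
        if (zero_state && *zero_count >= ZIDLE_LO(free_count))
                return (0);     /* filled earlier; wait for a real drain */
        if (*zero_count >= ZIDLE_HI(free_count))
                return (0);     /* pool is at the hard cap */
        zero_state = 0;
        ++*zero_count;          /* stands in for pmap_zero_page() */
        if (*zero_count >= ZIDLE_HI(free_count))
                zero_state = 1; /* reached HI: idle until below LO */
        return (1);
}

int
main(void)
{
        int free_count = 30000; /* LO = 20000, HI = 24000 */
        int zero_count = 0, passes = 0;

        while (zero_idle_step(&zero_count, free_count))
                passes++;
        printf("filled to %d pre-zeroed pages in %d passes\n",
            zero_count, passes);

        zero_count = 21000;     /* drained, but still above LO */
        printf("restart at 21000? %d\n",
            zero_idle_step(&zero_count, free_count));

        zero_count = 19000;     /* drained below LO */
        printf("restart at 19000? %d\n",
            zero_idle_step(&zero_count, free_count));
        return (0);
}

With 30000 free pages the pool fills to 24000 (ZIDLE_HI), refuses to restart
at 21000 because that is still above ZIDLE_LO = 20000, and restarts at 19000.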