diff --git a/share/man/man9/Makefile b/share/man/man9/Makefile
index 765395626e2e..e54328fa6497 100644
--- a/share/man/man9/Makefile
+++ b/share/man/man9/Makefile
@@ -1391,8 +1391,7 @@ MLINKS+=pmap_quick_enter_page.9 pmap_quick_remove_page.9
 MLINKS+=pmap_remove.9 pmap_remove_all.9 \
	pmap_remove.9 pmap_remove_pages.9
 MLINKS+=pmap_resident_count.9 pmap_wired_count.9
-MLINKS+=pmap_zero_page.9 pmap_zero_area.9 \
-	pmap_zero_page.9 pmap_zero_idle.9
+MLINKS+=pmap_zero_page.9 pmap_zero_area.9
 MLINKS+=printf.9 log.9 \
	printf.9 tprintf.9 \
	printf.9 uprintf.9
diff --git a/share/man/man9/pmap.9 b/share/man/man9/pmap.9
index 231749c16499..0b1987a19d3b 100644
--- a/share/man/man9/pmap.9
+++ b/share/man/man9/pmap.9
@@ -25,7 +25,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd August 3, 2014
+.Dd August 30, 2016
 .Dt PMAP 9
 .Os
 .Sh NAME
@@ -121,7 +121,6 @@ operation.
 .Xr pmap_unwire 9 ,
 .Xr pmap_wired_count 9 ,
 .Xr pmap_zero_area 9 ,
-.Xr pmap_zero_idle 9 ,
 .Xr pmap_zero_page 9 ,
 .Xr vm_map 9
 .Sh AUTHORS
diff --git a/share/man/man9/pmap_zero_page.9 b/share/man/man9/pmap_zero_page.9
index 519e76b1871f..493cb38fe9ff 100644
--- a/share/man/man9/pmap_zero_page.9
+++ b/share/man/man9/pmap_zero_page.9
@@ -25,13 +25,12 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd July 21, 2003
+.Dd August 30, 2016
 .Dt PMAP_ZERO 9
 .Os
 .Sh NAME
 .Nm pmap_zero_page ,
-.Nm pmap_zero_area ,
-.Nm pmap_zero_page_idle
+.Nm pmap_zero_area
 .Nd zero-fill a page using machine-dependent optimizations
 .Sh SYNOPSIS
 .In sys/param.h
@@ -41,8 +40,6 @@
 .Fn pmap_zero_page "vm_page_t m"
 .Ft void
 .Fn pmap_zero_page_area "vm_page_t m" "int off" "int size"
-.Ft void
-.Fn pmap_zero_page_idle "vm_page_t m"
 .Sh DESCRIPTION
 The
 .Fn pmap_zero_page
@@ -53,14 +50,6 @@ function is used to zero-fill an area of a page.
 The range specified must not cross a page boundary; it must be contained
 entirely within a single page.
 .Pp
-The
-.Fn pmap_zero_page_idle
-interface is used by the
-.Nm vm_pagezero
-process.
-The system-wide
-.Va Giant
-lock should not be required to be held in order to call this interface.
 .Sh IMPLEMENTATION NOTES
 This function is required to be implemented for each architecture supported by
 .Fx .
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 0357bd745537..81b9e5d88291 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5178,19 +5178,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	bzero((char *)va + off, size);
 }
 
-/*
- * Zero the specified hardware page in a way that minimizes cache thrashing.
- * This is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-	sse2_pagezero((void *)va);
-}
-
 /*
  * Copy 1 specified hardware page to another.
  */
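On the direct-map architectures in this change (amd64 here, arm64 and riscv below), the removed idle variant was equivalent to the ordinary pmap_zero_page(): with every physical page permanently mapped, zeroing needs no transient mapping and no TLB maintenance. A minimal sketch of that shape, with pagezero() standing in for the machine-specific zeroing routine:

	void
	pmap_zero_page(vm_page_t m)
	{
		/* The direct map gives a permanent KVA for any physical page. */
		vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

		pagezero((void *)va);
	}

Nothing is lost by the deletion on these platforms; only the duplicate entry point goes away.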
diff --git a/sys/arm/arm/pmap-v4.c b/sys/arm/arm/pmap-v4.c
index 3bdc9e43835f..e7f8b9820793 100644
--- a/sys/arm/arm/pmap-v4.c
+++ b/sys/arm/arm/pmap-v4.c
@@ -4079,19 +4079,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 }
 
 
-/*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	pmap_zero_page(m);
-}
-
 #if 0
 /*
  * pmap_clean_page()
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 6eaab42a5e1d..20ad424a46d6 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -306,8 +306,6 @@ struct sysmaps {
 	caddr_t	CADDR3;
 };
 static struct sysmaps sysmaps_pcpu[MAXCPU];
-static pt2_entry_t *CMAP3;
-static caddr_t CADDR3;
 caddr_t _tmppt = 0;
 
 struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */
@@ -1176,7 +1174,6 @@ pmap_bootstrap(vm_offset_t firstaddr)
	/*
	 * Local CMAP1/CMAP2 are used for zeroing and copying pages.
	 * Local CMAP3 is used for data cache cleaning.
-	 * Global CMAP3 is used for the idle process page zeroing.
	 */
	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
@@ -1185,7 +1182,6 @@ pmap_bootstrap(vm_offset_t firstaddr)
		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1);
		SYSMAP(caddr_t, sysmaps->CMAP3, sysmaps->CADDR3, 1);
	}
-	SYSMAP(caddr_t, CMAP3, CADDR3, 1);
 
	/*
	 * Crashdump maps.
@@ -5804,27 +5800,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	mtx_unlock(&sysmaps->lock);
 }
 
-/*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	if (pte2_load(CMAP3) != 0)
-		panic("%s: CMAP3 busy", __func__);
-	sched_pin();
-	pte2_store(CMAP3, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
-	    vm_page_pte2_attr(m)));
-	pagezero(CADDR3);
-	pte2_clear(CMAP3);
-	tlb_flush((vm_offset_t)CADDR3);
-	sched_unpin();
-}
-
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 71acdd21fb9c..e8d0ca12029e 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -3263,20 +3263,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	bzero((char *)va + off, size);
 }
 
-/*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-	pagezero((void *)va);
-}
-
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
diff --git a/sys/conf/files b/sys/conf/files
index 1e6e19abc719..dd8c828759ee 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -4369,7 +4369,6 @@ vm/vm_radix.c			standard
 vm/vm_reserv.c			standard
 vm/vm_domain.c			standard
 vm/vm_unix.c			standard
-vm/vm_zeroidle.c		standard
 vm/vnode_pager.c		standard
 xen/features.c			optional	xenhvm
 xen/xenbus/xenbus_if.m		optional	xenhvm
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index c366bed6ba6f..036808844613 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -444,7 +444,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
 
	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
-	 * CMAP3 is used for the idle process page zeroing.
+	 * CMAP3 is used for the boot-time memory test.
	 */
	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
@@ -452,7 +452,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
		SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
	}
-	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
+	SYSMAP(caddr_t, CMAP3, CADDR3, 1);
 
	/*
	 * Crashdump maps.
@@ -4241,26 +4241,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	mtx_unlock(&sysmaps->lock);
 }
 
-/*
- * Zero the specified hardware page in a way that minimizes cache thrashing.
- * This is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	if (*CMAP3)
-		panic("pmap_zero_page_idle: CMAP3 busy");
-	sched_pin();
-	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
-	invlcaddr(CADDR3);
-	pagezero(CADDR3);
-	*CMAP3 = 0;
-	sched_unpin();
-}
-
 /*
  * Copy 1 specified hardware page to another.
  */
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index f2d8c586e4fc..dbfc554c6599 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -353,7 +353,7 @@ struct pv_chunk {
 
 #ifdef _KERNEL
 
-extern caddr_t CADDR3;
+extern caddr_t CADDR3;
 extern pt_entry_t *CMAP3;
 extern vm_paddr_t phys_avail[];
 extern vm_paddr_t dump_avail[];
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 7081b74cdd99..2bc7787dc051 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2558,24 +2558,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	}
 }
 
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va;
-	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-
-	if (MIPS_DIRECT_MAPPABLE(phys)) {
-		va = MIPS_PHYS_TO_DIRECT(phys);
-		bzero((caddr_t)va, PAGE_SIZE);
-		mips_dcache_wbinv_range(va, PAGE_SIZE);
-	} else {
-		va = pmap_lmem_map1(phys);
-		bzero((caddr_t)va, PAGE_SIZE);
-		mips_dcache_wbinv_range(va, PAGE_SIZE);
-		pmap_lmem_unmap();
-	}
-}
-
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 4aef628a0a23..04066418fcc1 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -300,7 +300,6 @@ void moea_remove_write(mmu_t, vm_page_t);
 void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea_zero_page(mmu_t, vm_page_t);
 void moea_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea_zero_page_idle(mmu_t, vm_page_t);
 void moea_activate(mmu_t, struct thread *);
 void moea_deactivate(mmu_t, struct thread *);
 void moea_cpu_bootstrap(mmu_t, int);
@@ -349,7 +348,6 @@ static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_unwire,		moea_unwire),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
-	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),
@@ -1081,13 +1079,6 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 	bzero(va, size);
 }
 
-void
-moea_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-
-	moea_zero_page(mmu, m);
-}
-
 vm_offset_t
 moea_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
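Architectures without a direct map take the opposite approach, visible in the i386 and ARMv6 deletions above: a PTE slot (CMAP) and virtual address (CADDR) reserved at boot are pointed at the page, the page is zeroed through that window, and the mapping is torn down again. A simplified sketch of the pattern using the i386 names (PTE attribute bits abbreviated relative to the removed code):

	static void
	zero_via_cmap(vm_page_t m)
	{

		if (*CMAP3)
			panic("CMAP3 busy");
		sched_pin();		/* keep the mapping CPU-local */
		*CMAP3 = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(m);
		invlcaddr(CADDR3);	/* discard any stale TLB entry */
		pagezero(CADDR3);
		*CMAP3 = 0;
		sched_unpin();
	}

sched_pin() matters here because the mapping is only invalidated on the local CPU; migrating mid-zero could leave a stale translation in another core's TLB.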
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 71ce72a734d6..c0461ff57453 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -265,7 +265,6 @@ void moea64_remove_write(mmu_t, vm_page_t);
 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_zero_page(mmu_t, vm_page_t);
 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea64_zero_page_idle(mmu_t, vm_page_t);
 void moea64_activate(mmu_t, struct thread *);
 void moea64_deactivate(mmu_t, struct thread *);
 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
@@ -314,7 +313,6 @@ static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_unwire,		moea64_unwire),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
-	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
@@ -1230,13 +1228,6 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)
 	mtx_unlock(&moea64_scratchpage_mtx);
 }
 
-void
-moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-
-	moea64_zero_page(mmu, m);
-}
-
 vm_offset_t
 moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 9322315e5ece..7aa2845e1204 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -130,12 +130,6 @@ static struct mtx zero_page_mutex;
 
 static struct mtx tlbivax_mutex;
 
-/*
- * Reserved KVA space for mmu_booke_zero_page_idle. This is used
- * by idle thred only, no lock required.
- */
-static vm_offset_t zero_page_idle_va;
-
 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
 static vm_offset_t copy_page_src_va;
 static vm_offset_t copy_page_dst_va;
@@ -312,7 +306,6 @@ static void mmu_booke_remove_write(mmu_t, vm_page_t);
 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 static void mmu_booke_zero_page(mmu_t, vm_page_t);
 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
-static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
 static void mmu_booke_activate(mmu_t, struct thread *);
 static void mmu_booke_deactivate(mmu_t, struct thread *);
 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
@@ -371,7 +364,6 @@ static mmu_method_t mmu_booke_methods[] = {
	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
-	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
	MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
@@ -1147,14 +1139,11 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	/* Allocate KVA space for page zero/copy operations. */
 	zero_page_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
-	zero_page_idle_va = virtual_avail;
-	virtual_avail += PAGE_SIZE;
 	copy_page_src_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
 	copy_page_dst_va = virtual_avail;
 	virtual_avail += PAGE_SIZE;
 	debugf("zero_page_va = 0x%08x\n", zero_page_va);
-	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
 	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
 	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
 
@@ -2326,23 +2315,6 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
 	mtx_unlock(&copy_page_mutex);
 }
 
-/*
- * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
- * into virtual memory and using bzero to clear its contents. This is intended
- * to be called from the vm_pagezero process only and outside of Giant. No
- * lock is required.
- */
-static void
-mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-	vm_offset_t va;
-
-	va = zero_page_idle_va;
-	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
-	bzero((caddr_t)va, PAGE_SIZE);
-	mmu_booke_kremove(mmu, va);
-}
-
 static vm_offset_t
 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
 {
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index b2b79a908644..b840ead8d334 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -658,18 +658,6 @@ METHOD void zero_page_area {
 };
 
 
-/**
- * @brief Called from the idle loop to zero pages.  XXX I think locking
- * constraints might be different here compared to zero_page.
- *
- * @param _pg	physical page
- */
-METHOD void zero_page_idle {
-	mmu_t		_mmu;
-	vm_page_t	_pg;
-};
-
-
 /**
  * @brief Extract mincore(2) information from a mapping.
  *
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 35f8d1eda77c..960133bccb16 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -380,14 +380,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
 }
 
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
-	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
-}
-
 int
 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
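The PowerPC pmap is dispatched through kobj: mmu_if.m declares each METHOD, every MMU implementation registers a function in its MMUMETHOD table, and pmap_dispatch.c supplies the machine-independent wrapper, so retiring zero_page_idle has to touch all three layers. The surviving wrappers share the shape sketched here (compare the removed pmap_zero_page_idle() above):

	void
	pmap_zero_page(vm_page_t m)
	{

		CTR2(KTR_PMAP, "%s(%p)", __func__, m);
		MMU_ZERO_PAGE(mmu_obj, m);	/* generated kobj dispatch */
	}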
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index f09fc857a897..4fce538d0597 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -2537,20 +2537,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	bzero((char *)va + off, size);
 }
 
-/*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
- * the page into KVM and using bzero to clear its contents.  This
- * is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-	pagezero((void *)va);
-}
-
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 21800dddc9fe..a82e046ae429 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -223,10 +223,6 @@ PMAP_STATS_VAR(pmap_nzero_page_area);
 PMAP_STATS_VAR(pmap_nzero_page_area_c);
 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
-PMAP_STATS_VAR(pmap_nzero_page_idle);
-PMAP_STATS_VAR(pmap_nzero_page_idle_c);
-PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
-PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
 PMAP_STATS_VAR(pmap_ncopy_page);
 PMAP_STATS_VAR(pmap_ncopy_page_c);
 PMAP_STATS_VAR(pmap_ncopy_page_oc);
@@ -1848,35 +1844,6 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	}
 }
 
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-	struct tte *tp;
-	vm_offset_t va;
-	vm_paddr_t pa;
-
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_zero_page_idle: fake page"));
-	PMAP_STATS_INC(pmap_nzero_page_idle);
-	pa = VM_PAGE_TO_PHYS(m);
-	if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
-		PMAP_STATS_INC(pmap_nzero_page_idle_c);
-		va = TLB_PHYS_TO_DIRECT(pa);
-		cpu_block_zero((void *)va, PAGE_SIZE);
-	} else if (m->md.color == -1) {
-		PMAP_STATS_INC(pmap_nzero_page_idle_nc);
-		aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
-	} else {
-		PMAP_STATS_INC(pmap_nzero_page_idle_oc);
-		va = pmap_idle_map + (m->md.color * PAGE_SIZE);
-		tp = tsb_kvtotte(va);
-		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
-		tp->tte_vpn = TV_VPN(va, TS_8K);
-		cpu_block_zero((void *)va, PAGE_SIZE);
-		tlb_page_demap(kernel_pmap, va);
-	}
-}
-
 void
 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 1d18823a6a19..26117b7318d6 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -153,7 +153,6 @@ boolean_t pmap_ts_referenced(vm_page_t m);
 void		 pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end);
 void		 pmap_zero_page(vm_page_t);
 void		 pmap_zero_page_area(vm_page_t, int off, int size);
-void		 pmap_zero_page_idle(vm_page_t);
 
 #define	pmap_resident_count(pm)	((pm)->pm_stats.resident_count)
 #define	pmap_wired_count(pm)	((pm)->pm_stats.wired_count)
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index cbf412922814..d06cab33f366 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -306,6 +306,3 @@ VM_STATS_VM(v_forkpages, "VM pages affected by fork()");
 VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()");
 VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()");
 VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel");
-
-SYSCTL_INT(_vm_stats_misc, OID_AUTO, zero_page_count, CTLFLAG_RD,
-    &vm_page_zero_count, 0, "Number of zero-ed free pages");
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 47cdc6c0560d..ee52b166f332 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -134,7 +134,6 @@ struct mtx_padalign pa_lock[PA_LOCK_COUNT];
 vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;
-int vm_page_zero_count;
 
 static int boot_pages = UMA_BOOT_PAGES;
 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
@@ -1735,8 +1734,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		KASSERT(m->valid == 0,
 		    ("vm_page_alloc: free page %p is valid", m));
 		vm_phys_freecnt_adj(m, -1);
-		if ((m->flags & PG_ZERO) != 0)
-			vm_page_zero_count--;
 	}
 	mtx_unlock(&vm_page_queue_free_mtx);
 
@@ -2042,8 +2039,6 @@ vm_page_alloc_init(vm_page_t m)
 		KASSERT(m->valid == 0,
 		    ("vm_page_alloc_init: free page %p is valid", m));
 		vm_phys_freecnt_adj(m, -1);
-		if ((m->flags & PG_ZERO) != 0)
-			vm_page_zero_count--;
 	}
 	return (drop);
 }
@@ -2597,7 +2592,6 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
 #endif
 				vm_phys_free_pages(m, 0);
 		} while ((m = SLIST_FIRST(&free)) != NULL);
-		vm_page_zero_idle_wakeup();
 		vm_page_free_wakeup();
 		mtx_unlock(&vm_page_queue_free_mtx);
 	}
@@ -3041,10 +3035,6 @@ vm_page_free_toq(vm_page_t m)
 		if (TRUE)
 #endif
 			vm_phys_free_pages(m, 0);
-		if ((m->flags & PG_ZERO) != 0)
-			++vm_page_zero_count;
-		else
-			vm_page_zero_idle_wakeup();
 		vm_page_free_wakeup();
 		mtx_unlock(&vm_page_queue_free_mtx);
 	}
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index c915598e6573..711de18de24d 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -504,7 +504,6 @@ void vm_page_test_dirty (vm_page_t);
 vm_page_bits_t vm_page_bits(int base, int size);
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 void vm_page_free_toq(vm_page_t m);
-void vm_page_zero_idle_wakeup(void);
 void vm_page_dirty_KBI(vm_page_t m);
 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
 void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index ab48f58820bc..c7299b632f15 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -132,10 +132,6 @@ CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
 CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
 #endif
 
-static int cnt_prezero;
-SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
-    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");
-
 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
 SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
     NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");
@@ -1297,53 +1293,6 @@ vm_phys_unfree_page(vm_page_t m)
 	return (TRUE);
 }
 
-/*
- * Try to zero one physical page.  Used by an idle priority thread.
- */
-boolean_t
-vm_phys_zero_pages_idle(void)
-{
-	static struct vm_freelist *fl;
-	static int flind, oind, pind;
-	vm_page_t m, m_tmp;
-	int domain;
-
-	domain = vm_rr_selectdomain();
-	fl = vm_phys_free_queues[domain][0][0];
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	for (;;) {
-		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
-			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
-				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
-					vm_phys_unfree_page(m_tmp);
-					vm_phys_freecnt_adj(m, -1);
-					mtx_unlock(&vm_page_queue_free_mtx);
-					pmap_zero_page_idle(m_tmp);
-					m_tmp->flags |= PG_ZERO;
-					mtx_lock(&vm_page_queue_free_mtx);
-					vm_phys_freecnt_adj(m, 1);
-					vm_phys_free_pages(m_tmp, 0);
-					vm_page_zero_count++;
-					cnt_prezero++;
-					return (TRUE);
-				}
-			}
-		}
-		oind++;
-		if (oind == VM_NFREEORDER) {
-			oind = 0;
-			pind++;
-			if (pind == VM_NFREEPOOL) {
-				pind = 0;
-				flind++;
-				if (flind == vm_nfreelists)
-					flind = 0;
-			}
-			fl = vm_phys_free_queues[domain][flind][pind];
-		}
-	}
-}
-
 /*
  * Allocate a contiguous set of physical pages of the given size
  * "npages" from the free lists.  All of the physical pages must be at
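vm_phys_zero_pages_idle() shows why the feature carried real complexity: the scan pulled a candidate page out of the free lists, dropped the heavily contended free-queue mutex for the duration of the zeroing, and then re-freed the page with PG_ZERO set. Reduced to its locking skeleton (a simplified sketch; the scan loop and counters are condensed):

	static void
	zero_one_free_page(vm_page_t m)
	{

		mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
		vm_phys_unfree_page(m);		/* hide from allocators */
		mtx_unlock(&vm_page_queue_free_mtx);
		pmap_zero_page_idle(m);		/* zero without the hot lock */
		m->flags |= PG_ZERO;
		mtx_lock(&vm_page_queue_free_mtx);
		vm_phys_free_pages(m, 0);	/* back onto the free lists */
		vm_page_zero_count++;
	}

The vm_page.c hunks above are the other half of the bargain: every allocation and free path had to adjust vm_page_zero_count for PG_ZERO pages to keep the counter honest.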
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index ee4aa2de1f14..2ce5ba3cfe40 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -88,7 +88,6 @@ vm_page_t vm_phys_scan_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary, int options);
 void vm_phys_set_pool(int pool, vm_page_t m, int order);
 boolean_t vm_phys_unfree_page(vm_page_t m);
-boolean_t vm_phys_zero_pages_idle(void);
 int vm_phys_mem_affinity(int f, int t);
 
 /*
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
deleted file mode 100644
index dac4abe3a46e..000000000000
--- a/sys/vm/vm_zeroidle.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*-
- * Copyright (c) 1994 John Dyson
- * Copyright (c) 2001 Matt Dillon
- *
- * All Rights Reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
- * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
- * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
- * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * from: FreeBSD: .../i386/vm_machdep.c,v 1.165 2001/07/04 23:27:04 dillon
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include "opt_sched.h"
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/proc.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/sched.h>
-#include <sys/sysctl.h>
-#include <sys/kthread.h>
-#include <sys/unistd.h>
-
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
-
-static int idlezero_enable_default = 0;
-/* Defer setting the enable flag until the kthread is running. */
-static int idlezero_enable = 0;
-SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RWTUN, &idlezero_enable, 0,
-    "Allow the kernel to use idle cpu cycles to zero-out pages");
-/*
- * Implement the pre-zeroed page mechanism.
- */
-
-#define	ZIDLE_LO(v)	((v) * 2 / 3)
-#define	ZIDLE_HI(v)	((v) * 4 / 5)
-
-static boolean_t wakeup_needed = FALSE;
-static int zero_state;
-
-static int
-vm_page_zero_check(void)
-{
-
-	if (!idlezero_enable)
-		return (0);
-	/*
-	 * Attempt to maintain approximately 1/2 of our free pages in a
-	 * PG_ZERO'd state. Add some hysteresis to (attempt to) avoid
-	 * generally zeroing a page when the system is near steady-state.
-	 * Otherwise we might get 'flutter' during disk I/O / IPC or
-	 * fast sleeps.  We also do not want to be continuously zeroing
-	 * pages because doing so may flush our L1 and L2 caches too much.
-	 */
-	if (zero_state && vm_page_zero_count >= ZIDLE_LO(vm_cnt.v_free_count))
-		return (0);
-	if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
-		return (0);
-	return (1);
-}
-
-static void
-vm_page_zero_idle(void)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	zero_state = 0;
-	if (vm_phys_zero_pages_idle()) {
-		if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
-			zero_state = 1;
-	}
-}
-
-/* Called by vm_page_free to hint that a new page is available. */
-void
-vm_page_zero_idle_wakeup(void)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	if (wakeup_needed && vm_page_zero_check()) {
-		wakeup_needed = FALSE;
-		wakeup(&zero_state);
-	}
-}
-
-static void
-vm_pagezero(void __unused *arg)
-{
-
-	idlezero_enable = idlezero_enable_default;
-
-	mtx_lock(&vm_page_queue_free_mtx);
-	for (;;) {
-		if (vm_page_zero_check()) {
-			vm_page_zero_idle();
-#ifndef PREEMPTION
-			if (sched_runnable()) {
-				thread_lock(curthread);
-				mi_switch(SW_VOL | SWT_IDLE, NULL);
-				thread_unlock(curthread);
-			}
-#endif
-		} else {
-			wakeup_needed = TRUE;
-			msleep(&zero_state, &vm_page_queue_free_mtx, 0,
-			    "pgzero", hz * 300);
-		}
-	}
-}
-
-static void
-pagezero_start(void __unused *arg)
-{
-	int error;
-	struct proc *p;
-	struct thread *td;
-
-	error = kproc_create(vm_pagezero, NULL, &p, RFSTOPPED, 0, "pagezero");
-	if (error)
-		panic("pagezero_start: error %d\n", error);
-	td = FIRST_THREAD_IN_PROC(p);
-	thread_lock(td);
-
-	/* We're an idle task, don't count us in the load. */
-	td->td_flags |= TDF_NOLOAD;
-	sched_class(td, PRI_IDLE);
-	sched_prio(td, PRI_MAX_IDLE);
-	sched_add(td, SRQ_BORING);
-	thread_unlock(td);
-}
-SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL);
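For reference, the hysteresis the deleted thread ran on: it zeroed until the pre-zeroed pool reached ZIDLE_HI (4/5) of the free page count, then slept until the pool fell below ZIDLE_LO (2/3). Restated as a standalone predicate (illustrative only; should_zero is not a kernel symbol):

	#define	ZIDLE_LO(v)	((v) * 2 / 3)
	#define	ZIDLE_HI(v)	((v) * 4 / 5)

	/*
	 * Returns 1 when another page should be zeroed.  With 30000 free
	 * pages the thread runs until 24000 are pre-zeroed (HI), then
	 * sleeps until fewer than 20000 remain zeroed (LO).
	 */
	static int
	should_zero(int zero_state, u_int zero_count, u_int free_count)
	{

		if (zero_state && zero_count >= ZIDLE_LO(free_count))
			return (0);
		if (zero_count >= ZIDLE_HI(free_count))
			return (0);
		return (1);
	}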