freebsd-dev/sys/vm/vm_kern.c


/*-
* SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_kern.c 8.3 (Berkeley) 1/12/94
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
/*
* Kernel memory management.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h> /* for ticks and hz */
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
struct vm_map kernel_map_store;
struct vm_map exec_map_store;
struct vm_map pipe_map_store;
const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
/* NB: Used by kernel debuggers. */
const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;
u_int exec_map_entry_size;
u_int exec_map_entries;
SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");
SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__)
&vm_max_kernel_address, 0,
#else
SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
"Max kernel address");
#if VM_NRESERVLEVEL > 0
#define KVA_QUANTUM_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT)
#else
/* On non-superpage architectures we want large import sizes. */
#define KVA_QUANTUM_SHIFT (8 + PAGE_SHIFT)
#endif
#define KVA_QUANTUM (1 << KVA_QUANTUM_SHIFT)
#define KVA_NUMA_IMPORT_QUANTUM (KVA_QUANTUM * 128)
extern void uma_startup2(void);
/*
* kva_alloc:
*
* Allocate a virtual address range with no underlying object and
* no initial mapping to physical memory. Any mapping from this
* range to physical memory must be explicitly created prior to
* its use, typically with pmap_qenter(). Any attempt to create
* a mapping on demand through vm_fault() will result in a panic.
*/
vm_offset_t
kva_alloc(vm_size_t size)
{
vm_offset_t addr;
size = round_page(size);
if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
return (0);
return (addr);
}
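/*
 * Illustrative usage (not part of this file): as the comment above
 * notes, a consumer of kva_alloc() typically installs its own pages
 * with pmap_qenter() and tears the mapping down before returning the
 * range.  The page array `m' and its size are hypothetical:
 *
 *	vm_offset_t va;
 *	vm_page_t m[4];
 *
 *	va = kva_alloc(4 * PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 *	pmap_qenter(va, m, 4);
 *	...
 *	pmap_qremove(va, 4);
 *	kva_free(va, 4 * PAGE_SIZE);
 */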
/*
* kva_free:
*
* Release a region of kernel virtual address space allocated
* with kva_alloc. Note that this routine frees only the virtual
* address range; any physical pages that the caller mapped into
* the region must be unmapped and released by the caller
* beforehand.
*
* This routine may not block.
*/
void
kva_free(vm_offset_t addr, vm_size_t size)
{
size = round_page(size);
vmem_free(kernel_arena, addr, size);
}
static vm_page_t
kmem_alloc_contig_pages(vm_object_t object, vm_pindex_t pindex, int domain,
int pflags, u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{
vm_page_t m;
int tries;
bool wait;
VM_OBJECT_ASSERT_WLOCKED(object);
wait = (pflags & VM_ALLOC_WAITOK) != 0;
pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
pflags |= VM_ALLOC_NOWAIT;
for (tries = wait ? 3 : 1;; tries--) {
m = vm_page_alloc_contig_domain(object, pindex, domain, pflags,
npages, low, high, alignment, boundary, memattr);
if (m != NULL || tries == 0)
break;
VM_OBJECT_WUNLOCK(object);
if (!vm_page_reclaim_contig_domain(domain, pflags, npages,
low, high, alignment, boundary) && wait)
vm_wait_domain(domain);
VM_OBJECT_WLOCK(object);
}
return (m);
}
/*
* Allocates a region from the kernel address map and physical pages
* within the specified address range to the kernel object. Creates a
* wired mapping from this region to these pages, and returns the
* region's starting virtual address. The allocated pages are not
* necessarily physically contiguous. If M_ZERO is specified through the
* given flags, then the pages are zeroed before they are mapped.
*/
static vm_offset_t
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
vmem_t *vmem;
vm_object_t object;
vm_offset_t addr, i, offset;
vm_page_t m;
int pflags;
vm_prot_t prot;
object = kernel_object;
size = round_page(size);
vmem = vm_dom[domain].vmd_kernel_arena;
if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
VM_OBJECT_WLOCK(object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = kmem_alloc_contig_pages(object, atop(offset + i),
domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
kmem_unback(object, addr, i);
vmem_free(vmem, addr, size);
return (0);
}
KASSERT(vm_phys_domain(m) == domain,
("kmem_alloc_attr_domain: Domain mismatch %d != %d",
vm_phys_domain(m), domain));
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
vm_page_valid(m);
pmap_enter(kernel_pmap, addr + i, m, prot,
prot | PMAP_ENTER_WIRED, 0);
}
VM_OBJECT_WUNLOCK(object);
return (addr);
}
vm_offset_t
kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
vm_memattr_t memattr)
{
return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low,
high, memattr));
}
vm_offset_t
kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
memattr);
if (addr != 0)
break;
} while (vm_domainset_iter_policy(&di, &domain) == 0);
return (addr);
}
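/*
 * Illustrative usage (not part of this file): a driver limited to
 * 32-bit DMA addresses might request zeroed pages below 4GB; `size'
 * is a hypothetical name here:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(size, M_WAITOK | M_ZERO, 0,
 *	    (vm_paddr_t)0xffffffff, VM_MEMATTR_DEFAULT);
 *
 * The region is later released with kmem_free(va, size).
 */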
/*
* Allocates a region from the kernel address map and physically
* contiguous pages within the specified address range to the kernel
* object. Creates a wired mapping from this region to these pages, and
* returns the region's starting virtual address. If M_ZERO is specified
* through the given flags, then the pages are zeroed before they are
* mapped.
*/
static vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
vmem_t *vmem;
vm_object_t object;
vm_offset_t addr, offset, tmp;
vm_page_t end_m, m;
u_long npages;
int pflags;
object = kernel_object;
size = round_page(size);
vmem = vm_dom[domain].vmd_kernel_arena;
if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
npages = atop(size);
VM_OBJECT_WLOCK(object);
m = kmem_alloc_contig_pages(object, atop(offset), domain,
pflags, npages, low, high, alignment, boundary, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
vmem_free(vmem, addr, size);
return (0);
}
KASSERT(vm_phys_domain(m) == domain,
("kmem_alloc_contig_domain: Domain mismatch %d != %d",
vm_phys_domain(m), domain));
end_m = m + npages;
tmp = addr;
for (; m < end_m; m++) {
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
vm_page_valid(m);
pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
VM_PROT_RW | PMAP_ENTER_WIRED, 0);
tmp += PAGE_SIZE;
}
VM_OBJECT_WUNLOCK(object);
return (addr);
}
vm_offset_t
kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{
return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low,
high, alignment, boundary, memattr));
}
vm_offset_t
kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
alignment, boundary, memattr);
if (addr != 0)
break;
} while (vm_domainset_iter_policy(&di, &domain) == 0);
return (addr);
}
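/*
 * Illustrative usage (not part of this file): request a physically
 * contiguous buffer, page-aligned and not crossing a 64KB boundary,
 * as some DMA engines require; `size' is a hypothetical name:
 *
 *	va = kmem_alloc_contig(size, M_WAITOK, 0, ~(vm_paddr_t)0,
 *	    PAGE_SIZE, 64 * 1024, VM_MEMATTR_DEFAULT);
 */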
/*
* kmem_subinit:
*
* Initializes a map to manage a subrange
* of the kernel virtual address space.
*
* Arguments are as follows:
*
* parent Map to take range from
* min, max Returned endpoints of map
* size Size of range to find
* superpage_align Request that min is superpage aligned
*/
void
kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
vm_size_t size, bool superpage_align)
{
int ret;
size = round_page(size);
*min = vm_map_min(parent);
ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
MAP_ACC_NO_CHARGE);
if (ret != KERN_SUCCESS)
panic("kmem_subinit: bad status return of %d", ret);
*max = *min + size;
vm_map_init(map, vm_map_pmap(parent), *min, *max);
if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
panic("kmem_subinit: unable to change range to submap");
}
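/*
 * Illustrative sketch (not part of this file): at boot, submaps such
 * as exec_map and pipe_map are carved out of kernel_map in roughly
 * this way; `exec_map_size' is a hypothetical name:
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
 *	    exec_map_size, false);
 */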
/*
* kmem_malloc_domain:
*
* Allocate wired-down pages in the kernel's address space.
*/
static vm_offset_t
kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
vmem_t *arena;
vm_offset_t addr;
int rv;
if (__predict_true((flags & M_EXEC) == 0))
arena = vm_dom[domain].vmd_kernel_arena;
else
arena = vm_dom[domain].vmd_kernel_rwx_arena;
size = round_page(size);
if (vmem_alloc(arena, size, flags | M_BESTFIT, &addr))
return (0);
rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
if (rv != KERN_SUCCESS) {
vmem_free(arena, addr, size);
return (0);
}
return (addr);
}
vm_offset_t
kmem_malloc(vm_size_t size, int flags)
{
return (kmem_malloc_domainset(DOMAINSET_RR(), size, flags));
}
vm_offset_t
kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
do {
addr = kmem_malloc_domain(domain, size, flags);
if (addr != 0)
break;
} while (vm_domainset_iter_policy(&di, &domain) == 0);
return (addr);
}
/*
* kmem_back_domain:
*
* Allocate physical pages from the specified domain for the specified
* virtual address range.
*/
int
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
vm_size_t size, int flags)
{
vm_offset_t offset, i;
vm_page_t m, mpred;
vm_prot_t prot;
int pflags;
KASSERT(object == kernel_object,
("kmem_back_domain: only supports kernel object."));
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
if (flags & M_WAITOK)
pflags |= VM_ALLOC_WAITFAIL;
prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
i = 0;
VM_OBJECT_WLOCK(object);
retry:
mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
for (; i < size; i += PAGE_SIZE, mpred = m) {
m = vm_page_alloc_domain_after(object, atop(offset + i),
domain, pflags, mpred);
/*
* Ran out of space, free everything up and return. Don't need
* to lock page queues here as we know that the pages we got
* aren't on any queues.
*/
if (m == NULL) {
if ((flags & M_NOWAIT) == 0)
goto retry;
VM_OBJECT_WUNLOCK(object);
kmem_unback(object, addr, i);
return (KERN_NO_SPACE);
}
KASSERT(vm_phys_domain(m) == domain,
("kmem_back_domain: Domain mismatch %d != %d",
vm_phys_domain(m), domain));
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
vm_page_valid(m);
pmap_enter(kernel_pmap, addr + i, m, prot,
prot | PMAP_ENTER_WIRED, 0);
if (__predict_false((prot & VM_PROT_EXECUTE) != 0))
m->oflags |= VPO_KMEM_EXEC;
}
VM_OBJECT_WUNLOCK(object);
return (KERN_SUCCESS);
}
/*
* kmem_back:
*
* Allocate physical pages for the specified virtual address range.
*/
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
vm_offset_t end, next, start;
int domain, rv;
KASSERT(object == kernel_object,
("kmem_back: only supports kernel object."));
for (start = addr, end = addr + size; addr < end; addr = next) {
/*
* We must ensure that pages backing a given large virtual page
* all come from the same physical domain.
*/
if (vm_ndomains > 1) {
domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
while (VM_DOMAIN_EMPTY(domain))
domain++;
next = roundup2(addr + 1, KVA_QUANTUM);
if (next > end || next < start)
next = end;
} else {
domain = 0;
next = end;
}
rv = kmem_back_domain(domain, object, addr, next - addr, flags);
if (rv != KERN_SUCCESS) {
kmem_unback(object, start, addr - start);
break;
}
}
return (rv);
}
/*
* kmem_unback:
*
* Unmap and free the physical pages underlying the specified virtual
* address range.
*
* A physical page must exist within the specified object at each index
* that is being unmapped.
*/
static struct vmem *
_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
struct vmem *arena;
vm_page_t m, next;
vm_offset_t end, offset;
int domain;
KASSERT(object == kernel_object,
("kmem_unback: only supports kernel object."));
if (size == 0)
return (NULL);
pmap_remove(kernel_pmap, addr, addr + size);
offset = addr - VM_MIN_KERNEL_ADDRESS;
end = offset + size;
VM_OBJECT_WLOCK(object);
m = vm_page_lookup(object, atop(offset));
domain = vm_phys_domain(m);
if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0))
arena = vm_dom[domain].vmd_kernel_arena;
else
arena = vm_dom[domain].vmd_kernel_rwx_arena;
for (; offset < end; offset += PAGE_SIZE, m = next) {
next = vm_page_next(m);
vm_page_xbusy_claim(m);
vm_page_unwire_noq(m);
vm_page_free(m);
}
VM_OBJECT_WUNLOCK(object);
return (arena);
}
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
(void)_kmem_unback(object, addr, size);
}
/*
* kmem_free:
*
* Free memory allocated with kmem_malloc. The size must match the
* original allocation.
*/
void
kmem_free(vm_offset_t addr, vm_size_t size)
{
struct vmem *arena;
size = round_page(size);
arena = _kmem_unback(kernel_object, addr, size);
if (arena != NULL)
vmem_free(arena, addr, size);
}
/*
* kmap_alloc_wait:
*
* Allocates pageable memory from a sub-map of the kernel. If the submap
* has no room, the caller sleeps waiting for more memory in the submap.
*
* This routine may block.
*/
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
vm_offset_t addr;
size = round_page(size);
if (!swap_reserve(size))
return (0);
for (;;) {
/*
* To make this work for more than one map, use the map's lock
* to lock out sleepers/wakers.
*/
vm_map_lock(map);
addr = vm_map_findspace(map, vm_map_min(map), size);
if (addr + size <= vm_map_max(map))
break;
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {
vm_map_unlock(map);
swap_release(size);
return (0);
}
map->needs_wakeup = TRUE;
vm_map_unlock_and_wait(map, 0);
}
vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_RW, VM_PROT_RW,
MAP_ACC_CHARGED);
vm_map_unlock(map);
return (addr);
}
/*
* kmap_free_wakeup:
*
* Returns memory to a submap of the kernel, and wakes up any processes
* waiting for memory in that map.
*/
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
vm_map_lock(map);
(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
if (map->needs_wakeup) {
map->needs_wakeup = FALSE;
vm_map_wakeup(map);
}
vm_map_unlock(map);
}
void
kmem_init_zero_region(void)
{
vm_offset_t addr, i;
vm_page_t m;
/*
* Map a single physical page of zeros to a larger virtual range.
* This requires less looping in places that want large amounts of
* zeros, while not using much more physical resources.
*/
addr = kva_alloc(ZERO_REGION_SIZE);
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
pmap_qenter(addr + i, &m, 1);
pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);
zero_region = (const void *)addr;
}
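/*
 * Illustrative usage (not part of this file): consumers copy out of
 * zero_region in ZERO_REGION_SIZE chunks rather than zeroing and
 * copying a private buffer, e.g. when satisfying a read of a zero
 * device:
 *
 *	len = MIN(resid, ZERO_REGION_SIZE);
 *	error = uiomove(__DECONST(void *, zero_region), len, uio);
 */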
/*
* Import KVA from the kernel map into the kernel arena.
*/
static int
kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
vm_offset_t addr;
int result;
KASSERT((size % KVA_QUANTUM) == 0,
("kva_import: Size %jd is not a multiple of %d",
(intmax_t)size, (int)KVA_QUANTUM));
addr = vm_map_min(kernel_map);
result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
if (result != KERN_SUCCESS)
return (ENOMEM);
*addrp = addr;
return (0);
}
/*
* Import KVA from a parent arena into a per-domain arena. Imports must be
* KVA_QUANTUM-aligned and a multiple of KVA_QUANTUM in size.
*/
static int
kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
KASSERT((size % KVA_QUANTUM) == 0,
("kva_import_domain: Size %jd is not a multiple of %d",
(intmax_t)size, (int)KVA_QUANTUM));
return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN,
VMEM_ADDR_MAX, flags, addrp));
}
/*
* kmem_init:
*
* Create the kernel map; insert a mapping covering kernel text,
* data, bss, and all space allocated thus far (`bootstrap' data). The
* new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
* `start' as allocated, and the range between `start' and `end' as free.
* Create the kernel vmem arena and its per-domain children.
*/
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
vm_size_t quantum;
int domain;
vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
kernel_map->system_map = 1;
vm_map_lock(kernel_map);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
(void)vm_map_insert(kernel_map, NULL, 0,
#ifdef __amd64__
KERNBASE,
#else
VM_MIN_KERNEL_ADDRESS,
#endif
start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
/* ... and ending with the completion of the above `insert' */
#ifdef __amd64__
/*
* Mark KVA used for the page array as allocated. Other platforms
* that handle vm_page_array allocation can simply adjust virtual_avail
* instead.
*/
(void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,
(vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
sizeof(struct vm_page)),
VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
#endif
vm_map_unlock(kernel_map);
/*
* Use a large import quantum on NUMA systems. This helps minimize
* interleaving of superpages, reducing internal fragmentation within
* the per-domain arenas.
*/
if (vm_ndomains > 1 && PMAP_HAS_DMAP)
quantum = KVA_NUMA_IMPORT_QUANTUM;
else
quantum = KVA_QUANTUM;
/*
* Initialize the kernel_arena. This can grow on demand.
*/
vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
vmem_set_import(kernel_arena, kva_import, NULL, NULL, quantum);
for (domain = 0; domain < vm_ndomains; domain++) {
/*
* Initialize the per-domain arenas. These are used to color
* the KVA space in a way that ensures that virtual large pages
* are backed by memory from the same physical domain,
* maximizing the potential for superpage promotion.
*/
vm_dom[domain].vmd_kernel_arena = vmem_create(
"kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_arena,
kva_import_domain, NULL, kernel_arena, quantum);
/*
* In architectures with superpages, maintain separate arenas
* for allocations with permissions that differ from the
* "standard" read/write permissions used for kernel memory,
* so as not to inhibit superpage promotion.
*
* Use the base import quantum since this arena is rarely used.
*/
#if VM_NRESERVLEVEL > 0
vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
"kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
kva_import_domain, (vmem_release_t *)vmem_xfree,
kernel_arena, KVA_QUANTUM);
#else
vm_dom[domain].vmd_kernel_rwx_arena =
vm_dom[domain].vmd_kernel_arena;
#endif
}
/*
* This must be the very first call so that the virtual address
* space used for early allocations is properly marked used in
* the map.
*/
uma_startup2();
}
/*
* kmem_bootstrap_free:
*
* Free pages backing preloaded data (e.g., kernel modules) to the
* system. Currently only supported on platforms that create a
* vm_phys segment for preloaded data.
*/
void
kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
{
#if defined(__i386__) || defined(__amd64__)
struct vm_domain *vmd;
vm_offset_t end, va;
vm_paddr_t pa;
vm_page_t m;
end = trunc_page(start + size);
start = round_page(start);
#ifdef __amd64__
/*
* Preloaded files do not have execute permissions by default on amd64.
* Restore the default permissions to ensure that the direct map alias
* is updated.
*/
pmap_change_prot(start, end - start, VM_PROT_RW);
#endif
for (va = start; va < end; va += PAGE_SIZE) {
pa = pmap_kextract(va);
m = PHYS_TO_VM_PAGE(pa);
vmd = vm_pagequeue_domain(m);
vm_domain_free_lock(vmd);
vm_phys_free_pages(m, 0);
vm_domain_free_unlock(vmd);
vm_domain_freecnt_inc(vmd, 1);
vm_cnt.v_page_count++;
}
pmap_remove(kernel_pmap, start, end);
(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
#endif
}
/*
* Allow userspace to directly trigger the VM drain routine for testing
* purposes.
*/
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
int error, i;
i = 0;
error = sysctl_handle_int(oidp, &i, 0, req);
if (error)
return (error);
if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
return (EINVAL);
if (i != 0)
EVENTHANDLER_INVOKE(vm_lowmem, i);
return (0);
}
SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0,
debug_vm_lowmem, "I", "set to trigger vm_lowmem event with given flags");
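/*
 * Illustrative usage (not part of this file): from userspace, writing
 * a value composed of the VM_LOW_KMEM and/or VM_LOW_PAGES flag bits
 * fires the corresponding vm_lowmem event, e.g.:
 *
 *	sysctl debug.vm_lowmem=1
 */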