Eliminate every mention of PG_CACHED pages from the comments in the
machine-independent layer of the virtual memory system.  Update some of the
nearby comments to eliminate redundancy and improve clarity.

In vm/vm_reserv.c, do not use hyphens after adverbs ending in -ly per
The Chicago Manual of Style.

Update the comment in vm/vm_page.h defining the four types of page queues to
reflect the elimination of PG_CACHED pages and the introduction of the
laundry queue.

Reviewed by:	kib, markj
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D8752
Alan Cox 2016-12-12 17:47:09 +00:00
parent 9f0136cb26
commit 3453bca864
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=309898
6 changed files with 72 additions and 112 deletions

vm/vm_map.c

@@ -1858,9 +1858,7 @@ vm_map_submap(
* limited number of page mappings are created at the low-end of the
* specified address range. (For this purpose, a superpage mapping
* counts as one page mapping.) Otherwise, all resident pages within
* the specified address range are mapped. Because these mappings are
* being created speculatively, cached pages are not reactivated and
* mapped.
* the specified address range are mapped.
*/
static void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
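For illustration only, the policy described in that comment can be modeled in a few lines of standalone C. The cap, the superpage size, and the helpers below are invented for the sketch; this is not the kernel's vm_map.c code.

#include <stdbool.h>
#include <stddef.h>

#define TOY_MAX_MAPPINGS  96	/* hypothetical cap on speculative mappings */
#define TOY_SUPERPAGE	  512	/* small pages per superpage in this model */

/*
 * Walk the resident pages from the low end of the range and stop once a
 * limited number of mappings has been created.  A fully resident,
 * superpage-aligned run is entered as a single mapping and counts as just
 * one toward the cap.
 */
static size_t
toy_speculative_map(const bool *resident, size_t npages, bool *mapped)
{
	size_t i = 0, j, created = 0;

	while (i < npages && created < TOY_MAX_MAPPINGS) {
		if (!resident[i]) {		/* nothing to map here */
			i++;
			continue;
		}
		/* Is this the start of a fully resident superpage run? */
		if (i % TOY_SUPERPAGE == 0 && i + TOY_SUPERPAGE <= npages) {
			for (j = i; j < i + TOY_SUPERPAGE && resident[j]; j++)
				;
			if (j == i + TOY_SUPERPAGE) {
				/* Map the whole run; it counts as one mapping. */
				for (j = i; j < i + TOY_SUPERPAGE; j++)
					mapped[j] = true;
				i += TOY_SUPERPAGE;
				created++;
				continue;
			}
		}
		mapped[i] = true;		/* ordinary small-page mapping */
		i++;
		created++;
	}
	return (created);
}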

vm/vm_object.c

@@ -1356,7 +1356,7 @@ vm_object_split(vm_map_entry_t entry)
goto retry;
}
/* vm_page_rename() will handle dirty and cache. */
/* vm_page_rename() will dirty the page. */
if (vm_page_rename(m, new_object, idx)) {
VM_OBJECT_WUNLOCK(new_object);
VM_OBJECT_WUNLOCK(orig_object);
@@ -1446,7 +1446,7 @@ vm_object_scan_all_shadowed(vm_object_t object)
/*
* Initial conditions:
*
* We do not want to have to test for the existence of cache or swap
* We do not want to have to test for the existence of swap
* pages in the backing object. XXX but with the new swapper this
* would be pretty easy to do.
*/
@@ -1590,8 +1590,7 @@ vm_object_collapse_scan(vm_object_t object, int op)
* backing object to the main object.
*
* If the page was mapped to a process, it can remain mapped
* through the rename. vm_page_rename() will handle dirty and
* cache.
* through the rename. vm_page_rename() will dirty the page.
*/
if (vm_page_rename(p, object, new_pindex)) {
next = vm_object_collapse_scan_wait(object, NULL, next,

vm/vm_object.h

@@ -79,17 +79,6 @@
*
* vm_object_t Virtual memory object.
*
* The root of cached pages pool is protected by both the per-object lock
* and the free pages queue mutex.
* On insert in the cache radix trie, the per-object lock is expected
* to be already held and the free pages queue mutex will be
* acquired during the operation too.
* On remove and lookup from the cache radix trie, only the free
* pages queue mutex is expected to be locked.
* These rules allow for reliably checking for the presence of cached
* pages with only the per-object lock held, thereby reducing contention
* for the free pages queue mutex.
*
* List of locks
* (c) const until freed
* (o) per-object lock

vm/vm_page.c

@@ -1409,9 +1409,7 @@ vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
*
* Note: we *always* dirty the page. It is necessary both for the
* fact that we moved it, and because we may be invalidating
* swap. If the page is on the cache, we have to deactivate it
* or vm_page_dirty() will panic. Dirty pages are not allowed
* on the cache.
* swap.
*
* The objects must be locked.
*/
@@ -2042,18 +2040,18 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
} else if (level >= 0) {
/*
* The page is reserved but not yet allocated. In
* other words, it is still cached or free. Extend
* the current run by one page.
* other words, it is still free. Extend the current
* run by one page.
*/
run_ext = 1;
#endif
} else if ((order = m->order) < VM_NFREEORDER) {
/*
* The page is enqueued in the physical memory
* allocator's cache/free page queues. Moreover, it
* is the first page in a power-of-two-sized run of
* contiguous cache/free pages. Add these pages to
* the end of the current run, and jump ahead.
* allocator's free page queues. Moreover, it is the
* first page in a power-of-two-sized run of
* contiguous free pages. Add these pages to the end
* of the current run, and jump ahead.
*/
run_ext = 1 << order;
m_inc = 1 << order;
@@ -2061,16 +2059,15 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
/*
* Skip the page for one of the following reasons: (1)
* It is enqueued in the physical memory allocator's
* cache/free page queues. However, it is not the
* first page in a run of contiguous cache/free pages.
* (This case rarely occurs because the scan is
* performed in ascending order.) (2) It is not
* reserved, and it is transitioning from free to
* allocated. (Conversely, the transition from
* allocated to free for managed pages is blocked by
* the page lock.) (3) It is allocated but not
* contained by an object and not wired, e.g.,
* allocated by Xen's balloon driver.
* free page queues. However, it is not the first
* page in a run of contiguous free pages. (This case
* rarely occurs because the scan is performed in
* ascending order.) (2) It is not reserved, and it is
* transitioning from free to allocated. (Conversely,
* the transition from allocated to free for managed
* pages is blocked by the page lock.) (3) It is
* allocated but not contained by an object and not
* wired, e.g., allocated by Xen's balloon driver.
*/
run_ext = 0;
}
@@ -2282,11 +2279,11 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
if (order < VM_NFREEORDER) {
/*
* The page is enqueued in the physical memory
* allocator's cache/free page queues.
* Moreover, it is the first page in a power-
* of-two-sized run of contiguous cache/free
* pages. Jump ahead to the last page within
* that run, and continue from there.
* allocator's free page queues. Moreover, it
* is the first page in a power-of-two-sized
* run of contiguous free pages. Jump ahead
* to the last page within that run, and
* continue from there.
*/
m += (1 << order) - 1;
}
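The jump-ahead logic in these hunks leans on the buddy allocator tagging only the first page of each 2^order free block with its order. The standalone toy sketch below mirrors that scan with invented struct and constant names; it is simplified relative to the real vm_page_scan_contig().

#include <stdbool.h>
#include <stddef.h>

#define TOY_NFREEORDER	13	/* stand-in for VM_NFREEORDER */

struct toy_page {
	int	order;		/* < TOY_NFREEORDER: head of a 2^order free run */
	bool	reserved;	/* free, but still part of a reservation */
};

/* Count the free pages in the run starting at index "start". */
static size_t
toy_scan_free_run(const struct toy_page *pages, size_t npages, size_t start)
{
	size_t i = start, run = 0;

	while (i < npages) {
		if (pages[i].reserved) {
			/* Reserved but unallocated: still free, extend by one. */
			run++;
			i++;
		} else if (pages[i].order < TOY_NFREEORDER) {
			/*
			 * Head of a power-of-two-sized free block: the whole
			 * block is free, so count it and jump past it.
			 */
			run += (size_t)1 << pages[i].order;
			i += (size_t)1 << pages[i].order;
		} else {
			/*
			 * Allocated, in transition, or free but not a block
			 * head: the current run ends here (the kernel starts
			 * a new run past such pages; this sketch just stops).
			 */
			break;
		}
	}
	return (run);
}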
@@ -2334,9 +2331,9 @@ CTASSERT(powerof2(NRUNS));
* conditions by relocating the virtual pages using that physical memory.
* Returns true if reclamation is successful and false otherwise. Since
* relocation requires the allocation of physical pages, reclamation may
* fail due to a shortage of cache/free pages. When reclamation fails,
* callers are expected to perform VM_WAIT before retrying a failed
* allocation operation, e.g., vm_page_alloc_contig().
* fail due to a shortage of free pages. When reclamation fails, callers
* are expected to perform VM_WAIT before retrying a failed allocation
* operation, e.g., vm_page_alloc_contig().
*
* The caller must always specify an allocation class through "req".
*
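Putting that contract together, a caller typically loops: allocate, reclaim on failure, and VM_WAIT when reclamation also fails. The sketch below is a kernel-context illustration rather than a drop-in routine; the prototypes are believed to match this era of vm/vm_page.h but should be checked against the tree, the include list is abbreviated, and the object locking that the real functions require (and that must be dropped around VM_WAIT) is omitted.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

static vm_page_t
alloc_contig_retry(vm_object_t object, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_page_t m;

	for (;;) {
		m = vm_page_alloc_contig(object, pindex, VM_ALLOC_NORMAL,
		    npages, low, high, alignment, boundary,
		    VM_MEMATTR_DEFAULT);
		if (m != NULL)
			return (m);		/* got the contiguous run */
		if (!vm_page_reclaim_contig(VM_ALLOC_NORMAL, npages, low,
		    high, alignment, boundary))
			VM_WAIT;		/* reclamation failed too */
		/* Either way, retry the allocation. */
	}
}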
@@ -2371,8 +2368,8 @@ vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
req_class = VM_ALLOC_SYSTEM;
/*
* Return if the number of cached and free pages cannot satisfy the
* requested allocation.
* Return if the number of free pages cannot satisfy the requested
* allocation.
*/
count = vm_cnt.v_free_count;
if (count < npages + vm_cnt.v_free_reserved || (count < npages +
@@ -2642,9 +2639,8 @@ vm_page_activate(vm_page_t m)
/*
* vm_page_free_wakeup:
*
* Helper routine for vm_page_free_toq() and vm_page_cache(). This
* routine is called when a page has been added to the cache or free
* queues.
* Helper routine for vm_page_free_toq(). This routine is called
* when a page is added to the free queues.
*
* The page queues must be locked.
*/
@@ -2732,8 +2728,8 @@ vm_page_free_toq(vm_page_t m)
pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
/*
* Insert the page into the physical memory allocator's
* cache/free page queues.
* Insert the page into the physical memory allocator's free
* page queues.
*/
mtx_lock(&vm_page_queue_free_mtx);
vm_phys_freecnt_adj(m, 1);
@@ -2833,21 +2829,10 @@ vm_page_unwire(vm_page_t m, uint8_t queue)
/*
* Move the specified page to the inactive queue.
*
* Many pages placed on the inactive queue should actually go
* into the cache, but it is difficult to figure out which. What
* we do instead, if the inactive target is well met, is to put
* clean pages at the head of the inactive queue instead of the tail.
* This will cause them to be moved to the cache more quickly and
* if not actively re-referenced, reclaimed more quickly. If we just
* stick these pages at the end of the inactive queue, heavy filesystem
* meta-data accesses can cause an unnecessary paging load on memory bound
* processes. This optimization causes one-time-use metadata to be
* reused more quickly.
*
* Normally noreuse is FALSE, resulting in LRU operation. noreuse is set
* to TRUE if we want this page to be 'as if it were placed in the cache',
* except without unmapping it from the process address space. In
* practice this is implemented by inserting the page at the head of the
* Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive
* queue. However, setting "noreuse" to TRUE will accelerate the specified
* page's reclamation, but it will not unmap the page from any address space.
* This is implemented by inserting the page near the head of the inactive
* queue, using a marker page to guide FIFO insertion ordering.
*
* The page must be locked.
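The marker trick is easy to model with sys/queue.h: a permanent marker entry sits near the head, "noreuse" pages are inserted just before it (so they keep FIFO order among themselves and are scanned first), and ordinary deactivation appends at the tail. Toy code with invented names, not the kernel's vm_pagequeue implementation:

#include <sys/queue.h>
#include <stdbool.h>

struct toy_page {
	TAILQ_ENTRY(toy_page) q;
};

static TAILQ_HEAD(, toy_page) toy_inactive =
    TAILQ_HEAD_INITIALIZER(toy_inactive);
static struct toy_page toy_marker;	/* lives on the queue permanently */

static void
toy_inactive_init(void)
{
	/* The marker starts at the head; noreuse pages gather ahead of it. */
	TAILQ_INSERT_HEAD(&toy_inactive, &toy_marker, q);
}

static void
toy_deactivate(struct toy_page *p, bool noreuse)
{
	if (noreuse) {
		/*
		 * Insert just before the marker: such pages end up near the
		 * head in FIFO order, so the page daemon examines (and
		 * likely reclaims) them well before the rest of the queue.
		 */
		TAILQ_INSERT_BEFORE(&toy_marker, p, q);
	} else {
		/* Plain deactivation: tail insertion gives LRU ordering. */
		TAILQ_INSERT_TAIL(&toy_inactive, p, q);
	}
}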
@@ -2974,16 +2959,9 @@ vm_page_advise(vm_page_t m, int advice)
if (advice == MADV_FREE)
/*
* Mark the page clean. This will allow the page to be freed
* up by the system. However, such pages are often reused
* quickly by malloc() so we do not do anything that would
* cause a page fault if we can help it.
*
* Specifically, we do not try to actually free the page now
* nor do we try to put it in the cache (which would cause a
* page fault on reuse).
*
* But we do make the page as freeable as we can without
* actually taking the step of unmapping it.
* without first paging it out. MADV_FREE pages are often
* quickly reused by malloc(3), so we do not do anything that
* would result in a page fault on a later access.
*/
vm_page_undirty(m);
else if (advice != MADV_DONTNEED)
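This is the machinery behind madvise(2) with MADV_FREE as seen from userland: the mapping stays intact and writable, but the kernel may discard the contents instead of laundering them if memory runs short. A minimal, self-contained illustration (page size hardcoded for brevity):

#include <sys/mman.h>
#include <err.h>
#include <string.h>

int
main(void)
{
	size_t len = 16 * 4096;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	memset(p, 'x', len);			/* dirty the pages */

	/*
	 * Declare the contents disposable.  The pages stay mapped; if the
	 * system needs memory they are simply freed rather than written to
	 * swap, and a later read sees either zeroes or the old data.
	 */
	if (madvise(p, len, MADV_FREE) == -1)
		err(1, "madvise");

	p[0] = 'y';				/* still a valid access */
	return (0);
}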

vm/vm_page.h

@@ -352,19 +352,16 @@ extern struct mtx_padalign pa_lock[];
* free
* Available for allocation now.
*
* cache
* Almost available for allocation. Still associated with
* an object, but clean and immediately freeable.
*
* The following lists are LRU sorted:
*
* inactive
* Low activity, candidates for reclamation.
* This list is approximately LRU ordered.
*
* laundry
* This is the list of pages that should be
* paged out next.
*
* active
* Pages that are "active" i.e. they have been
* Pages that are "active", i.e., they have been
* recently referenced.
*
*/
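A toy rendering of these categories: three real LRU lists (inactive, laundry, active) plus a placeholder for pages owned by the physical allocator. The kernel's actual queue indices live in vm/vm_page.h as PQ_INACTIVE, PQ_LAUNDRY, and PQ_ACTIVE; the names below are invented.

#include <sys/queue.h>

enum toy_queue_id {
	TQ_NONE,	/* free: held by the physical memory allocator */
	TQ_INACTIVE,	/* low activity, reclaimed first (roughly LRU) */
	TQ_LAUNDRY,	/* dirty pages to be paged out before reuse */
	TQ_ACTIVE	/* recently referenced pages */
};

struct toy_page {
	TAILQ_ENTRY(toy_page)	q;
	enum toy_queue_id	queue;
};

TAILQ_HEAD(toy_pagequeue, toy_page);

static struct toy_pagequeue toy_queues[TQ_ACTIVE + 1];

static void
toy_queues_init(void)
{
	/* Only the three paging queues are lists; TQ_NONE is a placeholder. */
	TAILQ_INIT(&toy_queues[TQ_INACTIVE]);
	TAILQ_INIT(&toy_queues[TQ_LAUNDRY]);
	TAILQ_INIT(&toy_queues[TQ_ACTIVE]);
}

static void
toy_enqueue(struct toy_page *p, enum toy_queue_id qid)
{
	p->queue = qid;
	/* Newest entries go to the tail, so the head is closest to LRU. */
	TAILQ_INSERT_TAIL(&toy_queues[qid], p, q);
}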

vm/vm_reserv.c

@@ -62,7 +62,7 @@ __FBSDID("$FreeBSD$");
/*
* The reservation system supports the speculative allocation of large physical
* pages ("superpages"). Speculative allocation enables the fully-automatic
* pages ("superpages"). Speculative allocation enables the fully automatic
* utilization of superpages by the virtual memory system. In other words, no
* programmatic directives are required to use superpages.
*/
@@ -155,11 +155,11 @@ popmap_is_set(popmap_t popmap[], int i)
* physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
* within that object. The reservation's "popcnt" tracks the number of these
* small physical pages that are in use at any given time. When and if the
* reservation is not fully utilized, it appears in the queue of partially-
* reservation is not fully utilized, it appears in the queue of partially
* populated reservations. The reservation always appears on the containing
* object's list of reservations.
*
* A partially-populated reservation can be broken and reclaimed at any time.
* A partially populated reservation can be broken and reclaimed at any time.
*/
struct vm_reserv {
TAILQ_ENTRY(vm_reserv) partpopq;
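The popmap/popcnt pair is just a fixed-size bitmap plus a running count of its set bits. The standalone sketch below mirrors the idea behind the popmap_set/popmap_clear/popmap_is_set helpers named in this file, with invented sizes and names:

#include <limits.h>
#include <stdbool.h>

#define TOY_NPAGES	512	/* small pages per reservation in this model */
#define TOY_BPW		(sizeof(unsigned long) * CHAR_BIT)
#define TOY_NWORDS	((TOY_NPAGES + TOY_BPW - 1) / TOY_BPW)

struct toy_reserv {
	unsigned long	popmap[TOY_NWORDS];	/* one bit per small page */
	int		popcnt;			/* pages currently in use */
};

static bool
toy_is_populated(const struct toy_reserv *rv, int i)
{
	return ((rv->popmap[i / TOY_BPW] >> (i % TOY_BPW)) & 1UL) != 0;
}

static void
toy_populate(struct toy_reserv *rv, int i)
{
	if (!toy_is_populated(rv, i)) {
		rv->popmap[i / TOY_BPW] |= 1UL << (i % TOY_BPW);
		rv->popcnt++;		/* popcnt tracks the set bits */
	}
}

static void
toy_depopulate(struct toy_reserv *rv, int i)
{
	if (toy_is_populated(rv, i)) {
		rv->popmap[i / TOY_BPW] &= ~(1UL << (i % TOY_BPW));
		rv->popcnt--;
	}
}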
@@ -196,11 +196,11 @@ struct vm_reserv {
static vm_reserv_t vm_reserv_array;
/*
* The partially-populated reservation queue
* The partially populated reservation queue
*
* This queue enables the fast recovery of an unused cached or free small page
* from a partially-populated reservation. The reservation at the head of
* this queue is the least-recently-changed, partially-populated reservation.
* This queue enables the fast recovery of an unused free small page from a
* partially populated reservation. The reservation at the head of this queue
* is the least recently changed, partially populated reservation.
*
* Access to this queue is synchronized by the free page queue lock.
*/
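The queue discipline described here: any population change moves a reservation to the tail, so the head is always the least recently changed candidate, and reclamation pulls from the head. A toy model using sys/queue.h (names invented, independent of the popmap sketch above):

#include <sys/queue.h>
#include <stdbool.h>

#define TOY_NPAGES	512

struct toy_rv {
	TAILQ_ENTRY(toy_rv) partpopq;
	int	popcnt;		/* 0 = unused, TOY_NPAGES = fully populated */
	bool	inpartpopq;	/* currently linked on the queue? */
};

static TAILQ_HEAD(, toy_rv) toy_partpop =
    TAILQ_HEAD_INITIALIZER(toy_partpop);

/* Call after any change to rv->popcnt. */
static void
toy_requeue(struct toy_rv *rv)
{
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&toy_partpop, rv, partpopq);
		rv->inpartpopq = false;
	}
	if (rv->popcnt > 0 && rv->popcnt < TOY_NPAGES) {
		/* Partially populated: becomes the most recently changed. */
		TAILQ_INSERT_TAIL(&toy_partpop, rv, partpopq);
		rv->inpartpopq = true;
	}
}

/* Reclamation breaks the head: the least recently changed reservation. */
static struct toy_rv *
toy_reclaim_candidate(void)
{
	return (TAILQ_FIRST(&toy_partpop));
}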
@@ -225,7 +225,7 @@ SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");
sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");
static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
@@ -267,7 +267,7 @@ sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
}
/*
* Describes the current state of the partially-populated reservation queue.
* Describes the current state of the partially populated reservation queue.
*/
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
@@ -301,7 +301,7 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
/*
* Reduces the given reservation's population count. If the population count
* becomes zero, the reservation is destroyed. Additionally, moves the
* reservation to the tail of the partially-populated reservation queue if the
* reservation to the tail of the partially populated reservation queue if the
* population count is non-zero.
*
* The free page queue lock must be held.
@@ -363,7 +363,7 @@ vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
/*
* Increases the given reservation's population count. Moves the reservation
* to the tail of the partially-populated reservation queue.
* to the tail of the partially populated reservation queue.
*
* The free page queue must be locked.
*/
@@ -597,7 +597,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
}
/*
* Allocates a page from an existing or newly-created reservation.
* Allocates a page from an existing or newly created reservation.
*
* The page "mpred" must immediately precede the offset "pindex" within the
* specified object.
@@ -721,12 +721,12 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
}
/*
* Breaks the given reservation. Except for the specified cached or free
* page, all cached and free pages in the reservation are returned to the
* physical memory allocator. The reservation's population count and map are
* reset to their initial state.
* Breaks the given reservation. Except for the specified free page, all free
* pages in the reservation are returned to the physical memory allocator.
* The reservation's population count and map are reset to their initial
* state.
*
* The given reservation must not be in the partially-populated reservation
* The given reservation must not be in the partially populated reservation
* queue. The free page queue lock must be held.
*/
static void
@@ -895,7 +895,7 @@ vm_reserv_level(vm_page_t m)
}
/*
* Returns a reservation level if the given page belongs to a fully-populated
* Returns a reservation level if the given page belongs to a fully populated
* reservation and -1 otherwise.
*/
int
@@ -908,8 +908,8 @@ vm_reserv_level_iffullpop(vm_page_t m)
}
/*
* Breaks the given partially-populated reservation, releasing its cached and
* free pages to the physical memory allocator.
* Breaks the given partially populated reservation, releasing its free pages
* to the physical memory allocator.
*
* The free page queue lock must be held.
*/
@@ -927,9 +927,9 @@ vm_reserv_reclaim(vm_reserv_t rv)
}
/*
* Breaks the reservation at the head of the partially-populated reservation
* queue, releasing its cached and free pages to the physical memory
* allocator. Returns TRUE if a reservation is broken and FALSE otherwise.
* Breaks the reservation at the head of the partially populated reservation
* queue, releasing its free pages to the physical memory allocator. Returns
* TRUE if a reservation is broken and FALSE otherwise.
*
* The free page queue lock must be held.
*/
@@ -947,11 +947,10 @@ vm_reserv_reclaim_inactive(void)
}
/*
* Searches the partially-populated reservation queue for the least recently
* active reservation with unused pages, i.e., cached or free, that satisfy the
* given request for contiguous physical memory. If a satisfactory reservation
* is found, it is broken. Returns TRUE if a reservation is broken and FALSE
* otherwise.
* Searches the partially populated reservation queue for the least recently
* changed reservation with free pages that satisfy the given request for
* contiguous physical memory. If a satisfactory reservation is found, it is
* broken. Returns TRUE if a reservation is broken and FALSE otherwise.
*
* The free page queue lock must be held.
*/
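The search itself reduces to finding a long-enough, suitably aligned run of clear bits in a reservation's popmap; the boundary and physical-address checks the kernel also applies are left out of this standalone sketch, which reuses the bitmap layout from the earlier popmap example (align is assumed to be at least 1):

#include <limits.h>
#include <stdbool.h>

#define TOY_NPAGES	512
#define TOY_BPW		(sizeof(unsigned long) * CHAR_BIT)
#define TOY_NWORDS	((TOY_NPAGES + TOY_BPW - 1) / TOY_BPW)

static bool
toy_bit_is_set(const unsigned long popmap[TOY_NWORDS], int i)
{
	return ((popmap[i / TOY_BPW] >> (i % TOY_BPW)) & 1UL) != 0;
}

/*
 * Return the index of the first run of "npages" clear (free) bits that
 * starts at a multiple of "align" pages, or -1 if no such run exists.
 */
static int
toy_find_free_run(const unsigned long popmap[TOY_NWORDS], int npages,
    int align)
{
	int i, run = 0;

	for (i = 0; i < TOY_NPAGES; i++) {
		if (toy_bit_is_set(popmap, i)) {
			run = 0;	/* a populated page breaks the run */
			continue;
		}
		if (run == 0 && i % align != 0)
			continue;	/* runs may only start aligned */
		if (++run == npages)
			return (i - npages + 1);
	}
	return (-1);
}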