Change the type of the map entry's next_read field from a vm_pindex_t to a
vm_offset_t.  (This field is used to detect sequential access to the virtual
address range represented by the map entry.)  There are three reasons to
make this change.  First, a vm_offset_t is smaller than a vm_pindex_t on
32-bit architectures.  Consequently, a struct vm_map_entry is now smaller on
32-bit architectures.
Second, a vm_offset_t can be written atomically, whereas it may not be
possible to write a vm_pindex_t atomically on a 32-bit architecture.  Third,
using a vm_pindex_t makes the next_read field dependent on which object in
the shadow chain is being read from.

Replace an "XXX" comment.
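
Note: the third point is the subtle one.  A faulting virtual address is
translated into a page index relative to a particular VM object, and that
index shifts by backing_object_offset at each level of the shadow chain, so a
pindex-based next_read would record different values depending on which
object ends up supplying the page.  The sketch below is illustrative only,
not code from this commit; the helper name pindex_at_depth and the depth
parameter are hypothetical, and the usual sys/vm headers are assumed to be in
scope.

/*
 * Illustrative sketch, not code from this commit: the page index that
 * corresponds to a given faulting address depends on which object in
 * the shadow chain is being read, while the address itself does not.
 * "depth" is a hypothetical parameter selecting the shadow level.
 */
static vm_pindex_t
pindex_at_depth(vm_map_entry_t entry, vm_object_t first_object,
    vm_offset_t vaddr, int depth)
{
	vm_object_t object;
	vm_pindex_t pindex;

	/* Page index of vaddr within the entry's top-level object. */
	pindex = OFF_TO_IDX(entry->offset + (vaddr - entry->start));

	/* Each shadow level shifts the index by its backing offset. */
	for (object = first_object;
	    depth > 0 && object->backing_object != NULL;
	    depth--, object = object->backing_object)
		pindex += OFF_TO_IDX(object->backing_object_offset);

	return (pindex);
}

Comparing the faulting virtual address against a vm_offset_t next_read avoids
this object-relative bookkeeping entirely.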

Reviewed by:	kib
Approved by:	re (gjb)
Sponsored by:	EMC / Isilon Storage Division
Alan Cox 2016-07-07 20:58:16 +00:00
parent 932d4e19e6
commit 381b724280
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=302399
3 changed files with 10 additions and 10 deletions

sys/vm/vm_fault.c

@@ -570,9 +570,9 @@ RetryFault:;
 			behind = 0;
 			nera = VM_FAULT_READ_AHEAD_MAX;
 			ahead = nera;
-			if (fs.pindex == fs.entry->next_read)
+			if (vaddr == fs.entry->next_read)
 				vm_fault_dontneed(&fs, vaddr, ahead);
-		} else if (fs.pindex == fs.entry->next_read) {
+		} else if (vaddr == fs.entry->next_read) {
 			/*
 			 * This is a sequential fault.  Arithmetically
 			 * increase the requested number of pages in
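
For reference, the test above amounts to something like the following sketch;
the helper name is hypothetical and is not part of the commit.  Because
next_read is now a virtual address, the comparison no longer depends on which
object in the shadow chain the fault handler is reading from.

/*
 * Sketch of the sequential-access test after this change (hypothetical
 * helper, not from the commit).  A fault is considered sequential when
 * it lands exactly on the virtual address predicted by the previous
 * fault against the same map entry.
 */
static bool
vm_fault_is_sequential(vm_map_entry_t entry, vm_offset_t vaddr)
{

	return (vaddr == entry->next_read);
}

On a hit, the fault handler then grows the read-ahead window, up to
VM_FAULT_READ_AHEAD_MAX, as the comment in the hunk above describes.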
@@ -927,15 +927,15 @@ RetryFault:;
 			prot &= retry_prot;
 		}
 	}
 	/*
-	 * If the page was filled by a pager, update the map entry's
-	 * last read offset.
-	 *
-	 * XXX The following assignment modifies the map
-	 * without holding a write lock on it.
+	 * If the page was filled by a pager, save the virtual address that
+	 * should be faulted on next under a sequential access pattern to the
+	 * map entry.  A read lock on the map suffices to update this address
+	 * safely.
 	 */
 	if (hardfault)
-		fs.entry->next_read = fs.pindex + ahead + 1;
+		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;
 	vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, TRUE);
 	vm_page_assert_xbusied(fs.m);

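The old and new assignments above encode the same prediction in different
units: the old form stored the page index just past the read-ahead window,
the new form stores the virtual address just past it.  A minimal sketch with
example numbers follows; the helper name is hypothetical, while ptoa() and
PAGE_SIZE are the standard kernel macros.

/*
 * Sketch (hypothetical helper, not from the commit): the address
 * expected to fault next under a sequential access pattern is the
 * first address past the pages just read in.
 */
static vm_offset_t
next_read_after(vm_offset_t vaddr, int ahead)
{

	/*
	 * ptoa(ahead) is the read-ahead window size in bytes; one more
	 * PAGE_SIZE steps past the faulting page itself.  With 4 KB
	 * pages, vaddr = 0x10000 and ahead = 7 give
	 * 0x10000 + 0x7000 + 0x1000 = 0x18000, i.e., the page right
	 * after the window.
	 */
	return (vaddr + ptoa(ahead) + PAGE_SIZE);
}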
sys/vm/vm_map.c

@@ -1330,7 +1330,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 	new_entry->wired_count = 0;
 	new_entry->wiring_thread = NULL;
 	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
-	new_entry->next_read = OFF_TO_IDX(offset);
+	new_entry->next_read = start;
 	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
 	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));

sys/vm/vm_map.h

@@ -104,6 +104,7 @@ struct vm_map_entry {
 	vm_offset_t start;		/* start address */
 	vm_offset_t end;		/* end address */
 	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
+	vm_offset_t next_read;		/* vaddr of the next sequential read */
 	vm_size_t adj_free;		/* amount of adjacent free space */
 	vm_size_t max_free;		/* max free space in subtree */
 	union vm_map_object object;	/* object I point to */
@@ -114,7 +115,6 @@ struct vm_map_entry {
 	vm_inherit_t inheritance;	/* inheritance */
 	uint8_t read_ahead;		/* pages in the read-ahead window */
 	int wired_count;		/* can be paged if = 0 */
-	vm_pindex_t next_read;		/* index of the next sequential read */
 	struct ucred *cred;		/* tmp storage for creator ref */
 	struct thread *wiring_thread;
 };
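
The first two reasons in the commit message come down to type widths.  The
compile-time checks below are a sketch of that assumption, not part of the
commit: vm_pindex_t is a 64-bit integer on every FreeBSD architecture, while
vm_offset_t has the machine's pointer width, so on a 32-bit machine the field
shrinks from 8 to 4 bytes and a single aligned store of it cannot be torn.

/*
 * Sketch only, not from the commit: compile-time statement of the type
 * widths behind reasons one and two.
 */
#include <sys/types.h>

_Static_assert(sizeof(vm_pindex_t) == 8,
    "vm_pindex_t is 64 bits on all architectures");
_Static_assert(sizeof(vm_offset_t) == sizeof(void *),
    "vm_offset_t has the width of a pointer");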