vm: consistently prefix fault helpers with vm_fault_

Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D39029
commit 0a310c94ee
parent 90bc2120b5
Author: Mateusz Guzik
Date:   2023-03-12 18:38:29 +00:00


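The change is mechanical: each static helper local to the fault handler gains the vm_fault_ prefix, matching the vm_fault*() functions that already follow that convention. A minimal stand-alone sketch of the idea (hypothetical example code with a stripped-down faultstate, not taken from vm_fault.c):

#include <stdio.h>

/* Hypothetical, stripped-down stand-in for the real struct faultstate. */
struct faultstate {
	int lookup_still_valid;	/* map lookup still valid */
	int vp_held;		/* vnode reference still held */
};

/*
 * Previously these would have carried generic names such as unlock_map()
 * and unlock_vp(); the vm_fault_ prefix marks them as private pieces of
 * the fault-handling machinery.
 */
static void
vm_fault_unlock_map(struct faultstate *fs)
{
	fs->lookup_still_valid = 0;
}

static void
vm_fault_unlock_vp(struct faultstate *fs)
{
	fs->vp_held = 0;
}

int
main(void)
{
	struct faultstate fs = { 1, 1 };

	vm_fault_unlock_map(&fs);
	vm_fault_unlock_vp(&fs);
	printf("map lookup valid: %d, vnode held: %d\n",
	    fs.lookup_still_valid, fs.vp_held);
	return (0);
}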
@@ -195,7 +195,7 @@ SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
"the page fault handler");
static inline void
-fault_page_release(vm_page_t *mp)
+vm_fault_page_release(vm_page_t *mp)
{
vm_page_t m;
@@ -213,7 +213,7 @@ fault_page_release(vm_page_t *mp)
}
static inline void
-fault_page_free(vm_page_t *mp)
+vm_fault_page_free(vm_page_t *mp)
{
vm_page_t m;
@@ -235,7 +235,7 @@ fault_page_free(vm_page_t *mp)
* be checked quickly.
*/
static inline bool
-fault_object_needs_getpages(vm_object_t object)
+vm_fault_object_needs_getpages(vm_object_t object)
{
VM_OBJECT_ASSERT_LOCKED(object);
@@ -244,7 +244,7 @@ fault_object_needs_getpages(vm_object_t object)
}
static inline void
-unlock_map(struct faultstate *fs)
+vm_fault_unlock_map(struct faultstate *fs)
{
if (fs->lookup_still_valid) {
@@ -254,7 +254,7 @@ unlock_map(struct faultstate *fs)
}
static void
-unlock_vp(struct faultstate *fs)
+vm_fault_unlock_vp(struct faultstate *fs)
{
if (fs->vp != NULL) {
@@ -264,29 +264,29 @@ unlock_vp(struct faultstate *fs)
}
static void
-fault_deallocate(struct faultstate *fs)
+vm_fault_deallocate(struct faultstate *fs)
{
-fault_page_release(&fs->m_cow);
-fault_page_release(&fs->m);
+vm_fault_page_release(&fs->m_cow);
+vm_fault_page_release(&fs->m);
vm_object_pip_wakeup(fs->object);
if (fs->object != fs->first_object) {
VM_OBJECT_WLOCK(fs->first_object);
-fault_page_free(&fs->first_m);
+vm_fault_page_free(&fs->first_m);
VM_OBJECT_WUNLOCK(fs->first_object);
vm_object_pip_wakeup(fs->first_object);
}
vm_object_deallocate(fs->first_object);
-unlock_map(fs);
-unlock_vp(fs);
+vm_fault_unlock_map(fs);
+vm_fault_unlock_vp(fs);
}
static void
-unlock_and_deallocate(struct faultstate *fs)
+vm_fault_unlock_and_deallocate(struct faultstate *fs)
{
VM_OBJECT_UNLOCK(fs->object);
-fault_deallocate(fs);
+vm_fault_deallocate(fs);
}
static void
@@ -471,8 +471,8 @@ vm_fault_populate(struct faultstate *fs)
pager_first = OFF_TO_IDX(fs->entry->offset);
pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
-unlock_map(fs);
-unlock_vp(fs);
+vm_fault_unlock_map(fs);
+vm_fault_unlock_vp(fs);
res = FAULT_SUCCESS;
@@ -781,7 +781,7 @@ vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
* Perform an unlock in case the desired vnode changed while
* the map was unlocked during a retry.
*/
-unlock_vp(fs);
+vm_fault_unlock_vp(fs);
locked = VOP_ISLOCKED(vp);
if (locked != LK_EXCLUSIVE)
@@ -801,9 +801,9 @@ vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
vhold(vp);
if (objlocked)
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
else
-fault_deallocate(fs);
+vm_fault_deallocate(fs);
error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
vdrop(vp);
fs->vp = vp;
@@ -875,7 +875,7 @@ vm_fault_lookup(struct faultstate *fs)
VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
&fs->first_pindex, &fs->prot, &fs->wired);
if (result != KERN_SUCCESS) {
-unlock_vp(fs);
+vm_fault_unlock_vp(fs);
return (result);
}
@@ -892,7 +892,7 @@ vm_fault_lookup(struct faultstate *fs)
vm_map_lock(fs->map);
if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
(fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
-unlock_vp(fs);
+vm_fault_unlock_vp(fs);
fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
vm_map_unlock_and_wait(fs->map, 0);
} else
@@ -1095,10 +1095,10 @@ vm_fault_next(struct faultstate *fs)
} else if (fs->m != NULL) {
if (!vm_fault_object_ensure_wlocked(fs)) {
fs->can_read_lock = false;
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
return (FAULT_NEXT_RESTART);
}
-fault_page_free(&fs->m);
+vm_fault_page_free(&fs->m);
}
/*
@@ -1163,7 +1163,7 @@ vm_fault_allocate_oom(struct faultstate *fs)
{
struct timeval now;
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
if (vm_pfault_oom_attempts < 0)
return (true);
if (!fs->oom_started) {
@@ -1203,7 +1203,7 @@ vm_fault_allocate(struct faultstate *fs)
}
if (fs->pindex >= fs->object->size) {
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
return (FAULT_OUT_OF_BOUNDS);
}
@@ -1215,7 +1215,7 @@ vm_fault_allocate(struct faultstate *fs)
case FAULT_SUCCESS:
case FAULT_FAILURE:
case FAULT_RESTART:
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
return (res);
case FAULT_CONTINUE:
/*
@@ -1248,7 +1248,7 @@ vm_fault_allocate(struct faultstate *fs)
vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) {
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
return (FAULT_FAILURE);
}
fs->m = vm_page_alloc(fs->object, fs->pindex,
@@ -1309,7 +1309,7 @@ vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
* a shadow, then an earlier iteration of this loop
* may have already unlocked the map.)
*/
-unlock_map(fs);
+vm_fault_unlock_map(fs);
status = vm_fault_lock_vnode(fs, false);
MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
@@ -1364,8 +1364,8 @@ vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
*/
if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
VM_OBJECT_WLOCK(fs->object);
-fault_page_free(&fs->m);
-unlock_and_deallocate(fs);
+vm_fault_page_free(&fs->m);
+vm_fault_unlock_and_deallocate(fs);
return (FAULT_OUT_OF_BOUNDS);
}
KASSERT(rv == VM_PAGER_FAIL,
@@ -1396,11 +1396,11 @@ vm_fault_busy_sleep(struct faultstate *fs)
*/
vm_page_aflag_set(fs->m, PGA_REFERENCED);
if (fs->object != fs->first_object) {
-fault_page_release(&fs->first_m);
+vm_fault_page_release(&fs->first_m);
vm_object_pip_wakeup(fs->first_object);
}
vm_object_pip_wakeup(fs->object);
-unlock_map(fs);
+vm_fault_unlock_map(fs);
if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
!vm_page_busy_sleep(fs->m, "vmpfw", 0))
VM_OBJECT_UNLOCK(fs->object);
@@ -1434,7 +1434,7 @@ vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
*/
if ((fs->object->flags & OBJ_DEAD) != 0) {
dead = fs->object->type == OBJT_DEAD;
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
if (dead)
return (FAULT_PROTECTION_FAILURE);
pause("vmf_de", 1);
@@ -1467,11 +1467,11 @@ vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
* or this is the beginning of the search, allocate a new
* page.
*/
-if (fs->m == NULL && (fault_object_needs_getpages(fs->object) ||
+if (fs->m == NULL && (vm_fault_object_needs_getpages(fs->object) ||
fs->object == fs->first_object)) {
if (!vm_fault_object_ensure_wlocked(fs)) {
fs->can_read_lock = false;
-unlock_and_deallocate(fs);
+vm_fault_unlock_and_deallocate(fs);
return (FAULT_RESTART);
}
res = vm_fault_allocate(fs);
@@ -1484,7 +1484,7 @@ vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
* If not, skip to the next object without dropping the lock to
* preserve atomicity of shadow faults.
*/
-if (fault_object_needs_getpages(fs->object)) {
+if (vm_fault_object_needs_getpages(fs->object)) {
/*
* At this point, we have either allocated a new page
* or found an existing page that is only partially
@@ -1647,8 +1647,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
MPASS(res_next == FAULT_NEXT_NOOBJ);
if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
if (fs.first_object == fs.object)
-fault_page_free(&fs.first_m);
-unlock_and_deallocate(&fs);
+vm_fault_page_free(&fs.first_m);
+vm_fault_unlock_and_deallocate(&fs);
return (KERN_OUT_OF_BOUNDS);
}
VM_OBJECT_UNLOCK(fs.object);
@@ -1698,7 +1698,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
if (!fs.lookup_still_valid) {
rv = vm_fault_relookup(&fs);
if (rv != KERN_SUCCESS) {
-fault_deallocate(&fs);
+vm_fault_deallocate(&fs);
if (rv == KERN_RESTART)
goto RetryFault;
return (rv);
@@ -1757,7 +1757,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
/*
* Unlock everything, and return
*/
-fault_deallocate(&fs);
+vm_fault_deallocate(&fs);
if (hardfault) {
VM_CNT_INC(v_io_faults);
curthread->td_ru.ru_majflt++;
@@ -1909,7 +1909,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
if (!obj_locked)
VM_OBJECT_RLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
-!fault_object_needs_getpages(lobject) &&
+!vm_fault_object_needs_getpages(lobject) &&
(backing_object = lobject->backing_object) != NULL) {
KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
0, ("vm_fault_prefault: unaligned object offset"));