In do_sendfile(), replace vm_page_sleep_busy() with vm_page_sleep_if_busy(),
and extend the scope of the page queues lock to cover all accesses
to the page's flags and busy fields.
This commit is contained in:
Alan Cox 2002-07-30 18:51:07 +00:00
parent e66c87b70e
commit 1161b86a15
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=100962

View File

@ -1788,17 +1788,17 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
VM_WAIT;
goto retry_lookup;
}
vm_page_lock_queues();
vm_page_wakeup(pg);
} else {
if (vm_page_sleep_busy(pg, TRUE, "sfpbsy"))
vm_page_lock_queues();
if (vm_page_sleep_if_busy(pg, TRUE, "sfpbsy"))
goto retry_lookup;
/*
* Wire the page so it does not get ripped out from
* under us.
*/
vm_page_lock_queues();
vm_page_wire(pg);
vm_page_unlock_queues();
}
/*
@ -1813,6 +1813,7 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
* completes.
*/
vm_page_io_start(pg);
vm_page_unlock_queues();
/*
* Get the page from backing store.
@ -1824,10 +1825,10 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
IO_VMIO | ((MAXBSIZE / bsize) << 16),
td->td_ucred, NULL, td);
VOP_UNLOCK(vp, 0, td);
vm_page_lock_queues();
vm_page_flag_clear(pg, PG_ZERO);
vm_page_io_finish(pg);
if (error) {
vm_page_lock_queues();
vm_page_unwire(pg, 0);
/*
* See if anyone else might know about this page.
@ -1844,7 +1845,7 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
goto done;
}
}
vm_page_unlock_queues();
/*
* Get a sendfile buf. We usually wait as long as necessary,