Remove the deprecated VM_ALLOC_RETRY flag from vm_page_grab(9).
The flag was mandatory since r209792, where vm_page_grab(9) was changed to only support the alloc retry semantic.

Suggested and reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
commit 5944de8ecd
parent b105165323
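For context, here is a minimal before/after sketch of a typical vm_page_grab(9) call site affected by this change; the object, index, and page variable names are hypothetical and not taken from the diff below.

    /* Before this commit: callers had to pass the mandatory VM_ALLOC_RETRY flag. */
    m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

    /* After this commit: the retry behavior is implied and the flag no longer exists. */
    m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);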
@@ -539,7 +539,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 int bytes = MIN(PAGESIZE, len);
 
 pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY |
-    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY);
+    VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
 if (pp->valid == 0) {
 zfs_vmobject_wunlock(obj);
 va = zfs_map_page(pp, &sf);
@@ -555,7 +555,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
  * the pages will be allocated and zeroed.
  */
 m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
-    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
+    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
 }
 VM_OBJECT_WUNLOCK(mem->am_obj);
@@ -1970,7 +1970,7 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
  */
 VM_OBJECT_WLOCK(mem->am_obj);
 m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY |
-    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
+    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 VM_OBJECT_WUNLOCK(mem->am_obj);
 mem->am_physical = VM_PAGE_TO_PHYS(m);
 } else {
@@ -2099,9 +2099,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 obj->gtt_space = NULL;
 /*
  * i915_gem_object_get_pages_gtt() cannot return
- * ENOMEM, since we use vm_page_grab(VM_ALLOC_RETRY)
- * (which does not support operation without a flag
- * anyway).
+ * ENOMEM, since we use vm_page_grab().
  */
 return (ret);
 }
@@ -2516,7 +2514,7 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
 int rv;
 
 VM_OBJECT_ASSERT_WLOCKED(object);
-m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 if (m->valid != VM_PAGE_BITS_ALL) {
 if (vm_pager_has_page(object, pindex, NULL, NULL)) {
 rv = vm_pager_get_pages(object, &m, 1, 0);
@@ -288,7 +288,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 VM_OBJECT_WLOCK(obj);
 vm_object_pip_add(obj, 1);
 for (i = 0; i < ttm->num_pages; ++i) {
-from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
+from_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
 if (from_page->valid != VM_PAGE_BITS_ALL) {
 if (vm_pager_has_page(obj, i, NULL, NULL)) {
 rv = vm_pager_get_pages(obj, &from_page, 1, 0);
@@ -351,7 +351,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
 from_page = ttm->pages[i];
 if (unlikely(from_page == NULL))
 continue;
-to_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
+to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
 pmap_copy_page(from_page, to_page);
 vm_page_dirty(to_page);
 to_page->valid = VM_PAGE_BITS_ALL;
@@ -826,8 +826,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 vm_object_pip_add(sc->object, 1);
 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
-m = vm_page_grab(sc->object, i, VM_ALLOC_NORMAL |
-    VM_ALLOC_RETRY);
+m = vm_page_grab(sc->object, i, VM_ALLOC_NORMAL);
 if (bp->bio_cmd == BIO_READ) {
 if (m->valid == VM_PAGE_BITS_ALL)
 rv = VM_PAGER_OK;
@@ -934,7 +934,7 @@ exec_map_first_page(imgp)
 object->pg_color = 0;
 }
 #endif
-ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL);
 if (ma[0]->valid != VM_PAGE_BITS_ALL) {
 initial_pagein = VM_INITIAL_PAGEIN;
 if (initial_pagein > object->size)
@@ -108,8 +108,7 @@ shared_page_init(void *dummy __unused)
 shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
     VM_PROT_DEFAULT, 0, NULL);
 VM_OBJECT_WLOCK(shared_page_obj);
-m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_NOBUSY |
-    VM_ALLOC_ZERO);
+m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_ZERO);
 m->valid = VM_PAGE_BITS_ALL;
 VM_OBJECT_WUNLOCK(shared_page_obj);
 addr = kva_alloc(PAGE_SIZE);
@@ -165,7 +165,7 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
  * lock to page out tobj's pages because tobj is a OBJT_SWAP
  * type object.
  */
-m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
 if (m->valid != VM_PAGE_BITS_ALL) {
 if (vm_pager_has_page(obj, idx, NULL, NULL)) {
 rv = vm_pager_get_pages(obj, &m, 1, 0);
@@ -2230,7 +2230,7 @@ retry_space:
 pindex = OFF_TO_IDX(off);
 VM_OBJECT_WLOCK(obj);
 pg = vm_page_grab(obj, pindex, VM_ALLOC_NOBUSY |
-    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY);
+    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
 
 /*
  * Check if page is valid for what we need,
@@ -3490,7 +3490,7 @@ allocbuf(struct buf *bp, int size)
 m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
     bp->b_npages, VM_ALLOC_NOBUSY |
     VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
-    VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY |
+    VM_ALLOC_IGN_SBUSY |
     VM_ALLOC_COUNT(desiredpages - bp->b_npages));
 if (m->valid == 0)
 bp->b_flags &= ~B_CACHE;
@@ -1230,7 +1230,7 @@ pmap_pinit(pmap_t pm)
 VM_OBJECT_WLOCK(pm->pm_tsb_obj);
 for (i = 0; i < TSB_PAGES; i++) {
 m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
-    VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 m->valid = VM_PAGE_BITS_ALL;
 m->md.pmap = pm;
 ma[i] = m;
@@ -1711,7 +1711,7 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
 vm_page_t m;
 
 vm_object_pip_add(object, 1);
-m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 if (m->valid == VM_PAGE_BITS_ALL) {
 vm_object_pip_subtract(object, 1);
 vm_page_dirty(m);
@@ -233,7 +233,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 
 VM_OBJECT_WLOCK(object);
 pindex = OFF_TO_IDX(offset);
-m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 if (m->valid != VM_PAGE_BITS_ALL) {
 ma[0] = m;
 rv = vm_pager_get_pages(object, ma, 1, 0);
@@ -395,7 +395,7 @@ vm_thread_new(struct thread *td, int pages)
  * Get a kernel stack page.
  */
 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
-    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
+    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
 ma[i] = m;
 m->valid = VM_PAGE_BITS_ALL;
 }
@@ -527,7 +527,7 @@ vm_thread_swapin(struct thread *td)
 ksobj = td->td_kstack_obj;
 VM_OBJECT_WLOCK(ksobj);
 for (i = 0; i < pages; i++)
-ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
+ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
     VM_ALLOC_WIRED);
 for (i = 0; i < pages; i++) {
 if (ma[i]->valid != VM_PAGE_BITS_ALL) {
@@ -2034,8 +2034,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 
 VM_OBJECT_ASSERT_WLOCKED(object);
 for (pindex = start; pindex < end; pindex++) {
-m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
-    VM_ALLOC_RETRY);
+m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 if (m->valid != VM_PAGE_BITS_ALL) {
 ma[0] = m;
 rv = vm_pager_get_pages(object, ma, 1, 0);
@@ -2685,9 +2685,6 @@ vm_page_advise(vm_page_t m, int advice)
  * to be in the object. If the page doesn't exist, first allocate it
  * and then conditionally zero it.
  *
- * The caller must always specify the VM_ALLOC_RETRY flag. This is intended
- * to facilitate its eventual removal.
- *
  * This routine may sleep.
 *
  * The object must be locked on entry. The lock will, however, be released
@@ -2700,8 +2697,6 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 int sleep;
 
 VM_OBJECT_ASSERT_WLOCKED(object);
-KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
-    ("vm_page_grab: VM_ALLOC_RETRY is required"));
 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
     (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
     ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
@@ -2735,8 +2730,7 @@ retrylookup:
 return (m);
 }
 }
-m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
-    VM_ALLOC_IGN_SBUSY));
+m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_IGN_SBUSY);
 if (m == NULL) {
 VM_OBJECT_WUNLOCK(object);
 VM_WAIT;
@@ -389,7 +389,6 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 /* page allocation flags: */
 #define VM_ALLOC_WIRED 0x0020 /* non pageable */
 #define VM_ALLOC_ZERO 0x0040 /* Try to obtain a zeroed page */
-#define VM_ALLOC_RETRY 0x0080 /* Mandatory with vm_page_grab() */
 #define VM_ALLOC_NOOBJ 0x0100 /* No associated object */
 #define VM_ALLOC_NOBUSY 0x0200 /* Do not busy the page */
 #define VM_ALLOC_IFCACHED 0x0400 /* Fail if the page is not cached */
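A possible compatibility sketch, not part of this commit: out-of-tree modules that must build both before and after the flag removal can define the flag to zero when the header no longer provides it, since the retry semantic is now always implied.

    /* Hypothetical shim for third-party code spanning old and new kernel headers. */
    #ifndef VM_ALLOC_RETRY
    #define VM_ALLOC_RETRY 0 /* removed flag; retry is now the only behavior */
    #endif

    /* Old kernels require the flag; on new kernels OR-ing in zero is a no-op. */
    m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);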