o Retire vm_page_zero_fill() and vm_page_zero_fill_area(). Ever since
  pmap_zero_page() and pmap_zero_page_area() were modified to accept
  a struct vm_page * instead of a physical address, vm_page_zero_fill()
  and vm_page_zero_fill_area() have served no purpose.
Alan Cox 2002-08-25 00:22:31 +00:00
parent fd559a8a39
commit fff6062ab6
Notes: svn2git 2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=102382
11 changed files with 10 additions and 37 deletions
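
Every hunk below makes the same mechanical substitution: drop the retired
wrapper and call the pmap routine directly. As a sketch of the pattern
(using the variable name m as in the agp call sites):

	/* Before: zero the page through the wrapper. */
	if ((m->flags & PG_ZERO) == 0)
		vm_page_zero_fill(m);

	/* After: call the pmap layer directly; the wrapper was a one-line
	 * pass-through (see the vm_page.c hunk), so behavior is unchanged. */
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

The same substitution maps vm_page_zero_fill_area(m, base, size) to
pmap_zero_page_area(m, base, size) in the vnode pager hunk.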

@@ -423,7 +423,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		if ((m->flags & PG_ZERO) == 0)
-			vm_page_zero_fill(m);
+			pmap_zero_page(m);
 		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
 
 		/*

@@ -373,7 +373,7 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
 		m = vm_page_grab(mem->am_obj, 0,
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		if ((m->flags & PG_ZERO) == 0)
-			vm_page_zero_fill(m);
+			pmap_zero_page(m);
 		vm_page_lock_queues();
 		mem->am_physical = VM_PAGE_TO_PHYS(m);
 		vm_page_wakeup(m);

@@ -423,7 +423,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		if ((m->flags & PG_ZERO) == 0)
-			vm_page_zero_fill(m);
+			pmap_zero_page(m);
 		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
 
 		/*

@@ -373,7 +373,7 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
 		m = vm_page_grab(mem->am_obj, 0,
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		if ((m->flags & PG_ZERO) == 0)
-			vm_page_zero_fill(m);
+			pmap_zero_page(m);
 		vm_page_lock_queues();
 		mem->am_physical = VM_PAGE_TO_PHYS(m);
 		vm_page_wakeup(m);

@@ -930,7 +930,7 @@ ffs_getpages(ap)
 		vm_page_unlock_queues();
 		if (reqblkno == -1) {
 			if ((mreq->flags & PG_ZERO) == 0)
-				vm_page_zero_fill(mreq);
+				pmap_zero_page(mreq);
 			vm_page_undirty(mreq);
 			mreq->valid = VM_PAGE_BITS_ALL;
 			return VM_PAGER_OK;

@@ -144,7 +144,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	 */
 	for (i = 0; i < count; i++) {
 		if ((m[i]->flags & PG_ZERO) == 0)
-			vm_page_zero_fill(m[i]);
+			pmap_zero_page(m[i]);
 		vm_page_flag_set(m[i], PG_ZERO);
 		/* Switch off pv_entries */
 		vm_page_lock_queues();

@@ -622,7 +622,7 @@ RetryFault:;
 			 * Zero the page if necessary and mark it valid.
 			 */
 			if ((fs.m->flags & PG_ZERO) == 0) {
-				vm_page_zero_fill(fs.m);
+				pmap_zero_page(fs.m);
 			} else {
 				cnt.v_ozfod++;
 			}

@@ -199,7 +199,7 @@ kmem_alloc(map, size)
 		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
 		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		if ((mem->flags & PG_ZERO) == 0)
-			vm_page_zero_fill(mem);
+			pmap_zero_page(mem);
 		mem->valid = VM_PAGE_BITS_ALL;
 		vm_page_flag_clear(mem, PG_ZERO);
 		vm_page_wakeup(mem);
@@ -395,7 +395,7 @@ kmem_malloc(map, size, flags)
 			goto bad;
 		}
 		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
-			vm_page_zero_fill(m);
+			pmap_zero_page(m);
 		vm_page_flag_clear(m, PG_ZERO);
 		m->valid = VM_PAGE_BITS_ALL;
 	}

@@ -454,31 +454,6 @@ vm_page_protect(vm_page_t mem, int prot)
 			vm_page_flag_clear(mem, PG_WRITEABLE);
 		}
 	}
-/*
- *	vm_page_zero_fill:
- *
- *	Zero-fill the specified page.
- *	Written as a standard pagein routine, to
- *	be used by the zero-fill object.
- */
-boolean_t
-vm_page_zero_fill(vm_page_t m)
-{
-	pmap_zero_page(m);
-	return (TRUE);
-}
-
-/*
- *	vm_page_zero_fill_area:
- *
- *	Like vm_page_zero_fill but only fill the specified area.
- */
-boolean_t
-vm_page_zero_fill_area(vm_page_t m, int off, int size)
-{
-	pmap_zero_page_area(m, off, size);
-	return (TRUE);
-}
 
 /*
  *	vm_page_copy:

@@ -325,8 +325,6 @@ void vm_page_io_finish(vm_page_t m);
 void vm_page_hold(vm_page_t mem);
 void vm_page_unhold(vm_page_t mem);
 void vm_page_protect(vm_page_t mem, int prot);
-boolean_t vm_page_zero_fill(vm_page_t m);
-boolean_t vm_page_zero_fill_area(vm_page_t m, int off, int len);
 void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
 void vm_page_free(vm_page_t m);
 void vm_page_free_zero(vm_page_t m);

@@ -328,7 +328,7 @@ vnode_pager_setsize(vp, nsize)
 			 * Clear out partial-page garbage in case
 			 * the page has been mapped.
 			 */
-			vm_page_zero_fill_area(m, base, size);
+			pmap_zero_page_area(m, base, size);
 			/*
 			 * XXX work around SMP data integrity race