Pass vm_page_t instead of physical addresses to pmap_zero_page[_area]()

and pmap_copy_page().  This gets rid of a couple more physical addresses
in upper layers, with the eventual aim of supporting PAE and dealing with
the physical addressing mostly within pmap.  (We will need either 64 bit
physical addresses or page indexes, possibly both depending on the
circumstances.  Leaving this to pmap itself gives more flexibility.)

Reviewed by:	jake
Tested on:	i386, ia64 and (I believe) sparc64. (my alpha was hosed)
Peter Wemm 2002-04-15 16:00:03 +00:00
parent 1c9fd646f6
commit 1a87a0da66
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=94777
12 changed files with 73 additions and 76 deletions
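
To recap the interface change described in the commit message, here are the machine-independent prototypes before and after, taken directly from the pmap header hunk in this commit:

	/* Old MI prototypes: callers pass physical addresses. */
	void	pmap_zero_page(vm_offset_t);
	void	pmap_zero_page_area(vm_offset_t, int off, int size);
	void	pmap_copy_page(vm_offset_t, vm_offset_t);

	/* New MI prototypes: the pmap does VM_PAGE_TO_PHYS() internally. */
	void	pmap_zero_page(vm_page_t);
	void	pmap_zero_page_area(vm_page_t, int off, int size);
	void	pmap_copy_page(vm_page_t, vm_page_t);

Callers in the VM layer correspondingly change from pmap_zero_page(VM_PAGE_TO_PHYS(m)) to pmap_zero_page(m), as the vm_page.c and vm_zeroidle.c hunks below show.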

View File

@@ -1731,9 +1731,9 @@ pmap_growkernel(vm_offset_t addr)
 nklev2++;
 vm_page_wire(nkpg);
-pa = VM_PAGE_TO_PHYS(nkpg);
-pmap_zero_page(pa);
+pmap_zero_page(nkpg);
+pa = VM_PAGE_TO_PHYS(nkpg);
 newlev1 = pmap_phys_to_pte(pa)
 | PG_V | PG_ASM | PG_KRE | PG_KWE;
@@ -1765,8 +1765,8 @@ pmap_growkernel(vm_offset_t addr)
 nklev3++;
 vm_page_wire(nkpg);
+pmap_zero_page(nkpg);
 pa = VM_PAGE_TO_PHYS(nkpg);
-pmap_zero_page(pa);
 newlev2 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE;
 *pte = newlev2;
@@ -2709,9 +2709,9 @@ pmap_kernel()
 */
 void
-pmap_zero_page(vm_offset_t pa)
+pmap_zero_page(vm_page_t m)
 {
-vm_offset_t va = ALPHA_PHYS_TO_K0SEG(pa);
+vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m));
 bzero((caddr_t) va, PAGE_SIZE);
 }
@@ -2725,9 +2725,9 @@ pmap_zero_page(vm_offset_t pa)
 */
 void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
-vm_offset_t va = ALPHA_PHYS_TO_K0SEG(pa);
+vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m));
 bzero((char *)(caddr_t)va + off, size);
 }
@@ -2738,10 +2738,10 @@ pmap_zero_page_area(vm_offset_t pa, int off, int size)
 * time.
 */
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
-src = ALPHA_PHYS_TO_K0SEG(src);
-dst = ALPHA_PHYS_TO_K0SEG(dst);
+src = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(src));
+dst = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(dst));
 bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE);
 }

View File

@@ -1464,7 +1464,7 @@ _pmap_allocpte(pmap, ptepindex)
 pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex);
 bzero((caddr_t) pteva, PAGE_SIZE);
 } else {
-pmap_zero_page(ptepa);
+pmap_zero_page(m);
 }
 }
@@ -1629,8 +1629,8 @@ pmap_growkernel(vm_offset_t addr)
 nkpt++;
 vm_page_wire(nkpg);
+pmap_zero_page(nkpg);
 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
-pmap_zero_page(ptppaddr);
 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 pdir_pde(PTD, kernel_vm_end) = newpdir;
@@ -2861,13 +2861,14 @@ pmap_kernel()
 * the page into KVM and using bzero to clear its contents.
 */
 void
-pmap_zero_page(vm_offset_t phys)
+pmap_zero_page(vm_page_t m)
 {
+vm_offset_t phys = VM_PAGE_TO_PHYS(m);
 if (*CMAP2)
 panic("pmap_zero_page: CMAP2 busy");
-*CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
 invltlb_1pg((vm_offset_t)CADDR2);
 #if defined(I686_CPU)
@@ -2886,13 +2887,14 @@ pmap_zero_page(vm_offset_t phys)
 * off and size may not cover an area beyond a single hardware page.
 */
 void
-pmap_zero_page_area(vm_offset_t phys, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
+vm_offset_t phys = VM_PAGE_TO_PHYS(m);
 if (*CMAP2)
 panic("pmap_zero_page: CMAP2 busy");
-*CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
 invltlb_1pg((vm_offset_t)CADDR2);
 #if defined(I686_CPU)
@@ -2911,7 +2913,7 @@ pmap_zero_page_area(vm_offset_t phys, int off, int size)
 * time.
 */
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 if (*CMAP1)
@@ -2919,8 +2921,8 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
 if (*CMAP2)
 panic("pmap_copy_page: CMAP2 busy");
-*CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
-*CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
+*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
+*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
 #ifdef I386_CPU
 invltlb();
 #else
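
Unlike the alpha's K0SEG direct map, the i386 has no permanent mapping of all physical memory, so the comment above ("mapping the page into KVM and using bzero") is implemented through a reserved kernel PTE (CMAP2) and its fixed virtual address (CADDR2). The hunks above stop before the unmapping step, so here is a minimal sketch of the whole pattern for orientation only; the trailing *CMAP2 = 0 unmap is an assumption, and the I686-specific fast path behind the #if defined(I686_CPU) line is elided.

	void
	pmap_zero_page(vm_page_t m)
	{
		vm_offset_t phys = VM_PAGE_TO_PHYS(m);

		if (*CMAP2)
			panic("pmap_zero_page: CMAP2 busy");

		/* Point the reserved kernel PTE at the target page ... */
		*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
		/* ... flush the stale translation for its fixed VA ... */
		invltlb_1pg((vm_offset_t)CADDR2);
		/* ... and zero the page through that temporary window. */
		bzero(CADDR2, PAGE_SIZE);

		*CMAP2 = 0;	/* assumed: tear down the temporary mapping */
	}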

View File

@@ -1464,7 +1464,7 @@ _pmap_allocpte(pmap, ptepindex)
 pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex);
 bzero((caddr_t) pteva, PAGE_SIZE);
 } else {
-pmap_zero_page(ptepa);
+pmap_zero_page(m);
 }
 }
@@ -1629,8 +1629,8 @@ pmap_growkernel(vm_offset_t addr)
 nkpt++;
 vm_page_wire(nkpg);
+pmap_zero_page(nkpg);
 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
-pmap_zero_page(ptppaddr);
 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 pdir_pde(PTD, kernel_vm_end) = newpdir;
@@ -2861,13 +2861,14 @@ pmap_kernel()
 * the page into KVM and using bzero to clear its contents.
 */
 void
-pmap_zero_page(vm_offset_t phys)
+pmap_zero_page(vm_page_t m)
 {
+vm_offset_t phys = VM_PAGE_TO_PHYS(m);
 if (*CMAP2)
 panic("pmap_zero_page: CMAP2 busy");
-*CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
 invltlb_1pg((vm_offset_t)CADDR2);
 #if defined(I686_CPU)
@@ -2886,13 +2887,14 @@ pmap_zero_page(vm_offset_t phys)
 * off and size may not cover an area beyond a single hardware page.
 */
 void
-pmap_zero_page_area(vm_offset_t phys, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
+vm_offset_t phys = VM_PAGE_TO_PHYS(m);
 if (*CMAP2)
 panic("pmap_zero_page: CMAP2 busy");
-*CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
+*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
 invltlb_1pg((vm_offset_t)CADDR2);
 #if defined(I686_CPU)
@@ -2911,7 +2913,7 @@ pmap_zero_page_area(vm_offset_t phys, int off, int size)
 * time.
 */
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 if (*CMAP1)
@@ -2919,8 +2921,8 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
 if (*CMAP2)
 panic("pmap_copy_page: CMAP2 busy");
-*CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
-*CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
+*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
+*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
 #ifdef I386_CPU
 invltlb();
 #else

View File

@@ -2113,9 +2113,9 @@ pmap_kernel()
 */
 void
-pmap_zero_page(vm_offset_t pa)
+pmap_zero_page(vm_page_t m)
 {
-vm_offset_t va = IA64_PHYS_TO_RR7(pa);
+vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
 bzero((caddr_t) va, PAGE_SIZE);
 }
@@ -2129,9 +2129,9 @@ pmap_zero_page(vm_offset_t pa)
 */
 void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
-vm_offset_t va = IA64_PHYS_TO_RR7(pa);
+vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
 bzero((char *)(caddr_t)va + off, size);
 }
@@ -2142,10 +2142,10 @@ pmap_zero_page_area(vm_offset_t pa, int off, int size)
 * time.
 */
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
-src = IA64_PHYS_TO_RR7(src);
-dst = IA64_PHYS_TO_RR7(dst);
+src = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(src));
+dst = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(dst));
 bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE);
 }

View File

@@ -817,7 +817,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 }
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 TODO;
 }
@@ -826,8 +826,9 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
 void
-pmap_zero_page(vm_offset_t pa)
+pmap_zero_page(vm_page_t m)
 {
+vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 caddr_t va;
 int i;
@@ -854,7 +855,7 @@ pmap_zero_page(vm_offset_t pa)
 }
 void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
 TODO;
 }

View File

@@ -817,7 +817,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 }
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 TODO;
 }
@@ -826,8 +826,9 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
 void
-pmap_zero_page(vm_offset_t pa)
+pmap_zero_page(vm_page_t m)
 {
+vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 caddr_t va;
 int i;
@@ -854,7 +855,7 @@ pmap_zero_page(vm_offset_t pa)
 }
 void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
 TODO;
 }

View File

@@ -817,7 +817,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 }
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
 TODO;
 }
@@ -826,8 +826,9 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
 void
-pmap_zero_page(vm_offset_t pa)
+pmap_zero_page(vm_page_t m)
 {
+vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 caddr_t va;
 int i;
@@ -854,7 +855,7 @@ pmap_zero_page(vm_offset_t pa)
 }
 void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
 TODO;
 }

View File

@@ -1144,7 +1144,7 @@ pmap_pinit(pmap_t pm)
 m = vm_page_grab(pm->pm_tsb_obj, i,
 VM_ALLOC_RETRY | VM_ALLOC_ZERO);
 if ((m->flags & PG_ZERO) == 0)
-pmap_zero_page(VM_PAGE_TO_PHYS(m));
+pmap_zero_page(m);
 m->wire_count++;
 cnt.v_wire_count++;
@@ -1598,8 +1598,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
 void
-pmap_zero_page(vm_offset_t pa)
+pmap_zero_page(vm_page_t m)
 {
+vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 CTR1(KTR_PMAP, "pmap_zero_page: pa=%#lx", pa);
 dcache_inval_phys(pa, pa + PAGE_SIZE);
@@ -1607,8 +1608,9 @@ pmap_zero_page(vm_offset_t pa)
 }
 void
-pmap_zero_page_area(vm_offset_t pa, int off, int size)
+pmap_zero_page_area(vm_page_t m, int off, int size)
 {
+vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 CTR3(KTR_PMAP, "pmap_zero_page_area: pa=%#lx off=%#x size=%#x",
 pa, off, size);
@@ -1621,8 +1623,10 @@ pmap_zero_page_area(vm_offset_t pa, int off, int size)
 * Copy a page of physical memory by temporarily mapping it into the tlb.
 */
 void
-pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
+vm_offset_t src = VM_PAGE_TO_PHYS(msrc);
+vm_offset_t dst = VM_PAGE_TO_PHYS(mdst);
 CTR2(KTR_PMAP, "pmap_copy_page: src=%#lx dst=%#lx", src, dst);
 dcache_inval_phys(dst, dst + PAGE_SIZE);

View File

@@ -97,7 +97,7 @@ void pmap_clear_modify(vm_page_t m);
 void pmap_clear_reference(vm_page_t m);
 void pmap_collect(void);
 void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
-void pmap_copy_page(vm_offset_t, vm_offset_t);
+void pmap_copy_page(vm_page_t, vm_page_t);
 void pmap_destroy(pmap_t);
 void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
 boolean_t);
@@ -126,8 +126,8 @@ void pmap_reference(pmap_t);
 void pmap_release(pmap_t);
 void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
 void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
-void pmap_zero_page(vm_offset_t);
-void pmap_zero_page_area(vm_offset_t, int off, int size);
+void pmap_zero_page(vm_page_t);
+void pmap_zero_page_area(vm_page_t, int off, int size);
 void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
 int pmap_mincore(pmap_t pmap, vm_offset_t addr);
 void pmap_new_proc(struct proc *p);

View File

@@ -3221,7 +3221,7 @@ vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
 }
 vm_page_protect(m_in, VM_PROT_NONE);
-pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
+pmap_copy_page(m_in, m_out);
 m_out->valid = m_in->valid;
 vm_page_dirty(m_out);
 vm_page_activate(m_out);

View File

@@ -455,7 +455,7 @@ vm_page_protect(vm_page_t mem, int prot)
 boolean_t
 vm_page_zero_fill(vm_page_t m)
 {
-pmap_zero_page(VM_PAGE_TO_PHYS(m));
+pmap_zero_page(m);
 return (TRUE);
 }
@@ -467,7 +467,7 @@ vm_page_zero_fill(vm_page_t m)
 void
 vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
 {
-pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
+pmap_copy_page(src_m, dest_m);
 dest_m->valid = VM_PAGE_BITS_ALL;
 }
@@ -1582,14 +1582,8 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 * first block.
 */
 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
-(m->valid & (1 << (base >> DEV_BSHIFT))) == 0
-) {
-pmap_zero_page_area(
-VM_PAGE_TO_PHYS(m),
-frag,
-base - frag
-);
-}
+(m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
+pmap_zero_page_area(m, frag, base - frag);
 /*
 * If the ending offset is not DEV_BSIZE aligned and the
@@ -1598,14 +1592,9 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 */
 endoff = base + size;
 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
-(m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
-) {
-pmap_zero_page_area(
-VM_PAGE_TO_PHYS(m),
-endoff,
-DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
-);
-}
+(m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
+pmap_zero_page_area(m, endoff,
+DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 /*
 * Set valid, clear dirty bits. If validating the entire
@@ -1702,11 +1691,8 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 (m->valid & (1 << i))
 ) {
 if (i > b) {
-pmap_zero_page_area(
-VM_PAGE_TO_PHYS(m),
-b << DEV_BSHIFT,
-(i - b) << DEV_BSHIFT
-);
+pmap_zero_page_area(m,
+b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 }
 b = i + 1;
 }
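
The vm_page_set_validclean() hunks above zero only the DEV_BSIZE-block fragments that lie outside the range being validated (and the real code additionally skips a fragment whose containing block is already valid). A small standalone illustration of that alignment arithmetic follows; the DEV_BSIZE value of 512 and the base/size numbers are assumptions chosen for the example, not taken from this diff.

	#include <stdio.h>

	#define	DEV_BSIZE	512	/* assumed conventional block size */

	int
	main(void)
	{
		int base = 300, size = 1000;	/* hypothetical sub-page range */
		int frag, endoff;

		/* Leading fragment: [256, 300) would be zeroed. */
		frag = base & ~(DEV_BSIZE - 1);	/* 300 rounded down -> 256 */
		if (frag != base)
			printf("pmap_zero_page_area(m, %d, %d)\n",
			    frag, base - frag);

		/* Trailing fragment: [1300, 1536) would be zeroed. */
		endoff = base + size;		/* 1300 */
		if ((endoff & (DEV_BSIZE - 1)) != 0)
			printf("pmap_zero_page_area(m, %d, %d)\n", endoff,
			    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
		return (0);
	}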

View File

@@ -82,7 +82,7 @@ vm_page_zero_idle(void)
 TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
 m->queue = PQ_NONE;
 /* maybe drop out of Giant here */
-pmap_zero_page(VM_PAGE_TO_PHYS(m));
+pmap_zero_page(m);
 /* and return here */
 vm_page_flag_set(m, PG_ZERO);
 m->queue = PQ_FREE + m->pc;