Prevent overflow in the calculation of the next page-directory address.

The overflow causes a wraparound, with consequent corruption of the
mapping of (almost) the whole address space.

As Alan noted, pmap_copy() does not require the wrap-around checks
because it cannot be applied to the kernel's pmap. The checks there are
included for consistency.

Reported and tested by:	kris (i386/pmap.c:pmap_remove() part)
Reviewed by:	alc
MFC after:	1 week
kib 2008-03-23 07:07:27 +00:00
parent fcd39263e4
commit 53a15ee1ea
2 changed files with 24 additions and 0 deletions
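For illustration (not part of the original commit): a minimal standalone sketch of the wraparound that the added clamps guard against, assuming the i386 non-PAE constants NBPDR = 4 MB and PDRMASK = NBPDR - 1, a 32-bit vm_offset_t, and made-up sva/eva values at the top of the address space.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NBPDR	(1u << 22)		/* 4 MB mapped by one i386 page-directory entry */
#define PDRMASK	(NBPDR - 1)

typedef uint32_t vm_offset_t;		/* 32-bit virtual addresses on i386 */

int
main(void)
{
	vm_offset_t sva = 0xffc00000;	/* hypothetical: start of the last 4 MB slot */
	vm_offset_t eva = 0xffffffff;	/* hypothetical: end of the removal range */

	/* Pre-commit calculation: the addition overflows and wraps to 0. */
	vm_offset_t pdnxt = (sva + NBPDR) & ~PDRMASK;
	printf("unclamped pdnxt = 0x%08" PRIx32 "\n", pdnxt);	/* 0x00000000 */

	/*
	 * The commit's clamp: a wrapped pdnxt is smaller than sva, so the
	 * scan is cut off at eva instead of restarting near address 0.
	 */
	if (pdnxt < sva)
		pdnxt = eva;
	printf("clamped   pdnxt = 0x%08" PRIx32 "\n", pdnxt);	/* 0xffffffff */
	return (0);
}

Without the clamp, a scan loop of the form for (; sva < eva; sva = pdnxt) would restart near address 0 and walk over (almost) the entire address space.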

sys/amd64/amd64/pmap.c

@@ -2444,12 +2444,16 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		pml4e = pmap_pml4e(pmap, sva);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (sva + NBPDP) & ~PDPMASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
@@ -2457,6 +2461,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		 * Calculate index for next page table.
 		 */
 		va_next = (sva + NBPDR) & ~PDRMASK;
+		if (va_next < sva)
+			va_next = eva;
 		pde = pmap_pdpe_to_pde(pdpe, sva);
 		ptpaddr = *pde;
@@ -2672,16 +2678,22 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		pml4e = pmap_pml4e(pmap, sva);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (sva + NBPDP) & ~PDPMASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 		va_next = (sva + NBPDR) & ~PDRMASK;
+		if (va_next < sva)
+			va_next = eva;
 		pde = pmap_pdpe_to_pde(pdpe, sva);
 		ptpaddr = *pde;
@@ -3485,16 +3497,22 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		pml4e = pmap_pml4e(src_pmap, addr);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (addr + NBPML4) & ~PML4MASK;
+			if (va_next < addr)
+				va_next = end_addr;
 			continue;
 		}
 		pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (addr + NBPDP) & ~PDPMASK;
+			if (va_next < addr)
+				va_next = end_addr;
 			continue;
 		}
 		va_next = (addr + NBPDR) & ~PDRMASK;
+		if (va_next < addr)
+			va_next = end_addr;
 		pde = pmap_pdpe_to_pde(pdpe, addr);
 		srcptepaddr = *pde;

sys/i386/i386/pmap.c

@@ -2046,6 +2046,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		 * Calculate index for next page table.
 		 */
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
+		if (pdnxt < sva)
+			pdnxt = eva;
 		if (pmap->pm_stats.resident_count == 0)
 			break;
@@ -2194,6 +2196,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		unsigned pdirindex;
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
+		if (pdnxt < sva)
+			pdnxt = eva;
 		pdirindex = sva >> PDRSHIFT;
 		ptpaddr = pmap->pm_pdir[pdirindex];
@@ -2782,6 +2786,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		    ("pmap_copy: invalid to pmap_copy page tables"));
 		pdnxt = (addr + NBPDR) & ~PDRMASK;
+		if (pdnxt < addr)
+			pdnxt = end_addr;
 		ptepindex = addr >> PDRSHIFT;
 		srcptepaddr = src_pmap->pm_pdir[ptepindex];