Retire pmap_pinit2(). Alpha was the last platform that used it. However,
ever since alpha/alpha/pmap.c revision 1.81 introduced the list allpmaps, there has been no reason for having this function on Alpha. Briefly, when pmap_growkernel() relied upon the list of all processes to find and update the various pmaps to reflect a growth in the kernel's valid address space, pmap_pinit2() served to avoid a race between pmap initialization and pmap_growkernel(). Specifically, pmap_pinit2() was responsible for initializing the kernel portions of the pmap, and it was called only after the process structure contained a pointer to the new pmap, so that pmap_growkernel() could find it. Thus, an update to the kernel's address space might be applied to the new pmap unnecessarily, but an update would never be lost.
commit fcffa790e9 (parent 7594cde032)
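To illustrate why the allpmaps list makes pmap_pinit2() unnecessary, here is a minimal, hypothetical sketch in plain C (not the committed FreeBSD code). Only allpmaps, allpmaps_lock, pm_list, and LIST_INSERT_HEAD() appear in the diffs below; the function names pmap_register_sketch() and pmap_growkernel_sketch() and the stripped-down struct pmap are illustrative assumptions, and the allpmaps_lock spin lock is elided so the sketch compiles outside the kernel.

#include <sys/queue.h>

/* Hypothetical, pared-down pmap: only the list linkage matters here. */
struct pmap {
	LIST_ENTRY(pmap) pm_list;	/* link on allpmaps, as in the diffs below */
	/* ... machine-dependent page-table fields ... */
};

static LIST_HEAD(, pmap) allpmaps = LIST_HEAD_INITIALIZER(allpmaps);

/*
 * Sketch of the pmap_pinit() side: the new pmap is registered on the
 * global list before it can be activated, so pmap_growkernel() can
 * always find it.  (The real code holds allpmaps_lock here.)
 */
static void
pmap_register_sketch(struct pmap *pmap)
{
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
}

/*
 * Sketch of the pmap_growkernel() side: when the kernel's valid address
 * space grows, every registered pmap receives the new kernel mapping
 * directly, so a separate "wire in the kernel entries" pass is not needed.
 */
static void
pmap_growkernel_sketch(void)
{
	struct pmap *pmap;

	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		/* install the newly allocated kernel page-table entries here */
	}
}

With this scheme a pmap created during a concurrent pmap_growkernel() may simply receive the update twice, which is harmless; the property that matters is that no pmap can miss an update, which is what pmap_pinit2() previously guaranteed by copying the kernel entries in after the fact.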
@@ -1109,18 +1109,6 @@ pmap_pinit(pmap)
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Wire in kernel global address entries. To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * should be called after the vmspace is attached to the process
 * but before this pmap is activated.
 */
void
pmap_pinit2(pmap)
	struct pmap *pmap;
{
	bcopy(PTlev1 + K1SEGLEV1I, pmap->pm_lev1 + K1SEGLEV1I, nklev2 * PTESIZE);
}

@@ -1102,19 +1102,6 @@ pmap_pinit(pmap)
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Wire in kernel global address entries. To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * should be called after the vmspace is attached to the process
 * but before this pmap is activated.
 */
void
pmap_pinit2(pmap)
	struct pmap *pmap;
{
	/* XXX: Remove this stub when no longer called */
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.

@@ -1146,19 +1146,6 @@ pmap_pinit(pmap)
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Wire in kernel global address entries. To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * should be called after the vmspace is attached to the process
 * but before this pmap is activated.
 */
void
pmap_pinit2(pmap)
	struct pmap *pmap;
{
	/* XXX: Remove this stub when no longer called */
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.

@@ -735,17 +735,6 @@ pmap_pinit(struct pmap *pmap)
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Wire in kernel global address entries. To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * should be called after the vmspace is attached to the process
 * but before this pmap is activated.
 */
void
pmap_pinit2(struct pmap *pmap)
{
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

@@ -1443,12 +1443,6 @@ pmap_pinit0(pmap_t pm)
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
	/* XXX: Remove this stub when no longer called */
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */

@@ -1443,12 +1443,6 @@ pmap_pinit0(pmap_t pm)
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
	/* XXX: Remove this stub when no longer called */
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */

@@ -1443,12 +1443,6 @@ pmap_pinit0(pmap_t pm)
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
	/* XXX: Remove this stub when no longer called */
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */

@@ -1027,12 +1027,6 @@ pmap_pinit(pmap_t pm)
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
	/* XXX: Remove this stub when no longer called */
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.

@@ -120,7 +120,6 @@ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_protect(vm_page_t m, vm_prot_t prot);
void pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);
void pmap_pinit2(pmap_t);
void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void pmap_qenter(vm_offset_t, vm_page_t *, int);
void pmap_qremove(vm_offset_t, int);

@@ -675,9 +675,6 @@ vm_forkproc(td, p2, td2, flags)

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

@@ -2798,7 +2798,6 @@ vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
	 * here, it is a good idea to keep this form for future mods.
	 */
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	if (p == curthread->td_proc)	/* XXXKSE ? */
		pmap_activate(curthread);
	vmspace_free(oldvmspace);

@@ -2819,7 +2818,6 @@ vmspace_unshare(struct proc *p)
		return;
	newvmspace = vmspace_fork(oldvmspace);
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	if (p == curthread->td_proc)	/* XXXKSE ? */
		pmap_activate(curthread);
	vmspace_free(oldvmspace);