Revert r254501. Instead, reuse the type stability of the struct pmap,
which is part of the struct vmspace allocated from a UMA_ZONE_NOFREE
zone.  Initialize the pmap lock in the vmspace zone init function, and
remove pmap lock initialization and destruction from pmap_pinit() and
pmap_release().

Suggested and reviewed by:	alc (previous version)
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Konstantin Belousov 2013-08-22 18:12:24 +00:00
parent b544368a22
commit e68c64f0ba
12 changed files with 8 additions and 33 deletions
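The mechanism relied on above: items allocated from a UMA zone created with
the UMA_ZONE_NOFREE flag are type-stable, that is, their memory is never
returned to the system, so state set up once in the zone's item init routine
(here, the pmap lock embedded in struct vmspace) stays valid across any number
of free/allocate cycles of the same item.  A minimal sketch of the pattern
follows; the foo object, zone, and function names are hypothetical and only
illustrate the idea, they are not the vmspace/pmap code changed by this commit.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/uma.h>

struct foo {
	struct mtx	f_mtx;		/* analogous to the pmap lock */
	int		f_busy;		/* ordinary per-use state */
};

static uma_zone_t foo_zone;

/* Item init: runs when UMA first sets up an item, not on every allocation. */
static int
foo_zinit(void *mem, int size, int flags)
{
	struct foo *f = mem;

	mtx_init(&f->f_mtx, "foo", NULL, MTX_DEF);
	return (0);
}

/* Ctor: runs on every uma_zalloc(); must not touch the long-lived mutex. */
static int
foo_ctor(void *mem, int size, void *arg, int flags)
{
	struct foo *f = mem;

	f->f_busy = 0;
	return (0);
}

static void
foo_zone_create(void)
{
	/*
	 * UMA_ZONE_NOFREE keeps the items type-stable, so the mutex
	 * initialized in foo_zinit() never has to be destroyed.
	 */
	foo_zone = uma_zcreate("foo", sizeof(struct foo), foo_ctor, NULL,
	    foo_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}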

View File

@@ -1687,8 +1687,6 @@ pmap_pinit(pmap_t pmap)
vm_page_t pml4pg;
int i;
PMAP_LOCK_INIT(pmap);
/*
* allocate the page directory page
*/
@@ -1959,9 +1957,6 @@ pmap_release(pmap_t pmap)
KASSERT(vm_radix_is_empty(&pmap->pm_root),
("pmap_release: pmap has reserved page table page(s)"));
rw_wlock(&pvh_global_lock);
rw_wunlock(&pvh_global_lock);
m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
for (i = 0; i < NKPML4E; i++) /* KVA */
@@ -1973,7 +1968,6 @@ pmap_release(pmap_t pmap)
m->wire_count--;
atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
PMAP_LOCK_DESTROY(pmap);
}
static int

View File

@@ -1814,7 +1814,6 @@ pmap_release(pmap_t pmap)
}
pmap_free_l1(pmap);
PMAP_LOCK_DESTROY(pmap);
dprintf("pmap_release()\n");
}
@@ -3225,7 +3224,6 @@ pmap_pinit(pmap_t pmap)
{
PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
PMAP_LOCK_INIT(pmap);
pmap_alloc_l1(pmap);
bzero(pmap->pm_l2, sizeof(pmap->pm_l2));

View File

@@ -2479,7 +2479,6 @@ pmap_release(pmap_t pmap)
}
pmap_free_l1(pmap);
PMAP_LOCK_DESTROY(pmap);
dprintf("pmap_release()\n");
}
@@ -3819,7 +3818,6 @@ pmap_pinit(pmap_t pmap)
{
PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
PMAP_LOCK_INIT(pmap);
pmap_alloc_l1(pmap);
bzero(pmap->pm_l2, sizeof(pmap->pm_l2));

View File

@@ -1738,8 +1738,6 @@ pmap_pinit(pmap_t pmap)
vm_paddr_t pa;
int i;
PMAP_LOCK_INIT(pmap);
/*
* No need to allocate page table space yet but we do need a valid
* page directory table.
@@ -2051,7 +2049,6 @@ pmap_release(pmap_t pmap)
atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
}
PMAP_LOCK_DESTROY(pmap);
}
static int

View File

@@ -1452,8 +1452,6 @@ pmap_pinit(pmap_t pmap)
mtx_lock(&createdelete_lock);
#endif
PMAP_LOCK_INIT(pmap);
/*
* No need to allocate page table space yet but we do need a valid
* page directory table.
@@ -1821,7 +1819,6 @@ pmap_release(pmap_t pmap)
#ifdef PAE
pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1);
#endif
PMAP_LOCK_DESTROY(pmap);
#ifdef HAMFISTED_LOCKING
mtx_unlock(&createdelete_lock);

View File

@@ -622,6 +622,8 @@ pmap_free_rid(uint32_t rid)
void
pmap_pinit0(struct pmap *pmap)
{
PMAP_LOCK_INIT(pmap);
/* kernel_pmap is the same as any other pmap. */
pmap_pinit(pmap);
}
@@ -635,7 +637,6 @@ pmap_pinit(struct pmap *pmap)
{
int i;
PMAP_LOCK_INIT(pmap);
for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
pmap->pm_rid[i] = pmap_allocate_rid();
TAILQ_INIT(&pmap->pm_pvchunk);
@@ -660,7 +661,6 @@ pmap_release(pmap_t pmap)
for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
if (pmap->pm_rid[i])
pmap_free_rid(pmap->pm_rid[i]);
PMAP_LOCK_DESTROY(pmap);
}
/*

View File

@@ -1070,8 +1070,6 @@ pmap_pinit(pmap_t pmap)
vm_page_t ptdpg;
int i;
PMAP_LOCK_INIT(pmap);
/*
* allocate the page directory page
*/
@@ -1231,7 +1229,6 @@ pmap_release(pmap_t pmap)
ptdpg->wire_count--;
atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(ptdpg);
PMAP_LOCK_DESTROY(pmap);
}
/*

View File

@@ -1655,7 +1655,6 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
u_int entropy;
KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
PMAP_LOCK_INIT(pmap);
RB_INIT(&pmap->pmap_pvo);
entropy = 0;
@@ -1719,6 +1718,7 @@ void
moea_pinit0(mmu_t mmu, pmap_t pm)
{
PMAP_LOCK_INIT(pm);
moea_pinit(mmu, pm);
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
@@ -1824,7 +1824,6 @@ moea_release(mmu_t mmu, pmap_t pmap)
idx /= VSID_NBPW;
moea_vsid_bitmap[idx] &= ~mask;
mtx_unlock(&moea_vsid_mutex);
PMAP_LOCK_DESTROY(pmap);
}
/*

View File

@@ -1879,7 +1879,7 @@ moea64_get_unique_vsid(void) {
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
PMAP_LOCK_INIT(pmap);
RB_INIT(&pmap->pmap_pvo);
pmap->pm_slb_tree_root = slb_alloc_tree();
@@ -1893,7 +1893,6 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
int i;
uint32_t hash;
PMAP_LOCK_INIT(pmap);
RB_INIT(&pmap->pmap_pvo);
if (pmap_bootstrapped)
@@ -1920,6 +1919,8 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{
PMAP_LOCK_INIT(pm);
moea64_pinit(mmu, pm);
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
@@ -2074,8 +2075,6 @@ moea64_release(mmu_t mmu, pmap_t pmap)
moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
#endif
PMAP_LOCK_DESTROY(pmap);
}
/*

View File

@@ -1477,6 +1477,7 @@ static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{
PMAP_LOCK_INIT(pmap);
mmu_booke_pinit(mmu, pmap);
PCPU_SET(curpmap, pmap);
}
@@ -1495,7 +1496,6 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
PMAP_LOCK_INIT(pmap);
for (i = 0; i < MAXCPU; i++)
pmap->pm_tid[i] = TID_NONE;
CPU_ZERO(&kernel_pmap->pm_active);
@@ -1516,8 +1516,6 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
KASSERT(pmap->pm_stats.resident_count == 0,
("pmap_release: pmap resident count %ld != 0",
pmap->pm_stats.resident_count));
PMAP_LOCK_DESTROY(pmap);
}
/*

View File

@@ -1204,8 +1204,6 @@ pmap_pinit(pmap_t pm)
vm_page_t m;
int i;
PMAP_LOCK_INIT(pm);
/*
* Allocate KVA space for the TSB.
*/
@@ -1299,7 +1297,6 @@ pmap_release(pmap_t pm)
vm_page_free_zero(m);
}
VM_OBJECT_WUNLOCK(obj);
PMAP_LOCK_DESTROY(pm);
}
/*

View File

@@ -226,6 +226,7 @@ vmspace_zinit(void *mem, int size, int flags)
vm->vm_map.pmap = NULL;
(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
PMAP_LOCK_INIT(vmspace_pmap(vm));
return (0);
}
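For context only, not part of the hunk above: the once-only PMAP_LOCK_INIT()
in vmspace_zinit() is safe because the vmspace zone is created with the
UMA_ZONE_NOFREE flag, as the commit message notes.  The creation call in
vm_map.c looks roughly like the sketch below; the exact name string and the
dtor/fini hooks passed by the in-tree code may differ.

	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace),
	    NULL, NULL, vmspace_zinit, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE);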