Move the per-CPU vmspace pointer fixup that is required before a
struct vmspace is freed from cpu_sched_exit() to pmap_release().

This has the advantage of being able to rely on MI code to decide
when a free should occur, instead of having to inspect the reference
count ourselves.

At the same time, turn the per-CPU vmspace pointer into a pmap pointer,
so that pmap_release() can deal with pmaps exclusively.

Reviewed (and embarrassing bug spotted) by: jake
Thomas Moestl 2004-05-26 12:06:52 +00:00
parent 6cbd3e99ec
commit 3e519a2cf4
5 changed files with 41 additions and 36 deletions
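
In practice, the refcount check that cpu_sched_exit() used to do by hand and the fixup that now runs in pmap_release() look roughly as follows (a condensed C sketch of the hunks below, not extra code in this commit; vm, pm and pc stand for the vmspace, pmap and pcpu variables used there):

	/* Old: cpu_sched_exit() had to inspect the reference count itself. */
	if (vm->vm_refcnt > 0)
		return;		/* not the last reference, nothing to fix up */

	/*
	 * New: pmap_release() is only reached once the MI VM code has dropped
	 * the last reference, so the stale per-CPU pointers can be cleared
	 * unconditionally (under sched_lock).
	 */
	mtx_lock_spin(&sched_lock);
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc->pc_pmap == pm)
			pc->pc_pmap = NULL;
	}
	mtx_unlock_spin(&sched_lock);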

sys/sparc64/include/pcpu.h

@@ -38,7 +38,7 @@
#define ALT_STACK_SIZE 128
struct vmspace;
struct pmap;
/*
* Inside the kernel, the globally reserved register g7 is used to
@@ -49,7 +49,7 @@ struct vmspace;
struct intr_request *pc_irhead; \
struct intr_request **pc_irtail; \
struct intr_request *pc_irfree; \
struct vmspace *pc_vmspace; \
struct pmap *pc_pmap; \
vm_offset_t pc_addr; \
u_int pc_mid; \
u_int pc_node; \

sys/sparc64/sparc64/genassym.c

@@ -198,7 +198,7 @@ ASSYM(PC_MID, offsetof(struct pcpu, pc_mid));
ASSYM(PC_TLB_CTX, offsetof(struct pcpu, pc_tlb_ctx));
ASSYM(PC_TLB_CTX_MAX, offsetof(struct pcpu, pc_tlb_ctx_max));
ASSYM(PC_TLB_CTX_MIN, offsetof(struct pcpu, pc_tlb_ctx_min));
ASSYM(PC_VMSPACE, offsetof(struct pcpu, pc_vmspace));
ASSYM(PC_PMAP, offsetof(struct pcpu, pc_pmap));
ASSYM(PC_SIZEOF, sizeof(struct pcpu));
ASSYM(IH_SHIFT, IH_SHIFT);

sys/sparc64/sparc64/pmap.c

@@ -1027,12 +1027,33 @@ pmap_release(pmap_t pm)
{
vm_object_t obj;
vm_page_t m;
struct pcpu *pc;
CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb);
KASSERT(pmap_resident_count(pm) == 0,
("pmap_release: resident pages %ld != 0",
pmap_resident_count(pm)));
/*
* After the pmap was freed, it might be reallocated to a new process.
* When switching, this might lead us to wrongly assume that we need
* not switch contexts because old and new pmap pointer are equal.
* Therefore, make sure that this pmap is not referenced by any PCPU
* pointer any more. This could happen in two cases:
* - A process that referenced the pmap is currently exiting on a CPU.
* However, it is guaranteed to not switch in any more after setting
* its state to PRS_ZOMBIE.
* - A process that referenced this pmap ran on a CPU, but we switched
* to a kernel thread, leaving the pmap pointer unchanged.
*/
mtx_lock_spin(&sched_lock);
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc->pc_pmap == pm)
pc->pc_pmap = NULL;
}
mtx_unlock_spin(&sched_lock);
obj = pm->pm_tsb_obj;
VM_OBJECT_LOCK(obj);
KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
@@ -1825,7 +1846,7 @@ pmap_activate(struct thread *td)
pm->pm_context[PCPU_GET(cpuid)] = context;
pm->pm_active |= PCPU_GET(cpumask);
PCPU_SET(vmspace, vm);
PCPU_SET(pmap, pm);
stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
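
The hazard that the new comment above guards against is the pointer-equality shortcut taken on context switch. In pseudo-C (a sketch paraphrasing the cpu_switch assembly below; the variable names are illustrative, not from the source), the switch path is roughly:

	struct pmap *new_pm = &newtd->td_proc->p_vmspace->vm_pmap;
	struct pmap *old_pm = PCPU_GET(pmap);	/* last non-kernel pmap on this CPU */

	if (new_pm == old_pm) {
		/*
		 * Same pointer: keep the current context and TSB registers.
		 * If old_pm had been freed and its memory recycled into
		 * new_pm, this shortcut would skip a context switch that is
		 * actually needed, which is why pmap_release() now clears
		 * stale pc_pmap entries.
		 */
	} else {
		/* Deactivate old_pm, then install a context and the TSB for new_pm. */
	}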

sys/sparc64/sparc64/swtch.S

@@ -127,15 +127,16 @@ ENTRY(cpu_switch)
sub %fp, CCFSZ, %sp
/*
* Point to the vmspaces of the new process, and of the last non-kernel
* Point to the pmaps of the new process, and of the last non-kernel
* process to run.
*/
ldx [%i0 + TD_PROC], %i2
ldx [PCPU(VMSPACE)], %l2
ldx [%i2 + P_VMSPACE], %i2
ldx [PCPU(PMAP)], %l2
ldx [%i2 + P_VMSPACE], %i5
add %i5, VM_PMAP, %i2
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: new vm=%p old vm=%p"
CATR(KTR_PROC, "cpu_switch: new pmap=%p old pmap=%p"
, %g1, %g2, %g3, 7, 8, 9)
stx %i2, [%g1 + KTR_PARM1]
stx %l2, [%g1 + KTR_PARM2]
@@ -154,12 +155,12 @@ ENTRY(cpu_switch)
* context active and avoid recycling its context number.
*/
SET(vmspace0, %i4, %i3)
cmp %i2, %i3
cmp %i5, %i3
be,a,pn %xcc, 5f
nop
/*
* If there was no non-kernel vmspace, don't try to deactivate it.
* If there was no non-kernel pmap, don't try to deactivate it.
*/
brz,a,pn %l2, 3f
nop
@@ -168,17 +169,17 @@ ENTRY(cpu_switch)
* Mark the pmap of the last non-kernel vmspace to run as no longer
* active on this cpu.
*/
lduw [%l2 + VM_PMAP + PM_ACTIVE], %l3
lduw [%l2 + PM_ACTIVE], %l3
lduw [PCPU(CPUMASK)], %l4
andn %l3, %l4, %l3
stw %l3, [%l2 + VM_PMAP + PM_ACTIVE]
stw %l3, [%l2 + PM_ACTIVE]
/*
* Take away its context number.
*/
lduw [PCPU(CPUID)], %l3
sllx %l3, INT_SHIFT, %l3
add %l2, VM_PMAP + PM_CONTEXT, %l4
add %l2, PM_CONTEXT, %l4
mov -1, %l5
stw %l5, [%l3 + %l4]
@@ -208,27 +209,27 @@ ENTRY(cpu_switch)
*/
lduw [PCPU(CPUID)], %i4
sllx %i4, INT_SHIFT, %i4
add %i2, VM_PMAP + PM_CONTEXT, %i5
add %i2, PM_CONTEXT, %i5
stw %i3, [%i4 + %i5]
/*
* Mark the pmap as active on this cpu.
*/
lduw [%i2 + VM_PMAP + PM_ACTIVE], %i4
lduw [%i2 + PM_ACTIVE], %i4
lduw [PCPU(CPUMASK)], %i5
or %i4, %i5, %i4
stw %i4, [%i2 + VM_PMAP + PM_ACTIVE]
stw %i4, [%i2 + PM_ACTIVE]
/*
* Make note of the change in vmspace.
* Make note of the change in pmap.
*/
stx %i2, [PCPU(VMSPACE)]
stx %i2, [PCPU(PMAP)]
/*
* Fiddle the hardware bits. Set the tsb registers and install the
* new context number in the cpu.
*/
ldx [%i2 + VM_PMAP + PM_TSB], %i4
ldx [%i2 + PM_TSB], %i4
mov AA_DMMU_TSB, %i5
stxa %i4, [%i5] ASI_DMMU
mov AA_IMMU_TSB, %i5

sys/sparc64/sparc64/vm_machdep.c

@@ -123,23 +123,6 @@ cpu_exit(struct thread *td)
void
cpu_sched_exit(struct thread *td)
{
struct vmspace *vm;
struct pcpu *pc;
struct proc *p;
mtx_assert(&sched_lock, MA_OWNED);
p = td->td_proc;
vm = p->p_vmspace;
if (vm->vm_refcnt > 0)
return;
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
if (pc->pc_vmspace == vm) {
vm->vm_pmap.pm_active &= ~pc->pc_cpumask;
vm->vm_pmap.pm_context[pc->pc_cpuid] = -1;
pc->pc_vmspace = NULL;
}
}
}
void