Kill the LAZYPMAP ifdefs. While they worked, they didn't do anything
to help the AMD cpus (which have a hardware tlb flush filter).  I held
off to see what the 64 bit Intel cpus did, but it doesn't seem to help
much there either.  Oh well, store it in the Attic.
Peter Wemm, 2004-05-16 22:11:50 +00:00
parent 53ee59fe53
commit 12c1418ccf
6 changed files with 1 addition and 127 deletions

@@ -321,19 +321,4 @@ IDTVEC(rendezvous)
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */
	POP_FRAME			/* Why not doreti? */
	iretq

#ifdef LAZY_SWITCH
/*
 * Clean up when we lose out on the lazy context switch optimization.
 * ie: when we are about to release a PTD but a cpu is still borrowing it.
 */
	SUPERALIGN_TEXT
IDTVEC(lazypmap)
	PUSH_FRAME
	call	pmap_lazyfix_action
	movq	lapic, %rax
	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */
	POP_FRAME			/* Why not doreti? */
	iretq
#endif
#endif /* SMP */

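In C terms, the removed IDTVEC(lazypmap) vector did nothing beyond calling the
shootdown hook and acknowledging the interrupt. A heavily simplified sketch,
not kernel code; lazypmap_ipi_handler() and lapic_eoi() are illustrative names
and the empty stubs stand in for the real pieces:

/* Illustrative sketch; the real handler is the assembly above. */
static void pmap_lazyfix_action(void) { /* defined in the pmap.c hunk below */ }
static void lapic_eoi(void) { /* stands in for the "movl $0, LA_EOI(%rax)" store */ }

static void
lazypmap_ipi_handler(void)
{
	pmap_lazyfix_action();	/* release this CPU's borrowed page tables */
	lapic_eoi();		/* End Of Interrupt to the local APIC */
}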

@@ -158,10 +158,6 @@ ENTRY(cpu_switch)
	/* switch address space */
	movq	PCB_CR3(%r8),%rdx
#ifdef LAZY_SWITCH
	cmpq	%rdx,KPML4phys			/* Kernel address space? */
	je	sw1
#endif
	movq	%cr3,%rax
	cmpq	%rdx,%rax			/* Same address space? */
	je	sw1

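In C terms, the removed lines made cpu_switch() skip the %cr3 reload whenever
the incoming thread runs on the kernel page tables, leaving the outgoing user
pmap "borrowed" until it is reused or shot down. A minimal userland sketch of
that decision; read_cr3(), load_cr3() and kernel_pml4 are stand-ins for the
privileged pieces, and the values are made up:

#include <stdint.h>
#include <stdio.h>

static uint64_t cr3 = 0x2000;			/* pretend current %cr3 */
static const uint64_t kernel_pml4 = 0x1000;	/* pretend KPML4phys */

static uint64_t read_cr3(void) { return (cr3); }

static void
load_cr3(uint64_t v)
{
	cr3 = v;
	printf("reload cr3 -> %#lx (non-global TLB entries flushed)\n",
	    (unsigned long)v);
}

#define	LAZY_SWITCH			/* the option this commit removes */

static void
switch_address_space(uint64_t new_cr3)
{
#ifdef LAZY_SWITCH
	/* Removed: switching into the kernel pmap kept the old user page
	 * tables loaded, betting the next switch returns to the same process. */
	if (new_cr3 == kernel_pml4)
		return;
#endif
	/* What remains: reload only when the address space really changes. */
	if (new_cr3 != read_cr3())
		load_cr3(new_cr3);
}

int
main(void)
{
	switch_address_space(kernel_pml4);	/* lazy case: no reload */
	switch_address_space(0x3000);		/* new user pmap: reload + flush */
	return (0);
}

(On AMD parts the hardware TLB flush filter already avoids the unnecessary
flushes this check was trying to dodge, which is why the commit message says it
didn't help there.)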

@@ -303,11 +303,6 @@ cpu_mp_start(void)
	/* Install an inter-CPU IPI for forwarding statclock() */
	setidt(IPI_STATCLOCK, IDTVEC(statclock), SDT_SYSIGT, SEL_KPL, 0);

#ifdef LAZY_SWITCH
	/* Install an inter-CPU IPI for lazy pmap release */
	setidt(IPI_LAZYPMAP, IDTVEC(lazypmap), SDT_SYSIGT, SEL_KPL, 0);
#endif

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

@@ -163,11 +163,6 @@ struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;
#ifdef LAZY_SWITCH
#ifdef SMP
static struct mtx lazypmap_lock;
#endif
#endif

vm_paddr_t avail_start;		/* PA of first available physical page */
vm_paddr_t avail_end;		/* PA of last available physical page */
@@ -474,11 +469,6 @@ pmap_bootstrap(firstaddr)
	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	LIST_INIT(&allpmaps);
#ifdef LAZY_SWITCH
#ifdef SMP
	mtx_init(&lazypmap_lock, "lazypmap", NULL, MTX_SPIN);
#endif
#endif
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
@@ -1288,93 +1278,6 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va)
 * Pmap allocation/deallocation routines.
 ***************************************************/

#ifdef LAZY_SWITCH
#ifdef SMP

/*
 * Deal with a SMP shootdown of other users of the pmap that we are
 * trying to dispose of.  This can be a bit hairy.
 */
static u_int *lazymask;
static register_t lazyptd;
static volatile u_int lazywait;

void pmap_lazyfix_action(void);

void
pmap_lazyfix_action(void)
{
	u_int mymask = PCPU_GET(cpumask);

	if (rcr3() == lazyptd)
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
	atomic_clear_int(lazymask, mymask);
	atomic_store_rel_int(&lazywait, 1);
}

static void
pmap_lazyfix_self(u_int mymask)
{

	if (rcr3() == lazyptd)
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
	atomic_clear_int(lazymask, mymask);
}

static void
pmap_lazyfix(pmap_t pmap)
{
	u_int mymask = PCPU_GET(cpumask);
	u_int mask;
	register u_int spins;

	while ((mask = pmap->pm_active) != 0) {
		spins = 50000000;
		mask = mask & -mask;	/* Find least significant set bit */
		mtx_lock_spin(&lazypmap_lock);
		lazyptd = vtophys(pmap->pm_pml4);
		if (mask == mymask) {
			lazymask = &pmap->pm_active;
			pmap_lazyfix_self(mymask);
		} else {
			atomic_store_rel_long((u_long *)&lazymask,
			    (u_long)&pmap->pm_active);
			atomic_store_rel_int(&lazywait, 0);
			ipi_selected(mask, IPI_LAZYPMAP);
			while (lazywait == 0) {
				ia32_pause();
				if (--spins == 0)
					break;
			}
		}
		mtx_unlock_spin(&lazypmap_lock);
		if (spins == 0)
			printf("pmap_lazyfix: spun for 50000000\n");
	}
}

#else	/* SMP */

/*
 * Cleaning up on uniprocessor is easy.  For various reasons, we're
 * unlikely to have to even execute this code, including the fact
 * that the cleanup is deferred until the parent does a wait(2), which
 * means that another userland process has run.
 */
static void
pmap_lazyfix(pmap_t pmap)
{
	u_long cr3;

	cr3 = vtophys(pmap->pm_pml4);
	if (cr3 == rcr3()) {
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
		pmap->pm_active &= ~(PCPU_GET(cpumask));
	}
}
#endif	/* SMP */
#endif

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
@@ -1389,9 +1292,6 @@ pmap_release(pmap_t pmap)
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

#ifdef LAZY_SWITCH
	pmap_lazyfix(pmap);
#endif
	mtx_lock_spin(&allpmaps_lock);
	LIST_REMOVE(pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);

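The heart of the removed machinery is the pm_active walk in pmap_lazyfix():
each pass isolates one CPU that still has the dying pmap active with
mask & -mask, publishes lazyptd/lazymask, sends IPI_LAZYPMAP to that CPU, and
spins on lazywait (with the bounded 50000000-iteration safety valve) until the
target clears its bit. A small standalone demo of just the bitmap walk, using a
made-up pm_active value; in the kernel the target CPU clears its own bit from
the IPI handler:

#include <stdio.h>

int
main(void)
{
	/* Pretend CPUs 1, 3 and 4 still have the pmap active. */
	unsigned int pm_active = (1u << 1) | (1u << 3) | (1u << 4);

	while (pm_active != 0) {
		/* Least significant set bit: one borrowing CPU per pass. */
		unsigned int mask = pm_active & -pm_active;

		printf("send IPI_LAZYPMAP to cpu mask %#x\n", mask);
		pm_active &= ~mask;	/* stand-in for the target clearing its bit */
	}
	return (0);
}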

@@ -93,7 +93,6 @@
#define	IPI_INVLTLB	(APIC_IPI_INTS + 1)	/* TLB Shootdown IPIs */
#define	IPI_INVLPG	(APIC_IPI_INTS + 2)
#define	IPI_INVLRNG	(APIC_IPI_INTS + 3)
#define	IPI_LAZYPMAP	(APIC_IPI_INTS + 4)	/* Lazy pmap release. */
#define	IPI_HARDCLOCK	(APIC_IPI_INTS + 8)	/* Inter-CPU clock handling. */
#define	IPI_STATCLOCK	(APIC_IPI_INTS + 9)
#define	IPI_RENDEZVOUS	(APIC_IPI_INTS + 10)	/* Inter-CPU rendezvous. */

@@ -44,8 +44,7 @@ inthand_t
	IDTVEC(statclock),	/* Forward statclock() */
	IDTVEC(cpuast),		/* Additional software trap on other cpu */
	IDTVEC(cpustop),	/* CPU stops & waits to be restarted */
	IDTVEC(rendezvous),	/* handle CPU rendezvous */
	IDTVEC(lazypmap);	/* handle lazy pmap release */
	IDTVEC(rendezvous);	/* handle CPU rendezvous */

/* functions in mp_machdep.c */
void	cpu_add(u_int apic_id, char boot_cpu);