Add a new 'pmap_invalidate_cache()' to flush the CPU caches via the
wbinvd() instruction.  This includes a new IPI so that the caches on
all CPUs are flushed for the SMP case.

MFC after:	1 month
John Baldwin 2006-05-01 21:36:47 +00:00
parent ada5d7d5b0
commit 4ac60df584
12 changed files with 145 additions and 5 deletions
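
For context, a minimal sketch of a call site (illustrative only; the
function and the remapping scenario below are assumptions, not part of
this commit). The typical reason to flush the caches is changing the
caching attributes of a mapping, where stale dirty lines on any CPU
could otherwise be written back over the newly uncached memory:

	/*
	 * Illustrative sketch: after switching the PTEs for a range to
	 * uncached (PG_N), write back and invalidate all CPU caches so
	 * no stale lines survive. On SMP, pmap_invalidate_cache() sends
	 * IPI_INVLCACHE to the other CPUs; on UP it is a bare wbinvd().
	 */
	static void
	example_set_uncacheable(vm_offset_t va, vm_size_t size)
	{
		/* ... set PG_N on the PTEs covering [va, va + size) ... */
		pmap_invalidate_all(kernel_pmap);	/* flush stale TLB entries */
		pmap_invalidate_cache();		/* flush caches on all CPUs */
	}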

sys/amd64/amd64/apic_vector.S

@@ -170,6 +170,25 @@ IDTVEC(invlrng)
 	popq	%rax
 	iretq
 
+/*
+ * Invalidate cache.
+ */
+	.text
+	SUPERALIGN_TEXT
+IDTVEC(invlcache)
+	pushq	%rax
+
+	wbinvd
+
+	movq	lapic, %rax
+	movl	$0, LA_EOI(%rax)	/* End Of Interrupt to APIC */
+
+	lock
+	incl	smp_tlb_wait
+
+	popq	%rax
+	iretq
+
 /*
  * Handler for IPIs sent via the per-cpu IPI bitmap.
  */

sys/amd64/amd64/mp_machdep.c

@@ -874,6 +874,14 @@ smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
 		ia32_pause();
 }
 
+void
+smp_cache_flush(void)
+{
+
+	if (smp_started)
+		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
+}
+
 void
 smp_invltlb(void)
 {
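
smp_tlb_shootdown() itself is untouched by this change; smp_cache_flush()
simply reuses it with the new vector. For readers unfamiliar with the
handshake, a simplified sketch follows (abridged from the surrounding
mp_machdep.c logic; treat the details as an approximation, not part of
this diff):

	/*
	 * Sketch of the existing shootdown handshake: publish the
	 * arguments, reset the acknowledgment counter, IPI every other
	 * CPU, and spin until each handler has bumped smp_tlb_wait --
	 * which is exactly what the new invlcache handlers do after
	 * their wbinvd.
	 */
	static void
	smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
	{
		u_int ncpu;

		ncpu = mp_ncpus - 1;		/* does not shoot down self */
		if (ncpu < 1)
			return;			/* no other cpus */
		mtx_assert(&smp_ipi_mtx, MA_OWNED);
		smp_tlb_addr1 = addr1;
		smp_tlb_addr2 = addr2;
		atomic_store_rel_int(&smp_tlb_wait, 0);
		ipi_all_but_self(vector);
		while (smp_tlb_wait < ncpu)
			ia32_pause();
	}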

sys/amd64/amd64/pmap.c

@@ -732,6 +732,30 @@ pmap_invalidate_all(pmap_t pmap)
 	else
 		critical_exit();
 }
 
+void
+pmap_invalidate_cache(void)
+{
+
+	if (smp_started) {
+		if (!(read_rflags() & PSL_I))
+			panic("%s: interrupts disabled", __func__);
+		mtx_lock_spin(&smp_ipi_mtx);
+	} else
+		critical_enter();
+	/*
+	 * We need to disable interrupt preemption but MUST NOT have
+	 * interrupts disabled here.
+	 * XXX we may need to hold schedlock to get a coherent pm_active
+	 * XXX critical sections disable interrupts again
+	 */
+	wbinvd();
+	smp_cache_flush();
+	if (smp_started)
+		mtx_unlock_spin(&smp_ipi_mtx);
+	else
+		critical_exit();
+}
+
 #else /* !SMP */
 
 /*
  * Normal, non-SMP, invalidation functions.
@@ -762,6 +786,13 @@ pmap_invalidate_all(pmap_t pmap)
 	if (pmap == kernel_pmap || pmap->pm_active)
 		invltlb();
 }
+
+PMAP_INLINE void
+pmap_invalidate_cache(void)
+{
+
+	wbinvd();
+}
 #endif /* !SMP */
 
 /*
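
The PSL_I check in the SMP path is worth a note: the initiating CPU
spins until every other CPU acknowledges the IPI, so it must be able to
take IPIs itself. A hedged illustration of the rule (hypothetical
callers, not from this commit):

	/* Fine: ordinary thread context with interrupts enabled. */
	void
	good_caller(void)
	{

		pmap_invalidate_cache();
	}

	/*
	 * Panics via the PSL_I check -- and rightly so: with interrupts
	 * disabled this CPU could never service a concurrent shootdown
	 * IPI from another initiator, so two CPUs flushing at once could
	 * deadlock against each other.
	 */
	void
	bad_caller(void)
	{
		register_t saved;

		saved = intr_disable();
		pmap_invalidate_cache();	/* panics: interrupts disabled */
		intr_restore(saved);
	}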

sys/amd64/include/apicvar.h

@@ -118,8 +118,9 @@
 #define	IPI_INVLTLB	(APIC_IPI_INTS + 1)	/* TLB Shootdown IPIs */
 #define	IPI_INVLPG	(APIC_IPI_INTS + 2)
 #define	IPI_INVLRNG	(APIC_IPI_INTS + 3)
+#define	IPI_INVLCACHE	(APIC_IPI_INTS + 4)
 /* Vector to handle bitmap based IPIs */
-#define	IPI_BITMAP_VECTOR	(APIC_IPI_INTS + 5)
+#define	IPI_BITMAP_VECTOR	(APIC_IPI_INTS + 6)
 
 /* IPIs handled by IPI_BITMAPED_VECTOR (XXX ups is there a better place?) */
 #define	IPI_AST		0 	/* Generate software trap. */
@@ -127,7 +128,7 @@
 #define	IPI_BITMAP_LAST	IPI_PREEMPT
 #define	IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST)
 
-#define	IPI_STOP	(APIC_IPI_INTS + 6)	/* Stop CPU until restarted. */
+#define	IPI_STOP	(APIC_IPI_INTS + 7)	/* Stop CPU until restarted. */
 
 /*
  * The spurious interrupt can share the priority class with the IPIs since
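
Assembled from the hunks above, the resulting amd64 fixed IPI vector
layout is:

	APIC_IPI_INTS + 1	IPI_INVLTLB
	APIC_IPI_INTS + 2	IPI_INVLPG
	APIC_IPI_INTS + 3	IPI_INVLRNG
	APIC_IPI_INTS + 4	IPI_INVLCACHE	(new)
	APIC_IPI_INTS + 6	IPI_BITMAP_VECTOR	(was + 5)
	APIC_IPI_INTS + 7	IPI_STOP	(was + 6)

The i386 layout, shown later, differs only in that IPI_LAZYPMAP occupies
APIC_IPI_INTS + 5.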

sys/amd64/include/pmap.h

@@ -309,6 +309,7 @@ void	pmap_unmapdev(vm_offset_t, vm_size_t);
 void	pmap_invalidate_page(pmap_t, vm_offset_t);
 void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
 void	pmap_invalidate_all(pmap_t);
+void	pmap_invalidate_cache(void);
 
 #endif /* _KERNEL */

sys/amd64/include/smp.h

@@ -40,6 +40,7 @@ inthand_t
 	IDTVEC(invltlb),	/* TLB shootdowns - global */
 	IDTVEC(invlpg),		/* TLB shootdowns - 1 page */
 	IDTVEC(invlrng),	/* TLB shootdowns - page range */
+	IDTVEC(invlcache),	/* Write back and invalidate cache */
 	IDTVEC(ipi_intr_bitmap_handler), /* Bitmap based IPIs */
 	IDTVEC(cpustop),	/* CPU stops & waits to be restarted */
 	IDTVEC(rendezvous);	/* handle CPU rendezvous */
@@ -56,6 +57,7 @@ void	ipi_bitmap_handler(struct trapframe frame);
 u_int	mp_bootaddress(u_int);
 int	mp_grab_cpu_hlt(void);
 void	mp_topology(void);
+void	smp_cache_flush(void);
 void	smp_invlpg(vm_offset_t addr);
 void	smp_masked_invlpg(u_int mask, vm_offset_t addr);
 void	smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);

sys/i386/i386/apic_vector.s

@@ -232,6 +232,39 @@ IDTVEC(invlrng)
 	popl	%eax
 	iret
 
+/*
+ * Invalidate cache.
+ */
+	.text
+	SUPERALIGN_TEXT
+IDTVEC(invlcache)
+	pushl	%eax
+	pushl	%ds
+	movl	$KDSEL, %eax		/* Kernel data selector */
+	movl	%eax, %ds
+
+#ifdef COUNT_IPIS
+	pushl	%fs
+	movl	$KPSEL, %eax		/* Private space selector */
+	movl	%eax, %fs
+	movl	PCPU(CPUID), %eax
+	popl	%fs
+	movl	ipi_invlcache_counts(,%eax,4),%eax
+	incl	(%eax)
+#endif
+
+	wbinvd
+
+	movl	lapic, %eax
+	movl	$0, LA_EOI(%eax)	/* End Of Interrupt to APIC */
+
+	lock
+	incl	smp_tlb_wait
+
+	popl	%ds
+	popl	%eax
+	iret
+
 /*
  * Handler for IPIs sent via the per-cpu IPI bitmap.
  */
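
The i386 handler also bumps a per-CPU counter under COUNT_IPIS. For
reference, a hedged sketch of how such a counter is typically registered
(the SYSINIT/intrcnt_add() wiring below is an assumption based on how the
other IPI counters in mp_machdep.c are handled, not part of this diff):

	#ifdef COUNT_IPIS
	/*
	 * Sketch (assumption): register a named per-CPU counter for the
	 * new IPI so it shows up next to the other shootdown counters.
	 */
	static void
	mp_ipi_intrcnt(void *dummy)
	{
		char buf[64];
		int i;

		for (i = 0; i < mp_ncpus; i++) {
			snprintf(buf, sizeof(buf), "cpu%d: invlcache", i);
			intrcnt_add(buf, &ipi_invlcache_counts[i]);
		}
	}
	SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
	#endif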

sys/i386/i386/mp_machdep.c

@@ -171,6 +171,7 @@ static u_long *ipi_ast_counts[MAXCPU];
 u_long *ipi_invltlb_counts[MAXCPU];
 u_long *ipi_invlrng_counts[MAXCPU];
 u_long *ipi_invlpg_counts[MAXCPU];
+u_long *ipi_invlcache_counts[MAXCPU];
 u_long *ipi_rendezvous_counts[MAXCPU];
 u_long *ipi_lazypmap_counts[MAXCPU];
 #endif
@@ -1046,6 +1047,14 @@ smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
 		ia32_pause();
 }
 
+void
+smp_cache_flush(void)
+{
+
+	if (smp_started)
+		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
+}
+
 void
 smp_invltlb(void)
 {

sys/i386/i386/pmap.c

@@ -698,6 +698,30 @@ pmap_invalidate_all(pmap_t pmap)
 	else
 		critical_exit();
 }
 
+void
+pmap_invalidate_cache(void)
+{
+
+	if (smp_started) {
+		if (!(read_eflags() & PSL_I))
+			panic("%s: interrupts disabled", __func__);
+		mtx_lock_spin(&smp_ipi_mtx);
+	} else
+		critical_enter();
+	/*
+	 * We need to disable interrupt preemption but MUST NOT have
+	 * interrupts disabled here.
+	 * XXX we may need to hold schedlock to get a coherent pm_active
+	 * XXX critical sections disable interrupts again
+	 */
+	wbinvd();
+	smp_cache_flush();
+	if (smp_started)
+		mtx_unlock_spin(&smp_ipi_mtx);
+	else
+		critical_exit();
+}
+
 #else /* !SMP */
 
 /*
  * Normal, non-SMP, 486+ invalidation functions.
@@ -728,6 +752,13 @@ pmap_invalidate_all(pmap_t pmap)
 	if (pmap == kernel_pmap || pmap->pm_active)
 		invltlb();
 }
+
+PMAP_INLINE void
+pmap_invalidate_cache(void)
+{
+
+	wbinvd();
+}
 #endif /* !SMP */
 
 /*

sys/i386/include/apicvar.h

@@ -116,9 +116,10 @@
 #define	IPI_INVLTLB	(APIC_IPI_INTS + 1)	/* TLB Shootdown IPIs */
 #define	IPI_INVLPG	(APIC_IPI_INTS + 2)
 #define	IPI_INVLRNG	(APIC_IPI_INTS + 3)
-#define	IPI_LAZYPMAP	(APIC_IPI_INTS + 4)	/* Lazy pmap release. */
+#define	IPI_INVLCACHE	(APIC_IPI_INTS + 4)
+#define	IPI_LAZYPMAP	(APIC_IPI_INTS + 5)	/* Lazy pmap release. */
 /* Vector to handle bitmap based IPIs */
-#define	IPI_BITMAP_VECTOR	(APIC_IPI_INTS + 5)
+#define	IPI_BITMAP_VECTOR	(APIC_IPI_INTS + 6)
 
 /* IPIs handled by IPI_BITMAPED_VECTOR (XXX ups is there a better place?) */
 #define	IPI_AST		0 	/* Generate software trap. */
@@ -126,7 +127,7 @@
 #define	IPI_BITMAP_LAST	IPI_PREEMPT
 #define	IPI_IS_BITMAPED(x) ((x) <= IPI_BITMAP_LAST)
 
-#define	IPI_STOP	(APIC_IPI_INTS + 6)	/* Stop CPU until restarted. */
+#define	IPI_STOP	(APIC_IPI_INTS + 7)	/* Stop CPU until restarted. */
 
 /*
  * The spurious interrupt can share the priority class with the IPIs since

sys/i386/include/pmap.h

@@ -378,6 +378,7 @@ void	pmap_set_pg(void);
 void	pmap_invalidate_page(pmap_t, vm_offset_t);
 void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
 void	pmap_invalidate_all(pmap_t);
+void	pmap_invalidate_cache(void);
 
 #endif /* _KERNEL */

sys/i386/include/smp.h

@@ -39,6 +39,7 @@ extern struct mtx smp_tlb_mtx;
 extern u_long *ipi_invltlb_counts[MAXCPU];
 extern u_long *ipi_invlrng_counts[MAXCPU];
 extern u_long *ipi_invlpg_counts[MAXCPU];
+extern u_long *ipi_invlcache_counts[MAXCPU];
 extern u_long *ipi_rendezvous_counts[MAXCPU];
 extern u_long *ipi_lazypmap_counts[MAXCPU];
 #endif
@@ -48,6 +49,7 @@ inthand_t
 	IDTVEC(invltlb),	/* TLB shootdowns - global */
 	IDTVEC(invlpg),		/* TLB shootdowns - 1 page */
 	IDTVEC(invlrng),	/* TLB shootdowns - page range */
+	IDTVEC(invlcache),	/* Write back and invalidate cache */
 	IDTVEC(ipi_intr_bitmap_handler), /* Bitmap based IPIs */
 	IDTVEC(cpustop),	/* CPU stops & waits to be restarted */
 	IDTVEC(rendezvous),	/* handle CPU rendezvous */
@@ -65,6 +67,7 @@ void ipi_bitmap_handler(struct trapframe frame);
 u_int	mp_bootaddress(u_int);
 int	mp_grab_cpu_hlt(void);
 void	mp_topology(void);
+void	smp_cache_flush(void);
 void	smp_invlpg(vm_offset_t addr);
 void	smp_masked_invlpg(u_int mask, vm_offset_t addr);
 void	smp_invlpg_range(vm_offset_t startva, vm_offset_t endva);