Fix a bug in the data access error recovery.  Before re-enabling the data
cache after a data access error we must discard all cache lines.  When
disabled, existing cache lines are not invalidated by stores to memory, so
we risk reading stale data that was cached before the data access error if
we don't flush them.  This is especially fatal when the memory involved
is the active part of the kernel or user stack.  For good measure we also
flush the instruction cache.

This fixes random crashes when the X server probes the PCI bus through
/dev/pci.
This commit is contained in:
Jake Burkholder 2003-11-11 06:41:54 +00:00
parent 129540b834
commit 0019917542
6 changed files with 60 additions and 4 deletions

View File

@ -97,16 +97,25 @@ struct cacheinfo {
#ifdef _KERNEL
/*
 * Function types for the CPU-implementation-specific level 1 cache
 * operations.  Each supported CPU family (Cheetah/UltraSPARC-III and
 * Spitfire/UltraSPARC-I,II) provides its own implementation.
 */
typedef void cache_enable_t(void);
typedef void cache_flush_t(void);
typedef void dcache_page_inval_t(vm_paddr_t pa);
typedef void icache_page_inval_t(vm_paddr_t pa);

/* Probe cache geometry from the OpenFirmware node and select the ops. */
void cache_init(phandle_t node);

/* Cheetah (UltraSPARC-III) implementations. */
cache_enable_t cheetah_cache_enable;
cache_flush_t cheetah_cache_flush;
dcache_page_inval_t cheetah_dcache_page_inval;
icache_page_inval_t cheetah_icache_page_inval;

/* Spitfire (UltraSPARC-I/II) implementations. */
cache_enable_t spitfire_cache_enable;
cache_flush_t spitfire_cache_flush;
dcache_page_inval_t spitfire_dcache_page_inval;
icache_page_inval_t spitfire_icache_page_inval;

/*
 * Indirection pointers; cache_init() points these at the cheetah_* or
 * spitfire_* implementations depending on the detected CPU.
 */
extern cache_enable_t *cache_enable;
extern cache_flush_t *cache_flush;
extern dcache_page_inval_t *dcache_page_inval;
extern icache_page_inval_t *icache_page_inval;

View File

@ -86,6 +86,8 @@
/* Cache geometry (sizes, line sizes) filled in by cache_init(). */
struct cacheinfo cache;
/*
 * CPU-specific cache operation entry points, selected by cache_init()
 * based on the CPU implementation (Cheetah vs. Spitfire).
 */
cache_enable_t *cache_enable;
cache_flush_t *cache_flush;
dcache_page_inval_t *dcache_page_inval;
icache_page_inval_t *icache_page_inval;
@ -125,10 +127,14 @@ cache_init(phandle_t node)
panic("cache_init: E$ set size not a power of 2");
if (cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
cache_enable = cheetah_cache_enable;
cache_flush = cheetah_cache_flush;
dcache_page_inval = cheetah_dcache_page_inval;
icache_page_inval = cheetah_icache_page_inval;
tlb_flush_user = cheetah_tlb_flush_user;
} else {
cache_enable = spitfire_cache_enable;
cache_flush = spitfire_cache_flush;
dcache_page_inval = spitfire_dcache_page_inval;
icache_page_inval = spitfire_icache_page_inval;
tlb_flush_user = spitfire_tlb_flush_user;

View File

@ -45,6 +45,22 @@
#include <machine/smp.h>
#include <machine/tlb.h>
/*
 * Enable level 1 caches.
 */
void
cheetah_cache_enable(void)
{
	/*
	 * Intentionally empty.  NOTE(review): presumably the level 1
	 * caches need no explicit enabling on UltraSPARC-III -- confirm
	 * against the Cheetah manuals; this exists so the cache_enable
	 * indirection pointer is valid on all CPUs.
	 */
}
/*
 * Flush all lines from the level 1 caches.
 */
void
cheetah_cache_flush(void)
{
	/*
	 * Intentionally empty.  NOTE(review): presumably no manual flush
	 * is required on UltraSPARC-III -- confirm; this exists so the
	 * cache_flush indirection pointer is valid on all CPUs.
	 */
}
/*
* Flush a physical page from the data cache.
*/

View File

@ -42,6 +42,7 @@
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/lsu.h>
#include <machine/smp.h>
#include <machine/tlb.h>
@ -52,6 +53,32 @@ PMAP_STATS_VAR(spitfire_dcache_npage_inval_match);
PMAP_STATS_VAR(spitfire_icache_npage_inval);
PMAP_STATS_VAR(spitfire_icache_npage_inval_match);
/*
 * Enable the level 1 caches.
 *
 * Sets the instruction- and data-cache enable bits in the load/store
 * unit control register.
 */
void
spitfire_cache_enable(void)
{
	u_long reg;

	reg = ldxa(0, ASI_LSU_CTL_REG);
	reg |= LSU_IC | LSU_DC;
	stxa_sync(0, ASI_LSU_CTL_REG, reg);
}
/*
 * Flush all lines from the level 1 caches.
 *
 * Walks every line of the D-cache and then the I-cache, storing zero to
 * each tag via the diagnostic ASIs, which discards any cached contents.
 */
void
spitfire_cache_flush(void)
{
	u_long off;

	for (off = 0; off < cache.dc_size; off += cache.dc_linesize)
		stxa_sync(off, ASI_DCACHE_TAG, 0);
	for (off = 0; off < cache.ic_size; off += cache.ic_linesize)
		stxa_sync(off, ASI_ICACHE_TAG, 0);
}
/*
* Flush a physical page from the data cache.
*/

View File

@ -575,10 +575,6 @@ fas_nofault_end:
.globl fas_fault
/*
 * Fault recovery landing pad for physical-ASI accesses between
 * fas_nofault_begin and fas_nofault_end.  Re-enables the level 1
 * caches in the LSU control register and returns -1 to signal the
 * access failed.
 */
ENTRY(fas_fault)
	ldxa [%g0] ASI_LSU_CTL_REG, %o0	/* read LSU control register */
	or %o0, LSU_IC | LSU_DC, %o0	/* set I- and D-cache enable bits */
	stxa %o0, [%g0] ASI_LSU_CTL_REG	/* write it back */
	membar #Sync			/* wait for the store to complete */
	retl
	 mov -1, %o0			/* (delay slot) return -1 = failure */
END(fas_fault)

View File

@ -345,6 +345,8 @@ trap(struct trapframe *tf)
tf->tf_tpc < (u_long)fas_nofault_end &&
*(u_int32_t *)tf->tf_tpc == MEMBARSYNC_INST &&
((u_int32_t *)tf->tf_tpc)[-2] == MEMBARSYNC_INST) {
cache_flush();
cache_enable();
tf->tf_tpc = (u_long)fas_fault;
tf->tf_tnpc = tf->tf_tpc + 4;
error = 0;