From 5a896dc0ff42038d80ec99e785b47e2d53ebe135 Mon Sep 17 00:00:00 2001
From: br
Date: Tue, 20 Jan 2015 11:10:25 +0000
Subject: [PATCH] Add 128-byte cache flushing routines. Leave CNMIPS untouched,
 as these functions depend on the config2 register.

---
 sys/mips/include/cache_mipsNN.h |   2 -
 sys/mips/mips/cache.c           |   4 -
 sys/mips/mips/cache_mipsNN.c    | 219 ++++++++++++++++++++++++++++++++
 3 files changed, 219 insertions(+), 6 deletions(-)

diff --git a/sys/mips/include/cache_mipsNN.h b/sys/mips/include/cache_mipsNN.h
index 435993b9b72b..1969ab1a9115 100644
--- a/sys/mips/include/cache_mipsNN.h
+++ b/sys/mips/include/cache_mipsNN.h
@@ -57,7 +57,6 @@ void mipsNN_pdcache_inv_range_16(vm_offset_t, vm_size_t);
 void mipsNN_pdcache_inv_range_32(vm_offset_t, vm_size_t);
 void mipsNN_pdcache_wb_range_16(vm_offset_t, vm_size_t);
 void mipsNN_pdcache_wb_range_32(vm_offset_t, vm_size_t);
-#ifdef CPU_CNMIPS
 void mipsNN_icache_sync_all_128(void);
 void mipsNN_icache_sync_range_128(vm_offset_t, vm_size_t);
 void mipsNN_icache_sync_range_index_128(vm_offset_t, vm_size_t);
@@ -66,7 +65,6 @@ void mipsNN_pdcache_wbinv_range_128(vm_offset_t, vm_size_t);
 void mipsNN_pdcache_wbinv_range_index_128(vm_offset_t, vm_size_t);
 void mipsNN_pdcache_inv_range_128(vm_offset_t, vm_size_t);
 void mipsNN_pdcache_wb_range_128(vm_offset_t, vm_size_t);
-#endif
 void mipsNN_sdcache_wbinv_all_32(void);
 void mipsNN_sdcache_wbinv_range_32(vm_offset_t, vm_size_t);
 void mipsNN_sdcache_wbinv_range_index_32(vm_offset_t, vm_size_t);
diff --git a/sys/mips/mips/cache.c b/sys/mips/mips/cache.c
index 70a1db380421..59172d705b0d 100644
--- a/sys/mips/mips/cache.c
+++ b/sys/mips/mips/cache.c
@@ -104,7 +104,6 @@ mips_config_cache(struct mips_cpuinfo * cpuinfo)
 			mips_cache_ops.mco_icache_sync_range_index =
 			    mipsNN_icache_sync_range_index_32;
 			break;
-#ifdef CPU_CNMIPS
 		case 128:
 			mips_cache_ops.mco_icache_sync_all =
 			    mipsNN_icache_sync_all_128;
 			mips_cache_ops.mco_icache_sync_range =
@@ -112,7 +111,6 @@ mips_config_cache(struct mips_cpuinfo * cpuinfo)
 			    mipsNN_icache_sync_range_128;
 			mips_cache_ops.mco_icache_sync_range_index =
 			    mipsNN_icache_sync_range_index_128;
 			break;
-#endif
 
 #ifdef MIPS_DISABLE_L1_CACHE
 		case 0:
@@ -172,7 +170,6 @@ mips_config_cache(struct mips_cpuinfo * cpuinfo)
 			    mipsNN_pdcache_wb_range_32;
 #endif
 			break;
-#ifdef CPU_CNMIPS
 		case 128:
 			mips_cache_ops.mco_pdcache_wbinv_all =
 			    mips_cache_ops.mco_intern_pdcache_wbinv_all =
@@ -188,7 +185,6 @@ mips_config_cache(struct mips_cpuinfo * cpuinfo)
 			    mipsNN_pdcache_wb_range_128;
 			mips_cache_ops.mco_intern_pdcache_wb_range =
 			    mipsNN_pdcache_wb_range_128;
 			break;
-#endif
 #ifdef MIPS_DISABLE_L1_CACHE
 		case 0:
 			mips_cache_ops.mco_pdcache_wbinv_all =
diff --git a/sys/mips/mips/cache_mipsNN.c b/sys/mips/mips/cache_mipsNN.c
index ac104c2cfd2a..2bb1fa159c72 100644
--- a/sys/mips/mips/cache_mipsNN.c
+++ b/sys/mips/mips/cache_mipsNN.c
@@ -647,6 +647,225 @@ mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
 	SYNC;
 }
 
+#else
+
+void
+mipsNN_icache_sync_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + picache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
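+	 * (An Index op selects a cache line purely by address bits, so
+	 * sweeping an address range the size of the whole cache visits
+	 * every line in every way.)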
+	 */
+
+	mips_intern_dcache_wbinv_all();
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	mips_intern_dcache_wb_range(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given. So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
+	 */
+	stride = picache_stride;
+	loopcount = picache_loopcount;
+
+	mips_intern_dcache_wbinv_range_index(va, (eva - va));
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_wbinv_all_128(void)
+{
+	vm_offset_t va, eva;
+
+	va = MIPS_PHYS_TO_KSEG0(0);
+	eva = va + pdcache_size;
+
+	/*
+	 * Since we're hitting the whole thing, we don't have to
+	 * worry about the N different "ways".
+	 */
+
+	while (va < eva) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += (32 * 128);
+	}
+
+	SYNC;
+}
+
+
+void
+mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va,
+		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva, tmpva;
+	int i, stride, loopcount;
+
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given. So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	/*
+	 * GCC generates better code in the loops if we reference local
+	 * copies of these global variables.
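+	 * (pdcache_stride and pdcache_loopcount describe the way layout
+	 * worked out when the cache was probed: loopcount ways, stride
+	 * bytes apart, so each index loop below touches every way.)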
+	 */
+	stride = pdcache_stride;
+	loopcount = pdcache_loopcount;
+
+	while ((eva - va) >= (32 * 128)) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_r4k_op_32lines_128(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 32 * 128;
+	}
+
+	while (va < eva) {
+		tmpva = va;
+		for (i = 0; i < loopcount; i++, tmpva += stride)
+			cache_op_r4k_line(tmpva,
+			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
+		va += 128;
+	}
+}
+
+void
+mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
+		va += 128;
+	}
+
+	SYNC;
+}
+
+void
+mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
+{
+	vm_offset_t eva;
+
+	eva = round_line128(va + size);
+	va = trunc_line128(va);
+
+	while ((eva - va) >= (32 * 128)) {
+		cache_r4k_op_32lines_128(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += (32 * 128);
+	}
+
+	while (va < eva) {
+		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
+		va += 128;
+	}
+
+	SYNC;
+}
+
 #endif
 
 void
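
A note on the shared helpers: the routines added above rely on round_line128(),
trunc_line128(), cache_op_r4k_line() and cache_r4k_op_32lines_128() from the
MIPS cache headers; they are not part of this diff. The sketch below shows what
these helpers are assumed to look like (kernel types such as vm_offset_t in
scope). In the tree, the 32-line helper is a fully unrolled run of CACHE
instructions in inline asm rather than a loop, so treat this as an illustration
of the semantics, not the actual definitions.

/* Illustrative sketch only -- not part of the patch. */

/* Round up / truncate down to a 128-byte cache line boundary. */
#define	round_line128(x)	(((x) + 127) & ~127)
#define	trunc_line128(x)	((x) & ~127)

/* Issue one CACHE instruction (op must be a constant) on the line at va. */
#define	cache_op_r4k_line(va, op)					\
	__asm __volatile(						\
		".set noreorder		\n\t"				\
		"cache %1, 0(%0)	\n\t"				\
		".set reorder"						\
	    : : "r" (va), "i" (op) : "memory")

/*
 * Operate on 32 consecutive 128-byte lines, i.e. one 4 KB chunk.  This
 * is why the loops above advance by (32 * 128) while at least that much
 * of the range remains, then finish the tail line by line.  The in-tree
 * macro unrolls all 32 CACHE instructions instead of looping.
 */
#define	cache_r4k_op_32lines_128(va, op)				\
	do {								\
		vm_offset_t _va;					\
		for (_va = (va); _va < (va) + 32 * 128; _va += 128)	\
			cache_op_r4k_line(_va, (op));			\
	} while (0)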