Rework the way we get the cacheline size. Instead of having a table of
CPUs known to use 128-byte cache lines and defaulting to 32, use the dcbz
instruction to measure it. Also make dcbz behave the way you would expect
on PPC 970.
parent 692f8aa2fa
commit 32d233ecc5
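For context, the probe the diff below adds boils down to a few lines of C: fill a cache-aligned buffer with a nonzero byte, execute dcbz on its start (which zeroes exactly one cache line), and count how many leading bytes came back zero. The sketch below restates that idea outside the kernel diff; the function name, the buffer argument, and the 0x100 upper bound are illustrative assumptions, and it only makes sense on a PowerPC target where dcbz may legally touch the buffer.

#include <stdint.h>

/*
 * Sketch only: mirrors the dcbz-based probe added in the diff below.
 * "buf" must be cache-line aligned (the kernel reuses the EXC_PGM
 * vector area for this); the function name is hypothetical.
 */
static int
measure_cacheline_size(uint8_t *buf)
{
	int i;

	/* Fill with a nonzero pattern so zeroed bytes are detectable. */
	for (i = 0; i < 0x100; i++)
		buf[i] = 0xff;

	/*
	 * dcbz zeroes exactly one cache line; with the RA field written
	 * as 0, the effective address is just the pointer in %0.
	 */
	__asm__ __volatile__("dcbz 0,%0" :: "r" (buf) : "memory");

	/* Count the leading zero bytes; the count is the line size. */
	for (i = 0; i < 0x100 && buf[i] == 0; i++)
		;

	return (i);
}

In the kernel this runs with address translation disabled and uses the EXC_PGM vector area as scratch space, since that area is cache-aligned and about to be overwritten anyway.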
@@ -117,6 +117,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/powerpc.h>
 #include <machine/reg.h>
 #include <machine/sigframe.h>
+#include <machine/spr.h>
 #include <machine/trap.h>
 #include <machine/vmparam.h>
 
@@ -254,8 +255,8 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 	size_t trap_offset;
 	void *kmdp;
 	char *env;
-	int vers;
 	uint32_t msr, scratch;
+	uint8_t *cache_check;
 
 	end = 0;
 	kmdp = NULL;
@@ -325,22 +326,6 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 		printf("powerpc_init: no loader metadata.\n");
 	}
 
-	/*
-	 * Set cacheline_size based on the CPU model.
-	 */
-
-	vers = mfpvr() >> 16;
-	switch (vers) {
-	case IBM970:
-	case IBM970FX:
-	case IBM970MP:
-	case IBM970GX:
-		cacheline_size = 128;
-		break;
-	default:
-		cacheline_size = 32;
-	}
-
 	/*
 	 * Init KDB
 	 */
@@ -348,14 +333,54 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 	kdb_init();
 
 	/*
-	 * XXX: Initialize the interrupt tables.
-	 *      Disable translation in case the vector area
-	 *      hasn't been mapped (G5)
+	 * PowerPC 970 CPUs have a misfeature requested by Apple that makes
+	 * them pretend they have a 32-byte cacheline. Turn this off
+	 * before we measure the cacheline size.
 	 */
+
+	switch (mfpvr() >> 16) {
+	case IBM970:
+	case IBM970FX:
+	case IBM970MP:
+	case IBM970GX:
+		scratch = mfspr64upper(SPR_HID5,msr);
+		scratch &= ~HID5_970_DCBZ_SIZE_HI;
+		mtspr64(SPR_HID5, scratch, mfspr(SPR_HID5), msr);
+		break;
+	}
+
+	/*
+	 * Initialize the interrupt tables and figure out our cache line
+	 * size and whether or not we need the 64-bit bridge code.
+	 */
+
+	/*
+	 * Disable translation in case the vector area hasn't been
+	 * mapped (G5).
+	 */
+
 	msr = mfmsr();
 	mtmsr(msr & ~(PSL_IR | PSL_DR));
 	isync();
 
 	/*
+	 * Measure the cacheline size using dcbz
+	 *
+	 * Use EXC_PGM as a playground. We are about to overwrite it
+	 * anyway, we know it exists, and we know it is cache-aligned.
+	 */
+
+	cache_check = (void *)EXC_PGM;
+
+	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
+		cache_check[cacheline_size] = 0xff;
+
+	__asm __volatile("dcbz %0,0":: "r" (cache_check) : "memory");
+
+	/* Find the first byte dcbz did not zero to get the cache line size */
+	for (cacheline_size = 0; cacheline_size < 0x100 &&
+	    cache_check[cacheline_size] == 0; cacheline_size++);
+
+	/*
 	 * Figure out whether we need to use the 64 bit PMAP. This works by
 	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
@@ -147,4 +147,6 @@
  * 7457: XBSEN = Extended BAT Block Size Enable
  */
 
+#define HID5_970_DCBZ_SIZE_HI	0x01000000	/* dcbz does a 32-byte store */
+
 #endif /* _POWERPC_HID_H_ */
@@ -391,6 +391,8 @@
 #define SPR_SRR3	0x3df	/* 4.. Save/Restore Register 3 */
 #define SPR_HID0	0x3f0	/* ..8 Hardware Implementation Register 0 */
 #define SPR_HID1	0x3f1	/* ..8 Hardware Implementation Register 1 */
+#define SPR_HID4	0x3f4	/* ..8 Hardware Implementation Register 4 */
+#define SPR_HID5	0x3f6	/* ..8 Hardware Implementation Register 5 */
 
 #if defined(AIM)
 #define SPR_DBSR	0x3f0	/* 4.. Debug Status Register */