Style(9) these files. No changes in the compiled code. (Verified by
diff'ing objdump -d output).
jhb 2008-03-11 21:41:36 +00:00
parent 6ab428b54b
commit 384b4a0305
3 changed files with 914 additions and 832 deletions
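
The hunks below are whitespace and style changes only, so the generated code is identical. As background for the first file (the amd64 MTRR driver), the validity rule its mrvalid() macro encodes is: a range must start on a 4 KiB boundary, its length must be a power of two no smaller than 4 KiB, and the base must be aligned to the length. A minimal userland sketch of the same check (the function name and test values are illustrative, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the mrvalid() macro restyled in the first hunk below. */
static int
range_valid(uint64_t base, uint64_t len)
{
	if (base & ((1 << 12) - 1))	/* base must be a multiple of 4k */
		return (0);
	if (len < (1 << 12))		/* length must be >= 4k */
		return (0);
	if (len & (len - 1))		/* ... and a power of two */
		return (0);
	if (base & (len - 1))		/* range must not be discontiguous */
		return (0);
	return (1);
}

int
main(void)
{
	printf("%d\n", range_valid(0x100000, 0x100000));	/* 1: 1M at 1M */
	printf("%d\n", range_valid(0x100000, 0x300000));	/* 0: not a power of 2 */
	printf("%d\n", range_valid(0x100000, 0x200000));	/* 0: base not aligned to len */
	return (0);
}

The last case fails because a 2 MB range starting at 1 MB crosses a 2 MB alignment boundary, which a single MTRR cannot describe.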


@@ -47,31 +47,31 @@ __FBSDID("$FreeBSD$");
 
 static char *mem_owner_bios = "BIOS";
 
-#define MR686_FIXMTRR (1<<0)
-
-#define mrwithin(mr, a) \
-    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
-#define mroverlap(mra, mrb) \
-    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))
-
-#define mrvalid(base, len) \
-    ((!(base & ((1 << 12) - 1))) && /* base is multiple of 4k */ \
-     ((len) >= (1 << 12)) && /* length is >= 4k */ \
-     powerof2((len)) && /* ... and power of two */ \
-     !((base) & ((len) - 1))) /* range is not discontiuous */
-
-#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
-
-static int mtrrs_disabled;
+#define	MR686_FIXMTRR	(1<<0)
+
+#define	mrwithin(mr, a)							\
+	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
+#define	mroverlap(mra, mrb)						\
+	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))
+
+#define	mrvalid(base, len)						\
+	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
+	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
+	    powerof2((len)) &&		/* ... and power of two */	\
+	    !((base) & ((len) - 1)))	/* range is not discontiuous */
+
+#define	mrcopyflags(curr, new)						\
+	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
+
+static int mtrrs_disabled;
 TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
 SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
-	   &mtrrs_disabled, 0, "Disable amd64 MTRRs.");
+    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");
 
-static void amd64_mrinit(struct mem_range_softc *sc);
-static int amd64_mrset(struct mem_range_softc *sc,
-		       struct mem_range_desc *mrd,
-		       int *arg);
-static void amd64_mrAPinit(struct mem_range_softc *sc);
+static void	amd64_mrinit(struct mem_range_softc *sc);
+static int	amd64_mrset(struct mem_range_softc *sc,
+		    struct mem_range_desc *mrd, int *arg);
+static void	amd64_mrAPinit(struct mem_range_softc *sc);
 
 static struct mem_range_ops amd64_mrops = {
 	amd64_mrinit,
@@ -80,37 +80,35 @@ static struct mem_range_ops amd64_mrops = {
 };
 
 /* XXX for AP startup hook */
-static u_int64_t mtrrcap, mtrrdef;
-
-static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
-					      struct mem_range_desc *mrd);
-static void amd64_mrfetch(struct mem_range_softc *sc);
-static int amd64_mtrrtype(int flags);
-static int amd64_mrt2mtrr(int flags, int oldval);
-static int amd64_mtrrconflict(int flag1, int flag2);
-static void amd64_mrstore(struct mem_range_softc *sc);
-static void amd64_mrstoreone(void *arg);
-static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
-						  u_int64_t addr);
-static int amd64_mrsetlow(struct mem_range_softc *sc,
-			  struct mem_range_desc *mrd,
-			  int *arg);
-static int amd64_mrsetvariable(struct mem_range_softc *sc,
-			       struct mem_range_desc *mrd,
-			       int *arg);
+static u_int64_t	mtrrcap, mtrrdef;
+
+static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
+		    struct mem_range_desc *mrd);
+static void	amd64_mrfetch(struct mem_range_softc *sc);
+static int	amd64_mtrrtype(int flags);
+static int	amd64_mrt2mtrr(int flags, int oldval);
+static int	amd64_mtrrconflict(int flag1, int flag2);
+static void	amd64_mrstore(struct mem_range_softc *sc);
+static void	amd64_mrstoreone(void *arg);
+static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
+		    u_int64_t addr);
+static int	amd64_mrsetlow(struct mem_range_softc *sc,
+		    struct mem_range_desc *mrd, int *arg);
+static int	amd64_mrsetvariable(struct mem_range_softc *sc,
+		    struct mem_range_desc *mrd, int *arg);
 
 /* amd64 MTRR type to memory range type conversion */
 static int amd64_mtrrtomrt[] = {
-    MDF_UNCACHEABLE,
-    MDF_WRITECOMBINE,
-    MDF_UNKNOWN,
-    MDF_UNKNOWN,
-    MDF_WRITETHROUGH,
-    MDF_WRITEPROTECT,
-    MDF_WRITEBACK
+	MDF_UNCACHEABLE,
+	MDF_WRITECOMBINE,
+	MDF_UNKNOWN,
+	MDF_UNKNOWN,
+	MDF_WRITETHROUGH,
+	MDF_WRITEPROTECT,
+	MDF_WRITEBACK
 };
 
-#define MTRRTOMRTLEN (sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))
+#define	MTRRTOMRTLEN	(sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))
 
 /*
  * Used in /dev/mem drivers and elsewhere
@@ -120,118 +118,123 @@ MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
 static int
 amd64_mtrr2mrt(int val)
 {
 	if (val < 0 || val >= MTRRTOMRTLEN)
-		return MDF_UNKNOWN;
-	return amd64_mtrrtomrt[val];
+		return (MDF_UNKNOWN);
+	return (amd64_mtrrtomrt[val]);
 }
 
 /*
  * amd64 MTRR conflicts. Writeback and uncachable may overlap.
  */
 static int
 amd64_mtrrconflict(int flag1, int flag2)
 {
 	flag1 &= MDF_ATTRMASK;
 	flag2 &= MDF_ATTRMASK;
 	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
-		return 1;
+		return (1);
 	if (flag1 == flag2 ||
 	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
 	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
-		return 0;
-	return 1;
+		return (0);
+	return (1);
 }
 
 /*
  * Look for an exactly-matching range.
  */
 static struct mem_range_desc *
-mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd) 
+mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
 {
-    struct mem_range_desc	*cand;
-    int				 i;
-
-    for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
-	if ((cand->mr_base == mrd->mr_base) &&
-	    (cand->mr_len == mrd->mr_len))
-	    return(cand);
-    return(NULL);
+	struct mem_range_desc *cand;
+	int i;
+
+	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
+		if ((cand->mr_base == mrd->mr_base) &&
+		    (cand->mr_len == mrd->mr_len))
+			return (cand);
+	return (NULL);
 }
 
 /*
- * Fetch the current mtrr settings from the current CPU (assumed to all
- * be in sync in the SMP case). Note that if we are here, we assume
- * that MTRRs are enabled, and we may or may not have fixed MTRRs.
+ * Fetch the current mtrr settings from the current CPU (assumed to
+ * all be in sync in the SMP case). Note that if we are here, we
+ * assume that MTRRs are enabled, and we may or may not have fixed
+ * MTRRs.
  */
 static void
 amd64_mrfetch(struct mem_range_softc *sc)
 {
-    struct mem_range_desc	*mrd;
-    u_int64_t			 msrv;
-    int				 i, j, msr;
-
-    mrd = sc->mr_desc;
-
-    /* Get fixed-range MTRRs */
-    if (sc->mr_cap & MR686_FIXMTRR) {
-	msr = MSR_MTRR64kBase;
-	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
-	    msrv = rdmsr(msr);
-	    for (j = 0; j < 8; j++, mrd++) {
-		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
-		    amd64_mtrr2mrt(msrv & 0xff) |
-		    MDF_ACTIVE;
-		if (mrd->mr_owner[0] == 0)
-		    strcpy(mrd->mr_owner, mem_owner_bios);
-		msrv = msrv >> 8;
-	    }
-	}
-	msr = MSR_MTRR16kBase;
-	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
-	    msrv = rdmsr(msr);
-	    for (j = 0; j < 8; j++, mrd++) {
-		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
-		    amd64_mtrr2mrt(msrv & 0xff) |
-		    MDF_ACTIVE;
-		if (mrd->mr_owner[0] == 0)
-		    strcpy(mrd->mr_owner, mem_owner_bios);
-		msrv = msrv >> 8;
-	    }
-	}
-	msr = MSR_MTRR4kBase;
-	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
-	    msrv = rdmsr(msr);
-	    for (j = 0; j < 8; j++, mrd++) {
-		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
-		    amd64_mtrr2mrt(msrv & 0xff) |
-		    MDF_ACTIVE;
-		if (mrd->mr_owner[0] == 0)
-		    strcpy(mrd->mr_owner, mem_owner_bios);
-		msrv = msrv >> 8;
-	    }
-	}
-    }
-
-    /* Get remainder which must be variable MTRRs */
-    msr = MSR_MTRRVarBase;
-    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
-	msrv = rdmsr(msr);
-	mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
-	    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
-	mrd->mr_base = msrv & MTRR_PHYSBASE_PHYSBASE;
-	msrv = rdmsr(msr + 1);
-	mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
-	    (mrd->mr_flags | MDF_ACTIVE) :
-	    (mrd->mr_flags & ~MDF_ACTIVE);
-	/* Compute the range from the mask. Ick. */
-	mrd->mr_len = (~(msrv & MTRR_PHYSMASK_PHYSMASK)
-	    & (MTRR_PHYSMASK_PHYSMASK | 0xfffL)) + 1;
-	if (!mrvalid(mrd->mr_base, mrd->mr_len))
-	    mrd->mr_flags |= MDF_BOGUS;
-	/* If unclaimed and active, must be the BIOS */
-	if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
-	    strcpy(mrd->mr_owner, mem_owner_bios);
-    }
+	struct mem_range_desc *mrd;
+	u_int64_t msrv;
+	int i, j, msr;
+
+	mrd = sc->mr_desc;
+
+	/* Get fixed-range MTRRs. */
+	if (sc->mr_cap & MR686_FIXMTRR) {
+		msr = MSR_MTRR64kBase;
+		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
+			msrv = rdmsr(msr);
+			for (j = 0; j < 8; j++, mrd++) {
+				mrd->mr_flags =
+				    (mrd->mr_flags & ~MDF_ATTRMASK) |
+				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
+				if (mrd->mr_owner[0] == 0)
+					strcpy(mrd->mr_owner, mem_owner_bios);
+				msrv = msrv >> 8;
+			}
+		}
+		msr = MSR_MTRR16kBase;
+		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
+			msrv = rdmsr(msr);
+			for (j = 0; j < 8; j++, mrd++) {
+				mrd->mr_flags =
+				    (mrd->mr_flags & ~MDF_ATTRMASK) |
+				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
+				if (mrd->mr_owner[0] == 0)
+					strcpy(mrd->mr_owner, mem_owner_bios);
+				msrv = msrv >> 8;
+			}
+		}
+		msr = MSR_MTRR4kBase;
+		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
+			msrv = rdmsr(msr);
+			for (j = 0; j < 8; j++, mrd++) {
+				mrd->mr_flags =
+				    (mrd->mr_flags & ~MDF_ATTRMASK) |
+				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
+				if (mrd->mr_owner[0] == 0)
+					strcpy(mrd->mr_owner, mem_owner_bios);
+				msrv = msrv >> 8;
+			}
+		}
+	}
+
+	/* Get remainder which must be variable MTRRs. */
+	msr = MSR_MTRRVarBase;
+	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
+		msrv = rdmsr(msr);
+		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
+		    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
+		mrd->mr_base = msrv & MTRR_PHYSBASE_PHYSBASE;
+		msrv = rdmsr(msr + 1);
+		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
+		    (mrd->mr_flags | MDF_ACTIVE) :
+		    (mrd->mr_flags & ~MDF_ACTIVE);
+		/* Compute the range from the mask. Ick. */
+		mrd->mr_len = (~(msrv & MTRR_PHYSMASK_PHYSMASK)
+		    & (MTRR_PHYSMASK_PHYSMASK | 0xfffL)) + 1;
+		if (!mrvalid(mrd->mr_base, mrd->mr_len))
+			mrd->mr_flags |= MDF_BOGUS;
+		/* If unclaimed and active, must be the BIOS. */
+		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
+			strcpy(mrd->mr_owner, mem_owner_bios);
+	}
 }
 
 /*
@@ -240,17 +243,17 @@ amd64_mrfetch(struct mem_range_softc *sc)
 static int
 amd64_mtrrtype(int flags)
 {
-    int			 i;
-
-    flags &= MDF_ATTRMASK;
-
-    for (i = 0; i < MTRRTOMRTLEN; i++) {
-	if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
-	    continue;
-	if (flags == amd64_mtrrtomrt[i])
-	    return(i);
-    }
-    return(-1);
+	int i;
+
+	flags &= MDF_ATTRMASK;
+
+	for (i = 0; i < MTRRTOMRTLEN; i++) {
+		if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
+			continue;
+		if (flags == amd64_mtrrtomrt[i])
+			return (i);
+	}
+	return (-1);
 }
 
 static int
@@ -259,8 +262,8 @@ amd64_mrt2mtrr(int flags, int oldval)
 	int val;
 
 	if ((val = amd64_mtrrtype(flags)) == -1)
-		return oldval & 0xff;
-	return val & 0xff;
+		return (oldval & 0xff);
+	return (val & 0xff);
 }
 
 /*
@@ -273,105 +276,124 @@ static void
 amd64_mrstore(struct mem_range_softc *sc)
 {
 #ifdef SMP
-    /*
-     * We should use ipi_all_but_self() to call other CPUs into a
-     * locking gate, then call a target function to do this work.
-     * The "proper" solution involves a generalised locking gate
-     * implementation, not ready yet.
-     */
-    smp_rendezvous(NULL, amd64_mrstoreone, NULL, (void *)sc);
+	/*
+	 * We should use ipi_all_but_self() to call other CPUs into a
+	 * locking gate, then call a target function to do this work.
+	 * The "proper" solution involves a generalised locking gate
+	 * implementation, not ready yet.
+	 */
+	smp_rendezvous(NULL, amd64_mrstoreone, NULL, sc);
 #else
-    disable_intr();			/* disable interrupts */
-    amd64_mrstoreone((void *)sc);
-    enable_intr();
+	disable_intr();				/* disable interrupts */
+	amd64_mrstoreone(sc);
+	enable_intr();
 #endif
 }
 
 /*
  * Update the current CPU's MTRRs with those represented in the
- * descriptor list. Note that we do this wholesale rather than
- * just stuffing one entry; this is simpler (but slower, of course).
+ * descriptor list. Note that we do this wholesale rather than just
+ * stuffing one entry; this is simpler (but slower, of course).
  */
 static void
 amd64_mrstoreone(void *arg)
 {
-    struct mem_range_softc	*sc = (struct mem_range_softc *)arg;
-    struct mem_range_desc	*mrd;
-    u_int64_t			 omsrv, msrv;
-    int				 i, j, msr;
-    u_int			 cr4save;
-
-    mrd = sc->mr_desc;
-
-    cr4save = rcr4();			/* save cr4 */
-    if (cr4save & CR4_PGE)
-	load_cr4(cr4save & ~CR4_PGE);
-    load_cr0((rcr0() & ~CR0_NW) | CR0_CD); /* disable caches (CD = 1, NW = 0) */
-    wbinvd();				/* flush caches, TLBs */
-    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE); /* disable MTRRs (E = 0) */
-
-    /* Set fixed-range MTRRs */
-    if (sc->mr_cap & MR686_FIXMTRR) {
-	msr = MSR_MTRR64kBase;
-	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
-	    msrv = 0;
-	    omsrv = rdmsr(msr);
-	    for (j = 7; j >= 0; j--) {
-		msrv = msrv << 8;
-		msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
-	    }
-	    wrmsr(msr, msrv);
-	    mrd += 8;
-	}
-	msr = MSR_MTRR16kBase;
-	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
-	    msrv = 0;
-	    omsrv = rdmsr(msr);
-	    for (j = 7; j >= 0; j--) {
-		msrv = msrv << 8;
-		msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
-	    }
-	    wrmsr(msr, msrv);
-	    mrd += 8;
-	}
-	msr = MSR_MTRR4kBase;
-	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
-	    msrv = 0;
-	    omsrv = rdmsr(msr);
-	    for (j = 7; j >= 0; j--) {
-		msrv = msrv << 8;
-		msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
-	    }
-	    wrmsr(msr, msrv);
-	    mrd += 8;
-	}
-    }
-
-    /* Set remainder which must be variable MTRRs */
-    msr = MSR_MTRRVarBase;
-    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
-	/* base/type register */
-	omsrv = rdmsr(msr);
-	if (mrd->mr_flags & MDF_ACTIVE) {
-	    msrv = mrd->mr_base & MTRR_PHYSBASE_PHYSBASE;
-	    msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
-	} else {
-	    msrv = 0;
-	}
-	wrmsr(msr, msrv);
-	/* mask/active register */
-	if (mrd->mr_flags & MDF_ACTIVE) {
-	    msrv = MTRR_PHYSMASK_VALID | (~(mrd->mr_len - 1) & MTRR_PHYSMASK_PHYSMASK);
-	} else {
-	    msrv = 0;
-	}
-	wrmsr(msr + 1, msrv);
-    }
-    wbinvd();				/* flush caches, TLBs */
-    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE); /* restore MTRR state */
-    load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* enable caches CD = 0 and NW = 0 */
-    load_cr4(cr4save);			/* restore cr4 */
+	struct mem_range_softc *sc = arg;
+	struct mem_range_desc *mrd;
+	u_int64_t omsrv, msrv;
+	int i, j, msr;
+	u_int cr4save;
+
+	mrd = sc->mr_desc;
+
+	/* Disable PGE. */
+	cr4save = rcr4();
+	if (cr4save & CR4_PGE)
+		load_cr4(cr4save & ~CR4_PGE);
+
+	/* Disable caches (CD = 1, NW = 0). */
+	load_cr0((rcr0() & ~CR0_NW) | CR0_CD);
+
+	/* Flushes caches and TLBs. */
+	wbinvd();
+
+	/* Disable MTRRs (E = 0). */
+	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);
+
+	/* Set fixed-range MTRRs. */
+	if (sc->mr_cap & MR686_FIXMTRR) {
+		msr = MSR_MTRR64kBase;
+		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
+			msrv = 0;
+			omsrv = rdmsr(msr);
+			for (j = 7; j >= 0; j--) {
+				msrv = msrv << 8;
+				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
+				    omsrv >> (j * 8));
+			}
+			wrmsr(msr, msrv);
+			mrd += 8;
+		}
+		msr = MSR_MTRR16kBase;
+		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
+			msrv = 0;
+			omsrv = rdmsr(msr);
+			for (j = 7; j >= 0; j--) {
+				msrv = msrv << 8;
+				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
+				    omsrv >> (j * 8));
+			}
+			wrmsr(msr, msrv);
+			mrd += 8;
+		}
+		msr = MSR_MTRR4kBase;
+		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
+			msrv = 0;
+			omsrv = rdmsr(msr);
+			for (j = 7; j >= 0; j--) {
+				msrv = msrv << 8;
+				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
+				    omsrv >> (j * 8));
+			}
+			wrmsr(msr, msrv);
+			mrd += 8;
+		}
+	}
+
+	/* Set remainder which must be variable MTRRs. */
+	msr = MSR_MTRRVarBase;
+	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
+		/* base/type register */
+		omsrv = rdmsr(msr);
+		if (mrd->mr_flags & MDF_ACTIVE) {
+			msrv = mrd->mr_base & MTRR_PHYSBASE_PHYSBASE;
+			msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
+		} else {
+			msrv = 0;
+		}
+		wrmsr(msr, msrv);
+
+		/* mask/active register */
+		if (mrd->mr_flags & MDF_ACTIVE) {
+			msrv = MTRR_PHYSMASK_VALID |
+			    (~(mrd->mr_len - 1) & MTRR_PHYSMASK_PHYSMASK);
+		} else {
+			msrv = 0;
+		}
+		wrmsr(msr + 1, msrv);
+	}
+
+	/* Flush caches, TLBs. */
+	wbinvd();
+
+	/* Enable MTRRs. */
+	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);
+
+	/* Enable caches (CD = 0, NW = 0). */
+	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
+
+	/* Restore PGE. */
+	load_cr4(cr4save);
 }
 
 /*
@@ -380,227 +402,242 @@ amd64_mrstoreone(void *arg)
 static struct mem_range_desc *
 amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
 {
-    struct mem_range_desc	*mrd;
-    int				 i;
-
-    for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
-	if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
-	    return(mrd);
-    return(NULL);
+	struct mem_range_desc *mrd;
+	int i;
+
+	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
+	    i++, mrd++)
+		if ((addr >= mrd->mr_base) &&
+		    (addr < (mrd->mr_base + mrd->mr_len)))
+			return (mrd);
+	return (NULL);
 }
 
 /*
- * Try to satisfy the given range request by manipulating the fixed MTRRs that
- * cover low memory.
+ * Try to satisfy the given range request by manipulating the fixed
+ * MTRRs that cover low memory.
  *
- * Note that we try to be generous here; we'll bloat the range out to the
- * next higher/lower boundary to avoid the consumer having to know too much
- * about the mechanisms here.
+ * Note that we try to be generous here; we'll bloat the range out to
+ * the next higher/lower boundary to avoid the consumer having to know
+ * too much about the mechanisms here.
  *
- * XXX note that this will have to be updated when we start supporting "busy" ranges.
+ * XXX note that this will have to be updated when we start supporting
+ * "busy" ranges.
  */
 static int
 amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
 {
-    struct mem_range_desc	*first_md, *last_md, *curr_md;
-
-    /* range check */
-    if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
-	((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
-	return(EINVAL);
-
-    /* check we aren't doing something risky */
-    if (!(mrd->mr_flags & MDF_FORCE))
+	struct mem_range_desc *first_md, *last_md, *curr_md;
+
+	/* Range check. */
+	if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
+	    ((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
+		return (EINVAL);
+
+	/* Check that we aren't doing something risky. */
+	if (!(mrd->mr_flags & MDF_FORCE))
 		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
 			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
 				return (EACCES);
 		}
 
-    /* set flags, clear set-by-firmware flag */
-    for (curr_md = first_md; curr_md <= last_md; curr_md++) {
-	curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
-	bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
-    }
-
-    return(0);
+	/* Set flags, clear set-by-firmware flag. */
+	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
+		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
+		    ~MDF_FIRMWARE, mrd->mr_flags);
+		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
+	}
+
+	return (0);
 }
 
 /*
  * Modify/add a variable MTRR to satisfy the request.
  *
  * XXX needs to be updated to properly support "busy" ranges.
  */
 static int
-amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
+amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
+    int *arg)
 {
-    struct mem_range_desc	*curr_md, *free_md;
-    int				 i;
-
-    /*
-     * Scan the currently active variable descriptors, look for
-     * one we exactly match (straight takeover) and for possible
-     * accidental overlaps.
-     * Keep track of the first empty variable descriptor in case we
-     * can't perform a takeover.
-     */
-    i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
-    curr_md = sc->mr_desc + i;
-    free_md = NULL;
-    for (; i < sc->mr_ndesc; i++, curr_md++) {
-	if (curr_md->mr_flags & MDF_ACTIVE) {
-	    /* exact match? */
-	    if ((curr_md->mr_base == mrd->mr_base) &&
-		(curr_md->mr_len == mrd->mr_len)) {
-		/* whoops, owned by someone */
-		if (curr_md->mr_flags & MDF_BUSY)
-		    return(EBUSY);
-		/* check we aren't doing something risky */
-		if (!(mrd->mr_flags & MDF_FORCE) &&
-		    ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN))
-		    return (EACCES);
-		/* Ok, just hijack this entry */
-		free_md = curr_md;
-		break;
-	    }
-	    /* non-exact overlap ? */
-	    if (mroverlap(curr_md, mrd)) {
-		/* between conflicting region types? */
-		if (amd64_mtrrconflict(curr_md->mr_flags, mrd->mr_flags))
-		    return(EINVAL);
-	    }
-	} else if (free_md == NULL) {
-	    free_md = curr_md;
-	}
-    }
-
-    /* got somewhere to put it? */
-    if (free_md == NULL)
-	return(ENOSPC);
-
-    /* Set up new descriptor */
-    free_md->mr_base = mrd->mr_base;
-    free_md->mr_len = mrd->mr_len;
-    free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
-    bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
-    return(0);
+	struct mem_range_desc *curr_md, *free_md;
+	int i;
+
+	/*
+	 * Scan the currently active variable descriptors, look for
+	 * one we exactly match (straight takeover) and for possible
+	 * accidental overlaps.
+	 *
+	 * Keep track of the first empty variable descriptor in case
+	 * we can't perform a takeover.
+	 */
+	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
+	curr_md = sc->mr_desc + i;
+	free_md = NULL;
+	for (; i < sc->mr_ndesc; i++, curr_md++) {
+		if (curr_md->mr_flags & MDF_ACTIVE) {
+			/* Exact match? */
+			if ((curr_md->mr_base == mrd->mr_base) &&
+			    (curr_md->mr_len == mrd->mr_len)) {
+				/* Whoops, owned by someone. */
+				if (curr_md->mr_flags & MDF_BUSY)
+					return (EBUSY);
+				/* Check that we aren't doing something risky */
+				if (!(mrd->mr_flags & MDF_FORCE) &&
+				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
+				    MDF_UNKNOWN))
+					return (EACCES);
+				/* Ok, just hijack this entry. */
+				free_md = curr_md;
+				break;
+			}
+			/* Non-exact overlap? */
+			if (mroverlap(curr_md, mrd)) {
+				/* Between conflicting region types? */
+				if (amd64_mtrrconflict(curr_md->mr_flags,
+				    mrd->mr_flags))
+					return (EINVAL);
+			}
+		} else if (free_md == NULL) {
+			free_md = curr_md;
+		}
+	}
+
+	/* Got somewhere to put it? */
+	if (free_md == NULL)
+		return (ENOSPC);
+
+	/* Set up new descriptor. */
+	free_md->mr_base = mrd->mr_base;
+	free_md->mr_len = mrd->mr_len;
+	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
+	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
+	return (0);
 }
 
 /*
  * Handle requests to set memory range attributes by manipulating MTRRs.
  */
 static int
 amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
 {
-    struct mem_range_desc	*targ;
-    int				 error = 0;
-
-    switch(*arg) {
-    case MEMRANGE_SET_UPDATE:
-	/* make sure that what's being asked for is even possible at all */
-	if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
-	    amd64_mtrrtype(mrd->mr_flags) == -1)
-	    return(EINVAL);
-
-#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
-
-	/* are the "low memory" conditions applicable? */
-	if ((sc->mr_cap & MR686_FIXMTRR) &&
-	    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
-	    if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
-		return(error);
-	} else {
-	    /* it's time to play with variable MTRRs */
-	    if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
-		return(error);
-	}
-	break;
-
-    case MEMRANGE_SET_REMOVE:
-	if ((targ = mem_range_match(sc, mrd)) == NULL)
-	    return(ENOENT);
-	if (targ->mr_flags & MDF_FIXACTIVE)
-	    return(EPERM);
-	if (targ->mr_flags & MDF_BUSY)
-	    return(EBUSY);
-	targ->mr_flags &= ~MDF_ACTIVE;
-	targ->mr_owner[0] = 0;
-	break;
-
-    default:
-	return(EOPNOTSUPP);
-    }
-
-    /* update the hardware */
-    amd64_mrstore(sc);
-    amd64_mrfetch(sc);	/* refetch to see where we're at */
-    return(0);
+	struct mem_range_desc *targ;
+	int error = 0;
+
+	switch(*arg) {
+	case MEMRANGE_SET_UPDATE:
+		/*
+		 * Make sure that what's being asked for is even
+		 * possible at all.
+		 */
+		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
+		    amd64_mtrrtype(mrd->mr_flags) == -1)
+			return (EINVAL);
+
+#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
+
+		/* Are the "low memory" conditions applicable? */
+		if ((sc->mr_cap & MR686_FIXMTRR) &&
+		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
+			if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
+				return (error);
+		} else {
+			/* It's time to play with variable MTRRs. */
+			if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
+				return (error);
+		}
+		break;
+	case MEMRANGE_SET_REMOVE:
+		if ((targ = mem_range_match(sc, mrd)) == NULL)
+			return (ENOENT);
+		if (targ->mr_flags & MDF_FIXACTIVE)
+			return (EPERM);
+		if (targ->mr_flags & MDF_BUSY)
+			return (EBUSY);
+		targ->mr_flags &= ~MDF_ACTIVE;
+		targ->mr_owner[0] = 0;
+		break;
+	default:
+		return (EOPNOTSUPP);
+	}
+
+	/* Update the hardware. */
+	amd64_mrstore(sc);
+
+	/* Refetch to see where we're at. */
+	amd64_mrfetch(sc);
+	return (0);
 }
 
 /*
  * Work out how many ranges we support, initialise storage for them,
- * fetch the initial settings.
+ * and fetch the initial settings.
  */
 static void
 amd64_mrinit(struct mem_range_softc *sc)
 {
-    struct mem_range_desc	*mrd;
-    int				 nmdesc = 0;
-    int				 i;
-
-    mtrrcap = rdmsr(MSR_MTRRcap);
-    mtrrdef = rdmsr(MSR_MTRRdefType);
-
-    /* For now, bail out if MTRRs are not enabled */
-    if (!(mtrrdef & MTRR_DEF_ENABLE)) {
-	if (bootverbose)
-	    printf("CPU supports MTRRs but not enabled\n");
-	return;
-    }
-    nmdesc = mtrrcap & MTRR_CAP_VCNT;
-
-    /* If fixed MTRRs supported and enabled */
-    if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
-	sc->mr_cap = MR686_FIXMTRR;
-	nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
-    }
-
-    sc->mr_desc =
-	(struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc),
-					M_MEMDESC, M_WAITOK | M_ZERO);
-    sc->mr_ndesc = nmdesc;
-
-    mrd = sc->mr_desc;
-
-    /* Populate the fixed MTRR entries' base/length */
-    if (sc->mr_cap & MR686_FIXMTRR) {
-	for (i = 0; i < MTRR_N64K; i++, mrd++) {
-	    mrd->mr_base = i * 0x10000;
-	    mrd->mr_len = 0x10000;
-	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
-	}
-	for (i = 0; i < MTRR_N16K; i++, mrd++) {
-	    mrd->mr_base = i * 0x4000 + 0x80000;
-	    mrd->mr_len = 0x4000;
-	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
-	}
-	for (i = 0; i < MTRR_N4K; i++, mrd++) {
-	    mrd->mr_base = i * 0x1000 + 0xc0000;
-	    mrd->mr_len = 0x1000;
-	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
-	}
-    }
-
-    /*
-     * Get current settings, anything set now is considered to have
-     * been set by the firmware. (XXX has something already played here?)
-     */
-    amd64_mrfetch(sc);
-    mrd = sc->mr_desc;
-    for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
-	if (mrd->mr_flags & MDF_ACTIVE)
-	    mrd->mr_flags |= MDF_FIRMWARE;
-    }
+	struct mem_range_desc *mrd;
+	int i, nmdesc = 0;
+
+	mtrrcap = rdmsr(MSR_MTRRcap);
+	mtrrdef = rdmsr(MSR_MTRRdefType);
+
+	/* For now, bail out if MTRRs are not enabled. */
+	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
+		if (bootverbose)
+			printf("CPU supports MTRRs but not enabled\n");
+		return;
+	}
+	nmdesc = mtrrcap & MTRR_CAP_VCNT;
+
+	/* If fixed MTRRs supported and enabled. */
+	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
+		sc->mr_cap = MR686_FIXMTRR;
+		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
+	}
+
+	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
+	    M_WAITOK | M_ZERO);
+	sc->mr_ndesc = nmdesc;
+
+	mrd = sc->mr_desc;
+
+	/* Populate the fixed MTRR entries' base/length. */
+	if (sc->mr_cap & MR686_FIXMTRR) {
+		for (i = 0; i < MTRR_N64K; i++, mrd++) {
+			mrd->mr_base = i * 0x10000;
+			mrd->mr_len = 0x10000;
+			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
+			    MDF_FIXACTIVE;
+		}
+		for (i = 0; i < MTRR_N16K; i++, mrd++) {
+			mrd->mr_base = i * 0x4000 + 0x80000;
+			mrd->mr_len = 0x4000;
+			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
+			    MDF_FIXACTIVE;
+		}
+		for (i = 0; i < MTRR_N4K; i++, mrd++) {
+			mrd->mr_base = i * 0x1000 + 0xc0000;
+			mrd->mr_len = 0x1000;
+			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
+			    MDF_FIXACTIVE;
+		}
+	}
+
+	/*
+	 * Get current settings, anything set now is considered to
+	 * have been set by the firmware. (XXX has something already
+	 * played here?)
+	 */
+	amd64_mrfetch(sc);
+	mrd = sc->mr_desc;
+	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
+		if (mrd->mr_flags & MDF_ACTIVE)
+			mrd->mr_flags |= MDF_FIRMWARE;
+	}
 }
 
 /*
@@ -609,13 +646,15 @@ amd64_mrinit(struct mem_range_softc *sc)
 static void
 amd64_mrAPinit(struct mem_range_softc *sc)
 {
-    amd64_mrstoreone((void *)sc);	/* set MTRRs to match BSP */
-    wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
+	amd64_mrstoreone(sc);
+	wrmsr(MSR_MTRRdefType, mtrrdef);
 }
 
 static void
 amd64_mem_drvinit(void *unused)
 {
 	if (mtrrs_disabled)
 		return;
 	if (!(cpu_feature & CPUID_MTRR))
@@ -627,5 +666,4 @@ amd64_mem_drvinit(void *unused)
 		return;
 	mem_range_softc.mr_op = &amd64_mrops;
 }
-
-SYSINIT(amd64memdev,SI_SUB_DRIVERS,SI_ORDER_FIRST,amd64_mem_drvinit,NULL)
+SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL);
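
The subtlest arithmetic in the file above is the conversion between a variable-range MTRR's mask register and a byte length (the "Compute the range from the mask. Ick." step): amd64_mrfetch() recovers the length by complementing the mask inside the physical-address field and adding one, and amd64_mrstoreone() rebuilds the mask as ~(len - 1). A standalone sketch of that round trip, assuming the usual 52-bit physical address width (the constant mirrors the kernel's MTRR_PHYSMASK_PHYSMASK; the helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mask covering physical address bits 12..51, as on amd64. */
#define	PHYSMASK	0x000ffffffffff000ULL

/* amd64_mrfetch() direction: mask register -> range length. */
static uint64_t
mask_to_len(uint64_t msrv)
{
	return ((~(msrv & PHYSMASK) & (PHYSMASK | 0xfffULL)) + 1);
}

/* amd64_mrstoreone() direction: range length -> mask register bits. */
static uint64_t
len_to_mask(uint64_t len)
{
	return (~(len - 1) & PHYSMASK);
}

int
main(void)
{
	uint64_t len = 0x200000;	/* a 2 MB range */
	uint64_t mask = len_to_mask(len);

	printf("mask = %#llx\n", (unsigned long long)mask);	/* 0xfffffffe00000 */
	printf("len  = %#llx\n", (unsigned long long)mask_to_len(mask));	/* 0x200000 */
	return (0);
}

The trick works because a valid MTRR length is a power of two with the base aligned to it, so the mask is simply the complement of (len - 1) within the address field.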

File diff suppressed because it is too large.


@@ -47,40 +47,43 @@ __FBSDID("$FreeBSD$");
  * There are two of these in the 64-bit UWCCR.
  */
-#define UWCCR 0xc0000085
+#define	UWCCR	0xc0000085
 
-#define K6_REG_GET(reg, addr, mask, wc, uc) do { \
+#define	K6_REG_GET(reg, addr, mask, wc, uc)	do {			\
 		addr = (reg) & 0xfffe0000;				\
 		mask = ((reg) & 0x1fffc) >> 2;				\
 		wc = ((reg) & 0x2) >> 1;				\
 		uc = (reg) & 0x1;					\
 	} while (0)
 
-#define K6_REG_MAKE(addr, mask, wc, uc) \
-	((addr) | ((mask) << 2) | ((wc) << 1) | uc)
+#define	K6_REG_MAKE(addr, mask, wc, uc)					\
+	((addr) | ((mask) << 2) | ((wc) << 1) | uc)
 
-static void k6_mrinit(struct mem_range_softc *sc);
-static int k6_mrset(struct mem_range_softc *, struct mem_range_desc *, int *);
+static void	k6_mrinit(struct mem_range_softc *sc);
+static int	k6_mrset(struct mem_range_softc *, struct mem_range_desc *,
+		    int *);
 static __inline int k6_mrmake(struct mem_range_desc *, u_int32_t *);
-static void k6_mem_drvinit(void *);
+static void	k6_mem_drvinit(void *);
 
-static struct mem_range_ops k6_mrops =
-{
+static struct mem_range_ops k6_mrops = {
 	k6_mrinit,
 	k6_mrset,
 	NULL
 };
 
 static __inline int
-k6_mrmake(struct mem_range_desc *desc, u_int32_t *mtrr) {
+k6_mrmake(struct mem_range_desc *desc, u_int32_t *mtrr)
+{
 	u_int32_t len = 0, wc, uc;
 	register int bit;
 
 	if (desc->mr_base &~ 0xfffe0000)
-		return EINVAL;
+		return (EINVAL);
 	if (desc->mr_len < 131072 || !powerof2(desc->mr_len))
-		return EINVAL;
+		return (EINVAL);
 	if (desc->mr_flags &~ (MDF_WRITECOMBINE|MDF_UNCACHEABLE|MDF_FORCE))
-		return EOPNOTSUPP;
+		return (EOPNOTSUPP);
 
 	for (bit = ffs(desc->mr_len >> 17) - 1; bit < 15; bit++)
 		len |= 1 << bit;
@@ -88,11 +91,12 @@ k6_mrmake(struct mem_range_desc *desc, u_int32_t *mtrr) {
 	uc = (desc->mr_flags & MDF_UNCACHEABLE) ? 1 : 0;
 
 	*mtrr = K6_REG_MAKE(desc->mr_base, len, wc, uc);
-	return 0;
+	return (0);
 }
 
 static void
-k6_mrinit(struct mem_range_softc *sc) {
+k6_mrinit(struct mem_range_softc *sc)
+{
 	u_int64_t reg;
 	u_int32_t addr, mask, wc, uc;
 	int d;
@@ -100,7 +104,7 @@ k6_mrinit(struct mem_range_softc *sc) {
 	sc->mr_cap = 0;
 	sc->mr_ndesc = 2; /* XXX (BFF) For now, we only have one msr for this */
 	sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc),
-			     M_MEMDESC, M_NOWAIT | M_ZERO);
+	    M_MEMDESC, M_NOWAIT | M_ZERO);
 	if (sc->mr_desc == NULL)
 		panic("k6_mrinit: malloc returns NULL");
@@ -116,12 +120,13 @@ k6_mrinit(struct mem_range_softc *sc) {
 		if (uc)
 			sc->mr_desc[d].mr_flags |= MDF_UNCACHEABLE;
 	}
 
 	printf("K6-family MTRR support enabled (%d registers)\n", sc->mr_ndesc);
 }
 
 static int
-k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg) {
+k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
+{
 	u_int64_t reg;
 	u_int32_t mtrr;
 	int error, d;
@@ -130,7 +135,7 @@ k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg) {
 	case MEMRANGE_SET_UPDATE:
 		error = k6_mrmake(desc, &mtrr);
 		if (error)
-			return error;
+			return (error);
 		for (d = 0; d < sc->mr_ndesc; d++) {
 			if (!sc->mr_desc[d].mr_len) {
 				sc->mr_desc[d] = *desc;
@@ -138,10 +143,9 @@ k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg) {
 			}
 			if (sc->mr_desc[d].mr_base == desc->mr_base &&
 			    sc->mr_desc[d].mr_len == desc->mr_len)
-				return EEXIST;
+				return (EEXIST);
 		}
-
-		return ENOSPC;
+		return (ENOSPC);
 	case MEMRANGE_SET_REMOVE:
 		mtrr = 0;
 		for (d = 0; d < sc->mr_ndesc; d++)
@@ -150,14 +154,11 @@ k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg) {
 				bzero(&sc->mr_desc[d], sizeof(sc->mr_desc[d]));
 				goto out;
 			}
-
-		return ENOENT;
+		return (ENOENT);
 	default:
-		return EOPNOTSUPP;
+		return (EOPNOTSUPP);
 	}
 
-  out:
-
+out:
 	disable_intr();
 	wbinvd();
 	reg = rdmsr(UWCCR);
@@ -167,18 +168,18 @@ k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg) {
 	wbinvd();
 	enable_intr();
 
-	return 0;
+	return (0);
 }
 
 static void
-k6_mem_drvinit(void *unused) {
+k6_mem_drvinit(void *unused)
+{
 	if (!strcmp(cpu_vendor, "AuthenticAMD") &&
 	    (cpu_id & 0xf00) == 0x500 &&
-	    ((cpu_id & 0xf0) > 0x80 ||
-	     ((cpu_id & 0xf0) == 0x80 &&
-	      (cpu_id & 0xf) > 0x7)
-	    )
+	    ((cpu_id & 0xf0) > 0x80 ||
+	    ((cpu_id & 0xf0) == 0x80 &&
+	    (cpu_id & 0xf) > 0x7)))
 		mem_range_softc.mr_op = &k6_mrops;
 }
 
-SYSINIT(k6memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, k6_mem_drvinit, NULL)
+SYSINIT(k6memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, k6_mem_drvinit, NULL);
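
In the K6 file above, both supported ranges live in the single 64-bit UWCCR MSR, one 32-bit half each: a 15-bit base address in bits 31-17, a 15-bit mask in bits 16-2, the write-combine flag in bit 1 and the uncacheable flag in bit 0. A round-trip sketch of what the K6_REG_MAKE and K6_REG_GET macros do (plain functions standing in for the macros, purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* K6_REG_MAKE: pack base, mask, wc and uc into one UWCCR half. */
static uint32_t
k6_make(uint32_t addr, uint32_t mask, uint32_t wc, uint32_t uc)
{
	return (addr | (mask << 2) | (wc << 1) | uc);
}

/* K6_REG_GET: unpack the same four fields again. */
static void
k6_get(uint32_t reg, uint32_t *addr, uint32_t *mask, uint32_t *wc, uint32_t *uc)
{
	*addr = reg & 0xfffe0000;
	*mask = (reg & 0x1fffc) >> 2;
	*wc = (reg & 0x2) >> 1;
	*uc = reg & 0x1;
}

int
main(void)
{
	uint32_t reg, addr, mask, wc, uc;

	/* A write-combined range based at 0xa0000000. */
	reg = k6_make(0xa0000000, 0x7fff, 1, 0);
	k6_get(reg, &addr, &mask, &wc, &uc);
	printf("addr=%#x mask=%#x wc=%u uc=%u\n", addr, mask, wc, uc);
	return (0);
}

This also shows why k6_mrmake() rejects bases with bits below bit 17 set: they would bleed into the mask and flag fields.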