amd64: Handle 5-level paging on wakeup.

We can switch into long mode directly with LA57 enabled.

Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D25273
Konstantin Belousov 2020-08-23 20:43:23 +00:00
parent 177622f1fd
commit f446480b5f
3 changed files with 31 additions and 9 deletions
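
The three diffs below (the 32-bit wake trampoline, resumectx, and acpi_install_wakeup_handler()) share one convention: the C code stores the physical address of the top-level wakeup page table in the trampoline's wakeup_pagetables slot and reuses bit 0 of that word as a "5-level root" tag; the trampoline tests that bit to decide whether to set CR4.LA57 before paging is re-enabled, and strips it before the value reaches %cr3. A minimal C sketch of the encoding, with hypothetical helper names (the kernel itself does this with a WAKECODE_FIXUP() on one side and a few instructions of assembly on the other):

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration only: bit 0 of the stored wakeup root flags a 5-level
 * (PML5) page-table root.  Roots are page aligned, so the bit is free.
 */
#define	WAKEUP_ROOT_LA57	0x1ULL

static inline uint64_t
wakeup_root_make(uint64_t root_pa, bool la57)
{
	return (la57 ? (root_pa | WAKEUP_ROOT_LA57) : root_pa);
}

static inline bool
wakeup_root_is_la57(uint64_t word)
{
	/* The trampoline's "testl $0x1, %ecx". */
	return ((word & WAKEUP_ROOT_LA57) != 0);
}

static inline uint64_t
wakeup_root_pa(uint64_t word)
{
	/* The trampoline's "andl $~0x1, %eax" before "mov %eax, %cr3". */
	return (word & ~WAKEUP_ROOT_LA57);
}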


@@ -148,10 +148,18 @@ wakeup_32:
 	mov	$bootdata32 - bootgdt, %eax
 	mov	%ax, %ds
 
-	/* Turn on the PAE bit for when paging is enabled */
+	/*
+	 * Turn on the PAE bit and optionally the LA57 bit for when paging
+	 * is later enabled.
+	 */
 	mov	%cr4, %eax
 	orl	$CR4_PAE, %eax
-	mov	%eax, %cr4
+	leal	wakeup_pagetables - wakeup_start(%ebx), %ecx
+	movl	(%ecx), %ecx
+	testl	$0x1, %ecx
+	je	1f
+	orl	$CR4_LA57, %eax
+1:	mov	%eax, %cr4
 
 	/*
 	 * Enable EFER.LME so that we get long mode when all the prereqs are
@@ -174,6 +182,7 @@ wakeup_32:
 	 */
 	leal	wakeup_pagetables - wakeup_start(%ebx), %eax
 	movl	(%eax), %eax
+	andl	$~0x1, %eax
 	mov	%eax, %cr3
 
 	/*
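A note on ordering, not spelled out in the commit: CR4.LA57 is set here, while the trampoline is still running 32-bit code with paging off, because the number of paging levels is fixed at the point long mode is entered and the bit cannot simply be flipped afterwards. The tag bit therefore has to be consumed before EFER.LME and CR0.PG are turned on, and masked off so that %cr3 receives a clean, page-aligned physical address.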


@@ -382,8 +382,11 @@ END(savectx)
  * Resuming processor state from pcb.
  */
 ENTRY(resumectx)
-	/* Switch to KPML4phys. */
+	/* Switch to KPML5/4phys. */
 	movq	KPML4phys,%rax
+	movq	KPML5phys,%rcx
+	cmpl	$0, la57
+	cmovne	%rcx, %rax
 	movq	%rax,%cr3
 
 	/* Force kernel segment registers. */
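In C terms the new resumectx prologue amounts to the sketch below, a paraphrase of the three added instructions: la57, KPML4phys and KPML5phys are the kernel symbols referenced by the assembly, load_cr3() is the usual amd64 inline, and the wrapper function name is made up for illustration.

#include <sys/types.h>
#include <machine/cpufunc.h>

extern int	la57;		/* 5-level paging enabled */
extern vm_paddr_t KPML4phys;	/* kernel 4-level page-table root */
extern vm_paddr_t KPML5phys;	/* kernel 5-level page-table root */

/* Hypothetical wrapper; resumectx does this with cmpl/cmovne. */
static void
resume_load_kernel_cr3(void)
{
	load_cr3(la57 ? KPML5phys : KPML4phys);
}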


@@ -99,7 +99,7 @@ static void	acpi_wakeup_cpus(struct acpi_softc *);
 #endif
 
 #ifdef __amd64__
-#define ACPI_WAKEPAGES	4
+#define ACPI_WAKEPAGES	5
 #else
 #define ACPI_WAKEPAGES	1
 #endif
@@ -414,8 +414,8 @@ acpi_install_wakeup_handler(struct acpi_softc *sc)
 	static void	*wakeaddr;
 	void		*wakepages[ACPI_WAKEPAGES];
 #ifdef __amd64__
-	uint64_t	*pt4, *pt3, *pt2;
-	vm_paddr_t	pt4pa, pt3pa, pt2pa;
+	uint64_t	*pt5, *pt4, *pt3, *pt2;
+	vm_paddr_t	pt5pa, pt4pa, pt3pa, pt2pa;
 	int		i;
 #endif
 
@@ -430,6 +430,10 @@ acpi_install_wakeup_handler(struct acpi_softc *sc)
 	sc->acpi_wakephys = vtophys(wakeaddr);
 
 #ifdef __amd64__
+	if (la57) {
+		pt5 = wakepages[4];
+		pt5pa = vtophys(pt5);
+	}
 	pt4 = wakepages[1];
 	pt3 = wakepages[2];
 	pt2 = wakepages[3];
@@ -448,7 +452,8 @@ acpi_install_wakeup_handler(struct acpi_softc *sc)
 #ifdef __amd64__
 	WAKECODE_FIXUP((wakeup_sw64 + 1), uint32_t,
 	    sc->acpi_wakephys + wakeup_64);
-	WAKECODE_FIXUP(wakeup_pagetables, uint32_t, pt4pa);
+	WAKECODE_FIXUP(wakeup_pagetables, uint32_t, la57 ? (pt5pa | 0x1) :
+	    pt4pa);
 #endif
 
 	/* Save pointers to some global data. */
@@ -457,7 +462,12 @@ acpi_install_wakeup_handler(struct acpi_softc *sc)
 	WAKECODE_FIXUP(wakeup_cr3, register_t, pmap_get_kcr3());
 #else /* __amd64__ */
 	/* Create the initial 1GB replicated page tables */
-	for (i = 0; i < 512; i++) {
+	for (i = 0; i < NPTEPG; i++) {
+		if (la57) {
+			pt5[i] = (uint64_t)pt4pa;
+			pt5[i] |= PG_V | PG_RW | PG_U;
+		}
+
 		/*
 		 * Each slot of the level 4 pages points
 		 * to the same level 3 page
@@ -473,7 +483,7 @@ acpi_install_wakeup_handler(struct acpi_softc *sc)
 		pt3[i] |= PG_V | PG_RW | PG_U;
 
 		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
-		pt2[i] = i * (2 * 1024 * 1024);
+		pt2[i] = i * NBPDR;
 		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
 	}
 #endif /* !__amd64__ */
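
Taken together, the acpi_wakeup.c changes build the wake page tables roughly as sketched below: every level is a single page whose entries all point at the same page one level down, so the trampoline sees a 1 GB identity map replicated across the address space, and when la57 is nonzero an extra PML5 page (the fifth wake page) is layered on top of the PML4. The constants mirror the kernel's (NPTEPG is 512 entries per page-table page, NBPDR is the 2 MB superpage size); the function itself is a condensed illustration, not the kernel code verbatim.

#include <stdint.h>

#define	NPTEPG	512			/* page-table entries per page */
#define	NBPDR	(2 * 1024 * 1024)	/* bytes mapped by one 2MB PDE */
#define	PG_V	0x001			/* valid */
#define	PG_RW	0x002			/* writable */
#define	PG_U	0x004			/* user-accessible */
#define	PG_PS	0x080			/* 2MB superpage */

/* pt5 may be NULL when the kernel runs with 4-level paging. */
static void
build_wake_pagetables(uint64_t *pt5, uint64_t *pt4, uint64_t *pt3,
    uint64_t *pt2, uint64_t pt4pa, uint64_t pt3pa, uint64_t pt2pa)
{
	int i;

	for (i = 0; i < NPTEPG; i++) {
		/* Optional level 5: every slot points at the same PML4. */
		if (pt5 != NULL)
			pt5[i] = pt4pa | PG_V | PG_RW | PG_U;
		/* Level 4: every slot points at the same level 3 page. */
		pt4[i] = pt3pa | PG_V | PG_RW | PG_U;
		/* Level 3: every slot points at the same level 2 page. */
		pt3[i] = pt2pa | PG_V | PG_RW | PG_U;
		/* Level 2: 2MB pages identity-mapping the first 1GB. */
		pt2[i] = ((uint64_t)i * NBPDR) | PG_V | PG_RW | PG_PS | PG_U;
	}
}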