Merge ^/head r338690 through r338730.
commit 0a13c83028
@@ -16,18 +16,13 @@ BIN1= \
        dhclient.conf \
        disktab \
        group \
        hosts \
        hosts.allow \
        hosts.equiv \
        libalias.conf \
        libmap.conf \
        login.access \
        mac.conf \
        netconfig \
        networks \
        nsswitch.conf \
        phones \
        protocols \
        rc.bsdextended \
        rc.firewall \
        remote \
@@ -160,10 +155,6 @@ distribution:
        ${DESTDIR}/boot/device.hints
.endif
.endif
.if ${MK_NIS} == "no"
        sed -i "" -e 's/.*_compat:/# &/' -e 's/compat$$/files/' \
            ${DESTDIR}/etc/nsswitch.conf
.endif

MTREE_CMD?=     mtree
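For illustration, when MK_NIS is "no" the sed invocation above (the same block is added below under ${LIBC_SRCTOP}/net) rewrites the installed nsswitch.conf so that NIS-dependent lookups fall back to local files. On a hypothetical stock input, the two expressions act like:

    group_compat: nis      ->  # group_compat: nis
    passwd_compat: nis     ->  # passwd_compat: nis
    group: compat          ->  group: files
    passwd: compat         ->  passwd: files

that is, any line containing "_compat:" is commented out, and a trailing "compat" source becomes "files" (the doubled $$ is only make's escaping of the sed end-of-line anchor).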
@@ -66,6 +66,9 @@ ENTRY(bcopy)
        movsq
        movq    %rdx,%rcx
        andq    $7,%rcx                 /* any bytes left? */
        jne     2f
        ret
2:
        rep
        movsb
        ret
@@ -73,11 +76,13 @@ ENTRY(bcopy)
        addq    %rcx,%rdi               /* copy backwards. */
        addq    %rcx,%rsi
        std
        andq    $7,%rcx                 /* any fractional bytes? */
        decq    %rdi
        decq    %rsi
        andq    $7,%rcx                 /* any fractional bytes? */
        je      3f
        rep
        movsb
3:
        movq    %rdx,%rcx               /* copy remainder by words */
        shrq    $3,%rcx
        subq    $7,%rsi
@@ -64,7 +64,7 @@ ENTRY(bcopy)
        movl    %edi,%edx
        subl    %esi,%edx
        cmpl    %ecx,%edx               /* overlapping? */
        jb      1f
        jb      2f
        cld                             /* nope, copy forwards. */
        movl    %ecx,%edx
        shrl    $2,%ecx                 /* copy by words */
@@ -72,21 +72,28 @@ ENTRY(bcopy)
        movsl
        movl    %edx,%ecx
        andl    $3,%ecx                 /* any bytes left? */
        jne     1f
        popl    %edi
        popl    %esi
        ret
1:
        rep
        movsb
        popl    %edi
        popl    %esi
        ret
1:
2:
        addl    %ecx,%edi               /* copy backwards. */
        addl    %ecx,%esi
        std
        movl    %ecx,%edx
        andl    $3,%ecx                 /* any fractional bytes? */
        decl    %edi
        decl    %esi
        andl    $3,%ecx                 /* any fractional bytes? */
        je      3f
        rep
        movsb
3:
        movl    %edx,%ecx               /* copy remainder by words */
        shrl    $2,%ecx
        subl    $3,%esi
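Both bcopy variants above hinge on the same overlap test: subtract src from dst and compare the unsigned difference against the length, deciding between a forward and a backward copy in a single branch. A minimal C sketch of that decision, with hypothetical names and none of the word-sized unrolling the assembly performs:

    #include <stddef.h>
    #include <stdint.h>

    static void *
    sketch_bcopy(const void *src, void *dst, size_t len)
    {
            const unsigned char *s = src;
            unsigned char *d = dst;

            if ((uintptr_t)d - (uintptr_t)s >= len) {
                    /* No harmful overlap: copy forwards. */
                    while (len--)
                            *d++ = *s++;
            } else {
                    /* dst starts inside [src, src+len): copy backwards. */
                    d += len;
                    s += len;
                    while (len--)
                            *--d = *--s;
            }
            return (dst);
    }

The unsigned subtraction folds two tests into one: a destination below the source wraps to a huge value and always passes the >= len check, so only a destination strictly inside the source range takes the backward path.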
@@ -4,6 +4,7 @@
# machine-independent net sources
.PATH: ${LIBC_SRCTOP}/net

CONFS+= net/hosts net/hosts.equiv net/networks net/nsswitch.conf net/protocols
SRCS+=  base64.c ether_addr.c eui64.c \
        gai_strerror.c getaddrinfo.c \
        gethostbydns.c gethostbyht.c gethostbynis.c gethostnamadr.c \
@@ -123,3 +124,8 @@ SRCS+= hesiod.c
MAN+=   hesiod.3
.endif

.if ${MK_NIS} == "no"
afterinstallconfig:
        sed -i "" -e 's/.*_compat:/# &/' -e 's/compat$$/files/' \
            ${DESTDIR}/etc/nsswitch.conf
.endif
@@ -1581,6 +1581,21 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
         */
        identify_cpu2();

        /*
         * Check for pti, pcid, and invpcid before ifuncs are
         * resolved, to correctly select the implementation for
         * pmap_activate_sw_mode().
         */
        pti = pti_get_default();
        TUNABLE_INT_FETCH("vm.pmap.pti", &pti);
        TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled);
        if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) {
                invpcid_works = (cpu_stdext_feature &
                    CPUID_STDEXT_INVPCID) != 0;
        } else {
                pmap_pcid_enabled = 0;
        }

        link_elf_ireloc(kmdp);

        /*
@@ -1645,9 +1660,6 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
        mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);

        /* exceptions */
        pti = pti_get_default();
        TUNABLE_INT_FETCH("vm.pmap.pti", &pti);

        for (x = 0; x < NIDT; x++)
                setidt(x, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_SYSIGT,
                    SEL_KPL, 0);
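The comment in the first hunk states the key ordering constraint: an ifunc resolver runs once and its choice is bound into the call site, so every global it consults must be populated before link_elf_ireloc() triggers resolution. A minimal user-space sketch of the same pattern (hypothetical names; GCC/Clang on ELF, where depending on binding mode the resolver may run as early as program startup, which only sharpens the point):

    /* Flag that must be settled before op() is resolved. */
    static int use_fast;

    static int impl_fast(void) { return (1); }
    static int impl_slow(void) { return (0); }

    /* Resolver: runs once, returns the implementation to bind. */
    static int (*resolve_op(void))(void)
    {
            return (use_fast ? impl_fast : impl_slow);
    }

    int op(void) __attribute__((ifunc("resolve_op")));

If use_fast were assigned only after resolution, the stale choice would persist for the life of the program, which is exactly why the pti/pcid/invpcid tunables are fetched here before link_elf_ireloc().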
@@ -146,6 +146,7 @@ __FBSDID("$FreeBSD$");

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/ifunc.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
@@ -1179,11 +1180,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
        pmap_init_pat();

        /* Initialize TLB Context Id. */
        TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled);
        if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) {
                /* Check for INVPCID support */
                invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID)
                    != 0;
        if (pmap_pcid_enabled) {
                for (i = 0; i < MAXCPU; i++) {
                        kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN;
                        kernel_pmap->pm_pcids[i].pm_gen = 1;
@@ -1204,8 +1201,6 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
                 * during pcpu setup.
                 */
                load_cr4(rcr4() | CR4_PCIDE);
        } else {
                pmap_pcid_enabled = 0;
        }
        }
@@ -7441,17 +7436,177 @@ pmap_pcid_alloc(pmap_t pmap, u_int cpuid)
        return (0);
}

static uint64_t
pmap_pcid_alloc_checked(pmap_t pmap, u_int cpuid)
{
        uint64_t cached;

        cached = pmap_pcid_alloc(pmap, cpuid);
        KASSERT(pmap->pm_pcids[cpuid].pm_pcid >= 0 &&
            pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
            ("pmap %p cpu %d pcid %#x", pmap, cpuid,
            pmap->pm_pcids[cpuid].pm_pcid));
        KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
            pmap == kernel_pmap,
            ("non-kernel pmap pmap %p cpu %d pcid %#x",
            pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));
        return (cached);
}

static void
pmap_activate_sw_pti_post(pmap_t pmap)
{

        if (pmap->pm_ucr3 != PMAP_NO_CR3)
                PCPU_GET(tssp)->tss_rsp0 = ((vm_offset_t)PCPU_PTR(pti_stack) +
                    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful;
}

static void inline
pmap_activate_sw_pcid_pti(pmap_t pmap, u_int cpuid, const bool invpcid_works1)
{
        struct invpcid_descr d;
        uint64_t cached, cr3, kcr3, ucr3;

        cached = pmap_pcid_alloc_checked(pmap, cpuid);
        cr3 = rcr3();
        if ((cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
                load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid);
        PCPU_SET(curpmap, pmap);
        kcr3 = pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid;
        ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[cpuid].pm_pcid |
            PMAP_PCID_USER_PT;

        if (!cached && pmap->pm_ucr3 != PMAP_NO_CR3) {
                /*
                 * Explicitly invalidate translations cached from the
                 * user page table. They are not automatically
                 * flushed by reload of cr3 with the kernel page table
                 * pointer above.
                 *
                 * Note that the if() condition is resolved statically
                 * by using the function argument instead of
                 * runtime-evaluated invpcid_works value.
                 */
                if (invpcid_works1) {
                        d.pcid = PMAP_PCID_USER_PT |
                            pmap->pm_pcids[cpuid].pm_pcid;
                        d.pad = 0;
                        d.addr = 0;
                        invpcid(&d, INVPCID_CTX);
                } else {
                        pmap_pti_pcid_invalidate(ucr3, kcr3);
                }
        }

        PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
        PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
        if (cached)
                PCPU_INC(pm_save_cnt);
}

static void
pmap_activate_sw_pcid_invpcid_pti(pmap_t pmap, u_int cpuid)
{

        pmap_activate_sw_pcid_pti(pmap, cpuid, true);
        pmap_activate_sw_pti_post(pmap);
}

static void
pmap_activate_sw_pcid_noinvpcid_pti(pmap_t pmap, u_int cpuid)
{
        register_t rflags;

        /*
         * If the INVPCID instruction is not available,
         * invltlb_pcid_handler() is used to handle an invalidate_all
         * IPI, which checks for curpmap == smp_tlb_pmap. The below
         * sequence of operations has a window where %CR3 is loaded
         * with the new pmap's PML4 address, but the curpmap value has
         * not yet been updated. This causes the invltlb IPI handler,
         * which is called between the updates, to execute as a NOP,
         * which leaves stale TLB entries.
         *
         * Note that the most typical use of pmap_activate_sw(), from
         * the context switch, is immune to this race, because
         * interrupts are disabled (while the thread lock is owned),
         * and the IPI happens after curpmap is updated. Protect
         * other callers in a similar way, by disabling interrupts
         * around the %cr3 register reload and curpmap assignment.
         */
        rflags = intr_disable();
        pmap_activate_sw_pcid_pti(pmap, cpuid, false);
        intr_restore(rflags);
        pmap_activate_sw_pti_post(pmap);
}

static void
pmap_activate_sw_pcid_nopti(pmap_t pmap, u_int cpuid)
{
        uint64_t cached, cr3;

        cached = pmap_pcid_alloc_checked(pmap, cpuid);
        cr3 = rcr3();
        if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
                load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
                    cached);
        PCPU_SET(curpmap, pmap);
        if (cached)
                PCPU_INC(pm_save_cnt);
}

static void
pmap_activate_sw_pcid_noinvpcid_nopti(pmap_t pmap, u_int cpuid)
{
        register_t rflags;

        rflags = intr_disable();
        pmap_activate_sw_pcid_nopti(pmap, cpuid);
        intr_restore(rflags);
}

static void
pmap_activate_sw_nopcid_nopti(pmap_t pmap, u_int cpuid __unused)
{

        load_cr3(pmap->pm_cr3);
        PCPU_SET(curpmap, pmap);
}

static void
pmap_activate_sw_nopcid_pti(pmap_t pmap, u_int cpuid __unused)
{

        pmap_activate_sw_nopcid_nopti(pmap, cpuid);
        PCPU_SET(kcr3, pmap->pm_cr3);
        PCPU_SET(ucr3, pmap->pm_ucr3);
        pmap_activate_sw_pti_post(pmap);
}

DEFINE_IFUNC(static, void, pmap_activate_sw_mode, (pmap_t, u_int), static)
{

        if (pmap_pcid_enabled && pti && invpcid_works)
                return (pmap_activate_sw_pcid_invpcid_pti);
        else if (pmap_pcid_enabled && pti && !invpcid_works)
                return (pmap_activate_sw_pcid_noinvpcid_pti);
        else if (pmap_pcid_enabled && !pti && invpcid_works)
                return (pmap_activate_sw_pcid_nopti);
        else if (pmap_pcid_enabled && !pti && !invpcid_works)
                return (pmap_activate_sw_pcid_noinvpcid_nopti);
        else if (!pmap_pcid_enabled && pti)
                return (pmap_activate_sw_nopcid_pti);
        else /* if (!pmap_pcid_enabled && !pti) */
                return (pmap_activate_sw_nopcid_nopti);
}

void
pmap_activate_sw(struct thread *td)
{
        pmap_t oldpmap, pmap;
        struct invpcid_descr d;
        uint64_t cached, cr3, kcr3, kern_pti_cached, rsp0, ucr3;
        register_t rflags;
        u_int cpuid;
        struct amd64tss *tssp;

        rflags = 0;
        oldpmap = PCPU_GET(curpmap);
        pmap = vmspace_pmap(td->td_proc->p_vmspace);
        if (oldpmap == pmap)
@@ -7462,91 +7617,7 @@ pmap_activate_sw(struct thread *td)
#else
        CPU_SET(cpuid, &pmap->pm_active);
#endif
        cr3 = rcr3();
        if (pmap_pcid_enabled) {
                cached = pmap_pcid_alloc(pmap, cpuid);
                KASSERT(pmap->pm_pcids[cpuid].pm_pcid >= 0 &&
                    pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
                    ("pmap %p cpu %d pcid %#x", pmap, cpuid,
                    pmap->pm_pcids[cpuid].pm_pcid));
                KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
                    pmap == kernel_pmap,
                    ("non-kernel pmap thread %p pmap %p cpu %d pcid %#x",
                    td, pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));

                /*
                 * If the INVPCID instruction is not available,
                 * invltlb_pcid_handler() is used for handle
                 * invalidate_all IPI, which checks for curpmap ==
                 * smp_tlb_pmap. Below operations sequence has a
                 * window where %CR3 is loaded with the new pmap's
                 * PML4 address, but curpmap value is not yet updated.
                 * This causes invltlb IPI handler, called between the
                 * updates, to execute as NOP, which leaves stale TLB
                 * entries.
                 *
                 * Note that the most typical use of
                 * pmap_activate_sw(), from the context switch, is
                 * immune to this race, because interrupts are
                 * disabled (while the thread lock is owned), and IPI
                 * happens after curpmap is updated. Protect other
                 * callers in a similar way, by disabling interrupts
                 * around the %cr3 register reload and curpmap
                 * assignment.
                 */
                if (!invpcid_works)
                        rflags = intr_disable();

                kern_pti_cached = pti ? 0 : cached;
                if (!kern_pti_cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3) {
                        load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
                            kern_pti_cached);
                }
                PCPU_SET(curpmap, pmap);
                if (pti) {
                        kcr3 = pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid;
                        ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[cpuid].pm_pcid |
                            PMAP_PCID_USER_PT;

                        if (!cached && pmap->pm_ucr3 != PMAP_NO_CR3) {
                                /*
                                 * Manually invalidate translations cached
                                 * from the user page table. They are not
                                 * flushed by reload of cr3 with the kernel
                                 * page table pointer above.
                                 */
                                if (invpcid_works) {
                                        d.pcid = PMAP_PCID_USER_PT |
                                            pmap->pm_pcids[cpuid].pm_pcid;
                                        d.pad = 0;
                                        d.addr = 0;
                                        invpcid(&d, INVPCID_CTX);
                                } else {
                                        pmap_pti_pcid_invalidate(ucr3, kcr3);
                                }
                        }

                        PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
                        PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
                }
                if (!invpcid_works)
                        intr_restore(rflags);
                if (cached)
                        PCPU_INC(pm_save_cnt);
        } else {
                load_cr3(pmap->pm_cr3);
                PCPU_SET(curpmap, pmap);
                if (pti) {
                        PCPU_SET(kcr3, pmap->pm_cr3);
                        PCPU_SET(ucr3, pmap->pm_ucr3);
                }
        }
        if (pmap->pm_ucr3 != PMAP_NO_CR3) {
                rsp0 = ((vm_offset_t)PCPU_PTR(pti_stack) +
                    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful;
                tssp = PCPU_GET(tssp);
                tssp->tss_rsp0 = rsp0;
        }
        pmap_activate_sw_mode(pmap, cpuid);
#ifdef SMP
        CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
#else
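For reference, the resolver above maps the three boot-time flags onto the six leaf routines; the interrupt-disabling wrappers are selected exactly when INVPCID is absent:

    pcid  pti  invpcid   selected implementation
     yes  yes  yes       pmap_activate_sw_pcid_invpcid_pti
     yes  yes  no        pmap_activate_sw_pcid_noinvpcid_pti   (disables interrupts)
     yes  no   yes       pmap_activate_sw_pcid_nopti
     yes  no   no        pmap_activate_sw_pcid_noinvpcid_nopti (disables interrupts)
     no   yes  -         pmap_activate_sw_nopcid_pti
     no   no   -         pmap_activate_sw_nopcid_nopti

The old pmap_activate_sw() body removed in the second hunk tested the same flags on every context switch; the ifunc moves that branching to a single resolution at boot.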
@@ -108,40 +108,40 @@ END(sse2_pagezero)
 */
ENTRY(memmove_std)
        PUSH_FRAME_POINTER
        movq    %rdi,%r9
        movq    %rdi,%rax
        movq    %rdx,%rcx

        movq    %rdi,%rax
        subq    %rsi,%rax
        cmpq    %rcx,%rax               /* overlapping && src < dst? */
        jb      1f
        movq    %rdi,%r8
        subq    %rsi,%r8
        cmpq    %rcx,%r8                /* overlapping && src < dst? */
        jb      2f

        shrq    $3,%rcx                 /* copy by 64-bit words */
        rep
        movsq
        movq    %rdx,%rcx
        andq    $7,%rcx                 /* any bytes left? */
        jne     2f
        movq    %r9,%rax
        jne     1f
        POP_FRAME_POINTER
        ret
2:
1:
        rep
        movsb
        movq    %r9,%rax
        POP_FRAME_POINTER
        ret

        /* ALIGN_TEXT */
1:
2:
        addq    %rcx,%rdi               /* copy backwards */
        addq    %rcx,%rsi
        decq    %rdi
        decq    %rsi
        andq    $7,%rcx                 /* any fractional bytes? */
        std
        andq    $7,%rcx                 /* any fractional bytes? */
        je      3f
        rep
        movsb
3:
        movq    %rdx,%rcx               /* copy remainder by 32-bit words */
        shrq    $3,%rcx
        subq    $7,%rsi
@@ -149,24 +149,22 @@ ENTRY(memmove_std)
        rep
        movsq
        cld
        movq    %r9,%rax
        POP_FRAME_POINTER
        ret
END(memmove_std)

ENTRY(memmove_erms)
        PUSH_FRAME_POINTER
        movq    %rdi,%r9
        movq    %rdi,%rax
        movq    %rdx,%rcx

        movq    %rdi,%rax
        subq    %rsi,%rax
        cmpq    %rcx,%rax               /* overlapping && src < dst? */
        movq    %rdi,%r8
        subq    %rsi,%r8
        cmpq    %rcx,%r8                /* overlapping && src < dst? */
        jb      1f

        rep
        movsb
        movq    %r9,%rax
        POP_FRAME_POINTER
        ret

@@ -179,7 +177,6 @@ ENTRY(memmove_erms)
        rep
        movsb
        cld
        movq    %r9,%rax
        POP_FRAME_POINTER
        ret
END(memmove_erms)
@@ -705,6 +705,17 @@ trap_is_smap(struct trapframe *frame)
            PGEX_P && (frame->tf_rflags & PSL_AC) == 0);
}

static bool
trap_is_pti(struct trapframe *frame)
{

        return (PCPU_GET(curpmap)->pm_ucr3 != PMAP_NO_CR3 &&
            pg_nx != 0 && (frame->tf_err & (PGEX_P | PGEX_W |
            PGEX_U | PGEX_I)) == (PGEX_P | PGEX_U | PGEX_I) &&
            (curpcb->pcb_saved_ucr3 & ~CR3_PCID_MASK) ==
            (PCPU_GET(curpmap)->pm_cr3 & ~CR3_PCID_MASK));
}

static int
trap_pfault(struct trapframe *frame, int usermode)
{
@@ -806,12 +817,8 @@ trap_pfault(struct trapframe *frame, int usermode)
         * If nx protection of the usermode portion of kernel page
         * tables caused trap, panic.
         */
        if (usermode && PCPU_GET(curpmap)->pm_ucr3 != PMAP_NO_CR3 &&
            pg_nx != 0 && (frame->tf_err & (PGEX_P | PGEX_W |
            PGEX_U | PGEX_I)) == (PGEX_P | PGEX_U | PGEX_I) &&
            (curpcb->pcb_saved_ucr3 & ~CR3_PCID_MASK)==
            (PCPU_GET(curpmap)->pm_cr3 & ~CR3_PCID_MASK))
                panic("PTI: pid %d comm %s tf_err %#lx\n", p->p_pid,
        if (usermode && trap_is_pti(frame))
                panic("PTI: pid %d comm %s tf_err %#lx", p->p_pid,
                    p->p_comm, frame->tf_err);

        /*
@@ -415,9 +415,10 @@ vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
 * least one valid label was found.
 */
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
{
        struct g_provider *pp;
        nvlist_t *config;
        vdev_phys_t *vdev_lists[VDEV_LABELS];
        char *buf;
        size_t buflen;
@@ -442,7 +443,6 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)

        buflen = sizeof(vdev_lists[0]->vp_nvlist);

        *config = NULL;
        /* Create all of the IO requests */
        for (l = 0; l < VDEV_LABELS; l++) {
                cmds[l] = BIO_READ;
@@ -458,6 +458,7 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
            VDEV_LABELS);

        /* Parse the labels */
        config = *configp = NULL;
        nlabels = 0;
        for (l = 0; l < VDEV_LABELS; l++) {
                if (errors[l] != 0)
@@ -465,25 +466,27 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)

                buf = vdev_lists[l]->vp_nvlist;

                if (nvlist_unpack(buf, buflen, config, 0) != 0)
                if (nvlist_unpack(buf, buflen, &config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        *config = NULL;
                        nvlist_free(config);
                        continue;
                }

                if (state != POOL_STATE_SPARE &&
                    state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        *config = NULL;
                        nvlist_free(config);
                        continue;
                }

                if (*configp != NULL)
                        nvlist_free(*configp);
                *configp = config;

                nlabels++;
        }
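The reworked function follows a common out-parameter discipline: unpack each label into a local, validate it, and only publish a survivor through the caller's pointer, freeing whatever was published before. A condensed sketch of the shape (hypothetical names; validate_label() stands in for the POOL_STATE/POOL_TXG checks above):

    static int
    read_best_label(char **bufs, size_t buflen, int n, nvlist_t **outp)
    {
            nvlist_t *cand;
            int l, nlabels;

            *outp = NULL;
            nlabels = 0;
            for (l = 0; l < n; l++) {
                    if (nvlist_unpack(bufs[l], buflen, &cand, 0) != 0)
                            continue;
                    if (!validate_label(cand)) {    /* hypothetical check */
                            nvlist_free(cand);
                            continue;
                    }
                    if (*outp != NULL)              /* keep only the latest survivor */
                            nvlist_free(*outp);
                    *outp = cand;
                    nlabels++;
            }
            return (nlabels);
    }

Working on the local and assigning *outp only after a label passes validation means the caller never observes a half-validated or already-freed nvlist through the out pointer, which the old code risked by unpacking straight into *config and NULLing it on each failure path.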
@@ -112,8 +112,8 @@ static struct ig4iic_pci_device ig4iic_pci_devices[] = {
        { PCI_CHIP_SKYLAKE_I2C_3, "Intel Sunrise Point-LP I2C Controller-3", IG4_SKYLAKE},
        { PCI_CHIP_SKYLAKE_I2C_4, "Intel Sunrise Point-LP I2C Controller-4", IG4_SKYLAKE},
        { PCI_CHIP_SKYLAKE_I2C_5, "Intel Sunrise Point-LP I2C Controller-5", IG4_SKYLAKE},
        { PCI_CHIP_KABYLAKE_I2C_0, "Intel Sunrise Point-LP I2C Controller-0", IG4_SKYLAKE},
        { PCI_CHIP_KABYLAKE_I2C_1, "Intel Sunrise Point-LP I2C Controller-1", IG4_SKYLAKE},
        { PCI_CHIP_KABYLAKE_I2C_0, "Intel Sunrise Point-H I2C Controller-0", IG4_SKYLAKE},
        { PCI_CHIP_KABYLAKE_I2C_1, "Intel Sunrise Point-H I2C Controller-1", IG4_SKYLAKE},
        { PCI_CHIP_APL_I2C_0, "Intel Apollo Lake I2C Controller-0", IG4_APL},
        { PCI_CHIP_APL_I2C_1, "Intel Apollo Lake I2C Controller-1", IG4_APL},
        { PCI_CHIP_APL_I2C_2, "Intel Apollo Lake I2C Controller-2", IG4_APL},
@@ -569,6 +569,8 @@ gre_transmit(struct ifnet *ifp, struct mbuf *m)
                goto drop;
        }
        af = m->m_pkthdr.csum_data;
        BPF_MTAP2(ifp, &af, sizeof(af), m);
        m->m_flags &= ~(M_BCAST|M_MCAST);
        M_SETFIB(m, sc->gre_fibnum);
        M_PREPEND(m, sc->gre_hlen, M_NOWAIT);
        if (m == NULL) {
@@ -1719,24 +1719,28 @@ pf_purge_expired_states(u_int i, int maxcheck)
        while (maxcheck > 0) {

                ih = &V_pf_idhash[i];

                /* only take the lock if we expect to do work */
                if (!LIST_EMPTY(&ih->states)) {
relock:
                        PF_HASHROW_LOCK(ih);
                        LIST_FOREACH(s, &ih->states, entry) {
                                if (pf_state_expires(s) <= time_uptime) {
                                        V_pf_status.states -=
                                            pf_unlink_state(s, PF_ENTER_LOCKED);
                                        goto relock;
                PF_HASHROW_LOCK(ih);
                LIST_FOREACH(s, &ih->states, entry) {
                        if (pf_state_expires(s) <= time_uptime) {
                                V_pf_status.states -=
                                    pf_unlink_state(s, PF_ENTER_LOCKED);
                                goto relock;
                        }
                        s->rule.ptr->rule_flag |= PFRULE_REFS;
                        if (s->nat_rule.ptr != NULL)
                                s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
                        if (s->anchor.ptr != NULL)
                                s->anchor.ptr->rule_flag |= PFRULE_REFS;
                        s->kif->pfik_flags |= PFI_IFLAG_REFS;
                        if (s->rt_kif)
                                s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
                }
                s->rule.ptr->rule_flag |= PFRULE_REFS;
                if (s->nat_rule.ptr != NULL)
                        s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
                if (s->anchor.ptr != NULL)
                        s->anchor.ptr->rule_flag |= PFRULE_REFS;
                s->kif->pfik_flags |= PFI_IFLAG_REFS;
                if (s->rt_kif)
                        s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
                PF_HASHROW_UNLOCK(ih);
                }
                PF_HASHROW_UNLOCK(ih);

                /* Return when we hit end of hash. */
                if (++i > pf_hashmask) {
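The dropped lines are the unlocked fast path: testing LIST_EMPTY() on a hash row without holding its lock. A plausible reading of the change is that peeking at a shared list head while writers may be linking or unlinking states is a data race, and a row that looks empty at the instant of the peek can hold work a moment later; since iterating an empty list under the lock is already cheap, the new shape simply locks every row. A minimal sketch, assuming the pf locking macros shown above:

    PF_HASHROW_LOCK(ih);
    LIST_FOREACH(s, &ih->states, entry) {
            /* expire s, or mark its rules/kifs as referenced ... */
    }
    PF_HASHROW_UNLOCK(ih);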
@@ -221,14 +221,20 @@ atpic_register_sources(struct pic *pic)
         * that APIC ISA routing and allowing the ATPIC source for that IRQ
         * to leak through. We used to depend on this feature for routing
         * IRQ0 via mixed mode, but now we don't use mixed mode at all.
         *
         * To avoid the slave not register sources after the master
         * registers its sources, register all IRQs when this function is
         * called on the master.
         */
        if (ap != &atpics[MASTER])
                return;
        for (i = 0; i < NUM_ISA_IRQS; i++)
                if (intr_lookup_source(i) != NULL)
                        return;

        /* Loop through all interrupt sources and add them. */
        for (i = 0, ai = atintrs + ap->at_irqbase; i < 8; i++, ai++) {
                if (ap->at_irqbase + i == ICU_SLAVEID)
        for (i = 0, ai = atintrs; i < NUM_ISA_IRQS; i++, ai++) {
                if (i == ICU_SLAVEID)
                        continue;
                intr_register_source(&ai->at_intsrc);
        }