diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index d992f2c242aa..4d2f020063e5 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -81,9 +81,9 @@ _Static_assert(offsetof(struct thread, td_flags) == 0xfc,
     "struct thread KBI td_flags");
 _Static_assert(offsetof(struct thread, td_pflags) == 0x104,
     "struct thread KBI td_pflags");
-_Static_assert(offsetof(struct thread, td_frame) == 0x468,
+_Static_assert(offsetof(struct thread, td_frame) == 0x470,
     "struct thread KBI td_frame");
-_Static_assert(offsetof(struct thread, td_emuldata) == 0x510,
+_Static_assert(offsetof(struct thread, td_emuldata) == 0x518,
     "struct thread KBI td_emuldata");
 _Static_assert(offsetof(struct proc, p_flag) == 0xb0,
     "struct proc KBI p_flag");
@@ -101,9 +101,9 @@ _Static_assert(offsetof(struct thread, td_flags) == 0x98,
     "struct thread KBI td_flags");
 _Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
     "struct thread KBI td_pflags");
-_Static_assert(offsetof(struct thread, td_frame) == 0x2e4,
+_Static_assert(offsetof(struct thread, td_frame) == 0x2e8,
     "struct thread KBI td_frame");
-_Static_assert(offsetof(struct thread, td_emuldata) == 0x330,
+_Static_assert(offsetof(struct thread, td_emuldata) == 0x334,
     "struct thread KBI td_emuldata");
 _Static_assert(offsetof(struct proc, p_flag) == 0x68,
     "struct proc KBI p_flag");
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index 6d36b2b92919..9a8b9828737f 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -178,6 +178,8 @@ userret(struct thread *td, struct trapframe *frame)
 	    ("userret: Returning with stop signals deferred"));
 	KASSERT(td->td_su == NULL,
 	    ("userret: Returning with SU cleanup request not handled"));
+	KASSERT(td->td_vslock_sz == 0,
+	    ("userret: Returning with vslock-wired space"));
 #ifdef VIMAGE
 	/* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
 	VNET_ASSERT(curvnet == NULL,
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 9bc75db8591a..82b16059dc0d 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -297,6 +297,7 @@ struct thread {
 	void		*td_su;		/* (k) FFS SU private */
 	sbintime_t	td_sleeptimo;	/* (t) Sleep timeout. */
 	int		td_rtcgen;	/* (s) rtc_generation of abs. sleep */
+	size_t		td_vslock_sz;	/* (k) amount of vslock-ed space */
 #define	td_endzero td_sigmask
 
 /* Copied during fork1() or create_thread(). */
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 5546822e3a58..25db4ad18a4b 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -196,11 +196,16 @@ vslock(void *addr, size_t len)
 #endif
 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+	if (error == KERN_SUCCESS) {
+		curthread->td_vslock_sz += len;
+		return (0);
+	}
+
 	/*
 	 * Return EFAULT on error to match copy{in,out}() behaviour
 	 * rather than returning ENOMEM like mlock() would.
 	 */
-	return (error == KERN_SUCCESS ? 0 : EFAULT);
+	return (EFAULT);
 }
 
 void
@@ -208,6 +213,8 @@ vsunlock(void *addr, size_t len)
 {
 
 	/* Rely on the parameter sanity checks performed by vslock(). */
+	MPASS(curthread->td_vslock_sz >= len);
+	curthread->td_vslock_sz -= len;
 	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
 	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
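
For illustration, here is a minimal user-space model of the accounting invariant the patch enforces: vslock() adds to the new per-thread td_vslock_sz counter only when the wiring succeeds, vsunlock() subtracts from it and must never underflow it, and userret() asserts the counter is back to zero before the thread returns to user mode. This is a sketch only; struct fake_thread, model_vslock(), model_vsunlock(), and model_userret() are hypothetical stand-ins, not kernel code.

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the new per-thread counter td_vslock_sz. */
struct fake_thread {
	size_t td_vslock_sz;
};

/* Models vslock(): account for the wired bytes only when wiring succeeds. */
static int
model_vslock(struct fake_thread *td, size_t len, int wire_ok)
{
	if (wire_ok) {
		td->td_vslock_sz += len;
		return (0);
	}
	/* Like the patched vslock(), report failure as EFAULT. */
	return (EFAULT);
}

/* Models vsunlock(): the counter must never underflow (the MPASS above). */
static void
model_vsunlock(struct fake_thread *td, size_t len)
{
	assert(td->td_vslock_sz >= len);
	td->td_vslock_sz -= len;
}

/* Models the new userret() KASSERT: no vslock-wired space may remain. */
static void
model_userret(struct fake_thread *td)
{
	assert(td->td_vslock_sz == 0);
}

int
main(void)
{
	struct fake_thread td = { 0 };

	if (model_vslock(&td, 4096, 1) == 0) {
		/* ... the caller works on the wired range here ... */
		model_vsunlock(&td, 4096);
	}
	model_userret(&td);	/* Passes: every lock was paired with an unlock. */
	printf("balanced, td_vslock_sz = %zu\n", td.td_vslock_sz);
	return (0);
}

If a code path took the vslock() branch but skipped vsunlock() before returning to user mode, the model_userret() assertion would fire, which is exactly the leak the new KASSERT in userret() is meant to catch.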