Don't release xbusy on kmem pages.  After lockless page lookup we will
not be able to guarantee that they can be reacquired without blocking.

Reviewed by:	kib
Discussed with:	markj
Differential Revision:	https://reviews.freebsd.org/D23506
parent 6c5f36ff30
commit e9ceb9dd11
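In short, pages backing kmem allocations and kernel stacks now keep the
exclusive busy (xbusy) state they acquire at allocation for as long as the
mapping exists, and the free paths take over that held busy state instead of
sleeping to reacquire it. A minimal before/after sketch of a free path,
using only KPIs that appear in the diff below; the helper names and includes
are illustrative, not part of the change:

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Old scheme: the page was xunbusied right after allocation, so
 * freeing it required a potentially blocking busy reacquisition.
 */
static void
kmem_page_free_old(vm_page_t m)
{
	vm_page_busy_acquire(m, 0);	/* may sleep */
	vm_page_unwire_noq(m);
	vm_page_free(m);
}

/*
 * New scheme: the page has stayed xbusy since vm_page_grab_pages(),
 * so the freeing thread merely claims ownership of the held lock.
 */
static void
kmem_page_free_new(vm_page_t m)
{
	vm_page_xbusy_claim(m);		/* never sleeps */
	vm_page_unwire_noq(m);
	vm_page_free(m);
}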
sys/vm/vm_glue.c:

@@ -342,10 +342,8 @@ vm_thread_stack_create(struct domainset *ds, vm_object_t *ksobjp, int pages)
 	VM_OBJECT_WLOCK(ksobj);
 	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
 	    ma, pages);
-	for (i = 0; i < pages; i++) {
+	for (i = 0; i < pages; i++)
 		vm_page_valid(ma[i]);
-		vm_page_xunbusy(ma[i]);
-	}
 	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
 	*ksobjp = ksobj;
@@ -365,7 +363,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("%s: kstack already missing?", __func__);
-		vm_page_busy_acquire(m, 0);
+		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
 	}
sys/vm/vm_kern.c:

@@ -224,7 +224,6 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
@@ -317,7 +316,6 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
@@ -501,7 +499,6 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 #if VM_NRESERVLEVEL > 0
@@ -591,7 +588,7 @@ _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 #endif
 	for (; offset < end; offset += PAGE_SIZE, m = next) {
 		next = vm_page_next(m);
-		vm_page_busy_acquire(m, 0);
+		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
 	}
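All three kmem_* allocation hunks drop the same vm_page_xunbusy() call:
kmem pages are handed to pmap_enter() still xbusied and stay that way until
_kmem_unback() claims and frees them. The payoff is for lockless page
lookup, where a reader that finds such a page may only try-busy it and must
never wait. A hypothetical lockless consumer, sketched below; the helper is
illustrative, though vm_page_tryxbusy() itself is an existing primitive:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Illustration only: with no object lock held, busying must be a
 * try-operation.  Because kmem pages are now permanently xbusy,
 * such a reader can never win the busy lock on a page the kernel
 * map still owns, and it never has to block to find that out.
 */
static bool
lockless_reader_tryclaim(vm_page_t m)
{
	if (vm_page_tryxbusy(m) == 0)	/* fails instead of sleeping */
		return (false);
	/* ... m is exclusively busied by curthread here ... */
	vm_page_xunbusy(m);
	return (true);
}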
sys/vm/vm_page.h:

@@ -764,9 +764,14 @@ void vm_page_object_busy_assert(vm_page_t m);
 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
 	vm_page_assert_pga_writeable(m, bits)
+#define	vm_page_xbusy_claim(m) do {					\
+	vm_page_assert_xbusied_unchecked((m));				\
+	(m)->busy_lock = VPB_CURTHREAD_EXCLUSIVE;			\
+} while (0)
 #else
 #define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
+#define	vm_page_xbusy_claim(m)
 #endif
 
 #if BYTE_ORDER == BIG_ENDIAN
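Why the two definitions differ: under INVARIANTS the busy word also records
the owning thread for assertion purposes, so "claiming" an xbusy held on a
subsystem's behalf means re-stamping that owner with curthread; without
INVARIANTS no owner is tracked, and there is nothing to do. A small sketch
of what the claim buys under INVARIANTS; the demo function is hypothetical,
the assertions are existing KPIs:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * After the claim, the standard ownership assertion is satisfied
 * even though some other thread originally busied the page.
 */
static void
claim_demo(vm_page_t m)
{
	vm_page_assert_xbusied_unchecked(m);	/* xbusied, any owner */
	vm_page_xbusy_claim(m);			/* curthread now owner */
	vm_page_assert_xbusied(m);		/* owner check passes */
}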
sys/vm/vm_swapout.c:

@@ -540,6 +540,7 @@ vm_thread_swapout(struct thread *td)
 		if (m == NULL)
 			panic("vm_thread_swapout: kstack already missing?");
 		vm_page_dirty(m);
+		vm_page_xunbusy_unchecked(m);
 		vm_page_unwire(m, PQ_LAUNDRY);
 	}
 	VM_OBJECT_WUNLOCK(ksobj);
@@ -564,7 +565,6 @@ vm_thread_swapin(struct thread *td, int oom_alloc)
 	for (i = 0; i < pages;) {
 		vm_page_assert_xbusied(ma[i]);
 		if (vm_page_all_valid(ma[i])) {
-			vm_page_xunbusy(ma[i]);
 			i++;
 			continue;
 		}
@@ -581,8 +581,6 @@ vm_thread_swapin(struct thread *td, int oom_alloc)
 		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
 		    __func__, td->td_proc->p_pid));
 		vm_object_pip_wakeup(ksobj);
-		for (j = i; j < i + count; j++)
-			vm_page_xunbusy(ma[j]);
 		i += count;
 	}
 	pmap_qenter(td->td_kstack, ma, pages);
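The swap paths are the one exception to "kstack pages are always xbusy":
swapout must surrender the busy so the laundry can process the page, and it
needs the _unchecked variant because, under INVARIANTS, the busy owner on
record is whichever thread created the stack, not the swapper. Swapin then
re-busies the pages via vm_page_grab_pages() and, with its xunbusy calls
removed, leaves them busied, restoring the resident-stack invariant. A
condensed sketch of the swapout side; the helper name is hypothetical and
error handling is elided:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Surrender the long-held xbusy so the laundry thread can take the
 * page; it will be re-busied (and kept busy) at swapin time.
 */
static void
kstack_page_swapout(vm_page_t m)
{
	vm_page_dirty(m);		/* contents must reach swap */
	vm_page_xunbusy_unchecked(m);	/* recorded owner != curthread */
	vm_page_unwire(m, PQ_LAUNDRY);	/* queue for laundering */
}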