Consistently use busy and vm_page_valid() rather than touching page bits
directly.  This improves API compliance, asserts, etc.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D23283
commit 91e31c3c08 (parent ca9fb12a0b)
Author:	Jeff Roberson
Date:	2020-01-23 04:54:49 +00:00

5 changed files with 22 additions and 15 deletions
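
Every hunk below applies the same conversion: rather than grabbing a page with
VM_ALLOC_NOBUSY and then writing m->valid behind the API's back, the page is
left exclusive-busied by vm_page_grab()/vm_page_grab_pages() (the default once
VM_ALLOC_NOBUSY is dropped), marked fully valid with vm_page_valid(), and only
then released with vm_page_xunbusy(). A minimal sketch of the before/after
idiom, assuming a write-locked object `obj` and a hypothetical page index
`pidx`:

	vm_page_t m;

	/* Before: page never busied; valid bits poked directly. */
	m = vm_page_grab(obj, pidx,
	    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
	m->valid = VM_PAGE_BITS_ALL;

	/* After: vm_page_grab() returns the page exclusive-busied. */
	m = vm_page_grab(obj, pidx, VM_ALLOC_ZERO | VM_ALLOC_WIRED);
	vm_page_valid(m);	/* set all valid bits through the page API */
	vm_page_xunbusy(m);	/* drop the exclusive busy lock */

Doing the valid-bit update while the page is still busied is what the commit
message means by improved asserts: the update now happens where the page API's
assertions expect it.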

@@ -325,8 +325,9 @@ spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
 	vm_object_reference_locked(mmap->bufobj); // kernel and userland both
 	for (n = 0; n < pages; n++) {
 		m[n] = vm_page_grab(mmap->bufobj, n,
-		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
-		m[n]->valid = VM_PAGE_BITS_ALL;
+		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+		vm_page_valid(m[n]);
+		vm_page_xunbusy(m[n]);
 	}
 	VM_OBJECT_WUNLOCK(mmap->bufobj);
 	pmap_qenter(mmap->kvaddr, m, pages);

@@ -383,8 +383,9 @@ kcov_alloc(struct kcov_info *info, size_t entries)
 	VM_OBJECT_WLOCK(info->bufobj);
 	for (n = 0; n < pages; n++) {
 		m = vm_page_grab(info->bufobj, n,
-		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
-		m->valid = VM_PAGE_BITS_ALL;
+		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1);
 	}
 	VM_OBJECT_WUNLOCK(info->bufobj);

@@ -388,7 +388,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
 		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
 		    &a)) {
 			pmap_zero_page(pa[i]);
-			pa[i]->valid = VM_PAGE_BITS_ALL;
+			vm_page_valid(pa[i]);
 			MPASS(pa[i]->dirty == 0);
 			vm_page_xunbusy(pa[i]);
 			i++;

@@ -340,10 +340,12 @@ vm_thread_stack_create(struct domainset *ds, vm_object_t *ksobjp, int pages)
 	 * page of stack.
 	 */
 	VM_OBJECT_WLOCK(ksobj);
-	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
-	    VM_ALLOC_WIRED, ma, pages);
-	for (i = 0; i < pages; i++)
-		ma[i]->valid = VM_PAGE_BITS_ALL;
+	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
+	    ma, pages);
+	for (i = 0; i < pages; i++) {
+		vm_page_valid(ma[i]);
+		vm_page_xunbusy(ma[i]);
+	}
 	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
 	*ksobjp = ksobj;

@@ -193,7 +193,7 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
@@ -223,7 +223,8 @@ retry:
 		    vm_phys_domain(m), domain));
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
@@ -284,7 +285,7 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
 	npages = atop(size);
@@ -315,7 +316,8 @@ retry:
 	for (; m < end_m; m++) {
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
@@ -465,7 +467,7 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 	    ("kmem_back_domain: only supports kernel object."));
 
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	if (flags & M_WAITOK)
 		pflags |= VM_ALLOC_WAITFAIL;
@@ -498,7 +500,8 @@ retry:
 			pmap_zero_page(m);
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 #if VM_NRESERVLEVEL > 0