Call pmap_qenter for each page when creating the kcov buffer.

This removes the need to allocate a buffer to hold the vm_page_t objects
at the cost of extra IPIs on some architectures.

Reviewed by:	kib
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D19252
This commit is contained in:
andrew 2019-02-20 22:32:28 +00:00
parent 229dccf125
commit fc60fcc79e

View File

@@ -357,7 +357,7 @@ static int
 kcov_alloc(struct kcov_info *info, size_t entries)
 {
 	size_t n, pages;
-	vm_page_t *m;
+	vm_page_t m;
 
 	KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer"));
 	KASSERT(info->state == KCOV_STATE_OPEN,
@@ -376,16 +376,14 @@ kcov_alloc(struct kcov_info *info, size_t entries)
 	info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize,
 	    PROT_READ | PROT_WRITE, 0, curthread->td_ucred);
 
-	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
 	VM_OBJECT_WLOCK(info->bufobj);
 	for (n = 0; n < pages; n++) {
-		m[n] = vm_page_grab(info->bufobj, n,
+		m = vm_page_grab(info->bufobj, n,
 		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
-		m[n]->valid = VM_PAGE_BITS_ALL;
+		m->valid = VM_PAGE_BITS_ALL;
+		pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1);
 	}
 	VM_OBJECT_WUNLOCK(info->bufobj);
-	pmap_qenter(info->kvaddr, m, pages);
-	free(m, M_TEMP);
 
 	info->entries = entries;