Speed up vm_page_array initialization.
We currently initialize the vm_page array in three passes: one to zero the
array, one to initialize the "order" field of each page (necessary when
inserting them into the vm_phys buddy allocator one-by-one), and one to
initialize the remaining non-zero fields and individually insert each page
into the allocator.

Merge the three passes into one following a suggestion from alc: initialize
vm_page fields in a single pass, and use vm_phys_free_contig() to
efficiently insert physical memory segments into the buddy allocator. This
reduces the initialization time to a third or a quarter of what it was
before on most systems that I tested.

Reviewed by:	alc, kib
MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D12248
commit f93f7cf199 (parent 092c0e867a)
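Before the diff, a hedged illustration of why the pass-merging pays off. The
sketch below is a simplified userspace model, not the kernel code: struct
page, NPAGES, and NFREEORDER are stand-ins for struct vm_page,
vm_page_array_size, and VM_NFREEORDER. The old scheme walks the array three
times; the merged scheme writes each field, including the zeroed ones,
exactly once in a single cache-friendly walk, which is consistent with the
3-4x reduction reported above.

/*
 * one_pass.c: simplified model of merging three initialization passes
 * into one.  Build with: cc -O2 -o one_pass one_pass.c
 */
#include <string.h>

#define	NPAGES		(1UL << 20)
#define	NFREEORDER	13		/* stand-in for VM_NFREEORDER */

struct page {
	unsigned long	phys_addr;
	int		order;
	int		pool;
};

static struct page pages[NPAGES];

/* Old scheme: zero pass, order pass, then a pass for the other fields. */
static void
init_three_pass(void)
{
	unsigned long i;

	memset(pages, 0, sizeof(pages));		/* pass 1 */
	for (i = 0; i < NPAGES; i++)
		pages[i].order = NFREEORDER;		/* pass 2 */
	for (i = 0; i < NPAGES; i++)
		pages[i].phys_addr = i * 4096;		/* pass 3 */
}

/* New scheme: every field is stored exactly once, in one walk. */
static void
init_one_pass(void)
{
	unsigned long i;

	for (i = 0; i < NPAGES; i++) {
		pages[i].phys_addr = i * 4096;
		pages[i].order = NFREEORDER;
		pages[i].pool = 0;		/* explicit, replaces memset */
	}
}

int
main(void)
{
	init_three_pass();
	init_one_pass();
	return (0);
}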
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -429,17 +429,15 @@ vm_page_domain_init(struct vm_domain *vmd)
 vm_offset_t
 vm_page_startup(vm_offset_t vaddr)
 {
-	vm_offset_t mapped;
-	vm_paddr_t high_avail, low_avail, page_range, size;
-	vm_paddr_t new_end;
-	int i;
-	vm_paddr_t pa;
-	vm_paddr_t last_pa;
+	struct vm_domain *vmd;
+	struct vm_phys_seg *seg;
+	vm_page_t m;
 	char *list, *listend;
-	vm_paddr_t end;
-	vm_paddr_t biggestsize;
-	int biggestone;
-	int pages_per_zone;
+	vm_offset_t mapped;
+	vm_paddr_t end, high_avail, low_avail, new_end, page_range, size;
+	vm_paddr_t biggestsize, last_pa, pa;
+	u_long pagecount;
+	int biggestone, i, pages_per_zone, segind;
 
 	biggestsize = 0;
 	biggestone = 0;
@@ -519,6 +517,8 @@ vm_page_startup(vm_offset_t vaddr)
 	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
 	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
 	bzero((void *)vm_page_dump, vm_page_dump_size);
+#else
+	(void)last_pa;
 #endif
 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
 	/*
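The new #else branch deserves a word: last_pa is still declared
unconditionally, but after this commit it is read only by the minidump code
compiled under the conditional above, so kernels built without that support
would otherwise warn about an unused variable. A minimal illustration of the
cast-to-void idiom (FEATURE is a made-up macro):

/* unused.c: the (void) idiom used by the #else branch above. */
#include <stdio.h>

int
main(void)
{
	int last = 42;		/* only consumed when FEATURE is defined */

#ifdef FEATURE
	printf("%d\n", last);
#else
	(void)last;		/* reference the variable to silence -Wunused */
#endif
	return (0);
}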
@@ -623,7 +623,9 @@ vm_page_startup(vm_offset_t vaddr)
 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
 	mapped = pmap_map(&vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
-	vm_page_array = (vm_page_t) mapped;
+	vm_page_array = (vm_page_t)mapped;
+	vm_page_array_size = page_range;
+
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Allocate physical memory for the reservation management system's
@@ -649,34 +651,53 @@ vm_page_startup(vm_offset_t vaddr)
 	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
 
-	/*
-	 * Clear all of the page structures
-	 */
-	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
-	for (i = 0; i < page_range; i++)
-		vm_page_array[i].order = VM_NFREEORDER;
-	vm_page_array_size = page_range;
-
 	/*
 	 * Initialize the physical memory allocator.
 	 */
 	vm_phys_init();
 
 	/*
-	 * Add every available physical page that is not blacklisted to
-	 * the free lists.
+	 * Initialize the page structures and add every available page to the
+	 * physical memory allocator's free lists.
 	 */
 	vm_cnt.v_page_count = 0;
 	vm_cnt.v_free_count = 0;
-	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
-		pa = phys_avail[i];
-		last_pa = phys_avail[i + 1];
-		while (pa < last_pa) {
-			vm_phys_add_page(pa);
-			pa += PAGE_SIZE;
+	for (segind = 0; segind < vm_phys_nsegs; segind++) {
+		seg = &vm_phys_segs[segind];
+		for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
+			vm_phys_init_page(pa);
+
+		/*
+		 * Add the segment to the free lists only if it is covered by
+		 * one of the ranges in phys_avail.  Because we've added the
+		 * ranges to the vm_phys_segs array, we can assume that each
+		 * segment is either entirely contained in one of the ranges,
+		 * or doesn't overlap any of them.
+		 */
+		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+			if (seg->start < phys_avail[i] ||
+			    seg->end > phys_avail[i + 1])
+				continue;
+
+			m = seg->first_page;
+			pagecount = (u_long)atop(seg->end - seg->start);
+
+			mtx_lock(&vm_page_queue_free_mtx);
+			vm_phys_free_contig(m, pagecount);
+			vm_phys_freecnt_adj(m, (int)pagecount);
+			mtx_unlock(&vm_page_queue_free_mtx);
+			vm_cnt.v_page_count += (u_int)pagecount;
+
+			vmd = &vm_dom[seg->domain];
+			vmd->vmd_page_count += (u_int)pagecount;
+			vmd->vmd_segs |= 1UL << m->segind;
+			break;
 		}
 	}
 
+	/*
+	 * Remove blacklisted pages from the physical memory allocator.
+	 */
 	TAILQ_INIT(&blacklist_head);
 	vm_page_blacklist_load(&list, &listend);
 	vm_page_blacklist_check(list, listend);
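The efficiency of the new loop comes from vm_phys_free_contig(), which
inserts an arbitrary-length run of pages with a handful of buddy-queue
operations instead of one per page. The sketch below approximates the
decomposition of a run into maximal aligned power-of-two blocks; it is a
hedged model of the idea, not the kernel routine, which also maintains
free-list, pool, and counter state.

/*
 * buddy_decomp.c: approximate model of how a contiguous run of pages
 * can be carved into buddy-aligned blocks.  MAX_ORDER is illustrative.
 */
#include <stdio.h>

#define	MAX_ORDER	12	/* stand-in for VM_NFREEORDER - 1 */

static void
free_contig(unsigned long pfn, unsigned long npages)
{
	int order;

	while (npages > 0) {
		/*
		 * Pick the largest block that is aligned at pfn and does
		 * not overrun the run; order 0 always qualifies.
		 */
		order = MAX_ORDER;
		while (order > 0 && ((pfn & ((1UL << order) - 1)) != 0 ||
		    (1UL << order) > npages))
			order--;
		printf("free: pfn %lu, order %d (%lu pages)\n",
		    pfn, order, 1UL << order);
		pfn += 1UL << order;
		npages -= 1UL << order;
	}
}

int
main(void)
{
	/* 13 pages starting at pfn 3 decompose as orders 0, 2, 3. */
	free_contig(3, 13);
	return (0);
}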
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -729,32 +729,28 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
 }
 
 /*
- * Initialize a physical page and add it to the free lists.
+ * Initialize a physical page in preparation for adding it to the free
+ * lists.
  */
 void
-vm_phys_add_page(vm_paddr_t pa)
+vm_phys_init_page(vm_paddr_t pa)
 {
 	vm_page_t m;
-	struct vm_domain *vmd;
 
-	vm_cnt.v_page_count++;
 	m = vm_phys_paddr_to_vm_page(pa);
+	m->object = NULL;
+	m->wire_count = 0;
 	m->busy_lock = VPB_UNBUSIED;
+	m->hold_count = 0;
+	m->flags = m->aflags = m->oflags = 0;
 	m->phys_addr = pa;
 	m->queue = PQ_NONE;
+	m->psind = 0;
 	m->segind = vm_phys_paddr_to_segind(pa);
-	vmd = vm_phys_domain(m);
-	vmd->vmd_page_count++;
-	vmd->vmd_segs |= 1UL << m->segind;
-	KASSERT(m->order == VM_NFREEORDER,
-	    ("vm_phys_add_page: page %p has unexpected order %d",
-	    m, m->order));
+	m->order = VM_NFREEORDER;
 	m->pool = VM_FREEPOOL_DEFAULT;
+	m->valid = m->dirty = 0;
 	pmap_page_init(m);
-	mtx_lock(&vm_page_queue_free_mtx);
-	vm_phys_freecnt_adj(m, 1);
-	vm_phys_free_pages(m, 0);
-	mtx_unlock(&vm_page_queue_free_mtx);
 }
 
 /*
@@ -910,6 +906,7 @@ vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
 {
 	long i;
 
+	bzero(range, page_count * sizeof(*range));
 	for (i = 0; i < page_count; i++) {
 		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
 		range[i].oflags &= ~VPO_UNMANAGED;
@@ -984,7 +981,7 @@ vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
 alloc:
 #endif
 	fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
-	    M_WAITOK | M_ZERO);
+	    M_WAITOK);
 #ifdef VM_PHYSSEG_DENSE
 	}
 #endif
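The two vm_phys_fictitious hunks above appear to work as a pair, a
consequence of dropping the boot-time bzero: in the VM_PHYSSEG_DENSE case a
fictitious range can be backed by entries of vm_page_array, which are no
longer pre-zeroed, so vm_phys_fictitious_init_range() now clears its input
itself; that in turn makes M_ZERO on the malloc() path redundant. A toy
sketch of this zero-at-the-initializer pattern, with made-up names:

/*
 * zero_at_init.c: model of moving zeroing from the allocation site into
 * the initializer, so malloc'ed and preallocated backing storage are
 * handled identically.  All names here are illustrative.
 */
#include <stdlib.h>
#include <string.h>

struct fict_page {
	unsigned long	phys_addr;
	unsigned	flags;
};

static void
init_range(struct fict_page *range, size_t n)
{
	size_t i;

	/* The caller may hand us memory with stale contents. */
	memset(range, 0, n * sizeof(*range));
	for (i = 0; i < n; i++)
		range[i].phys_addr = i * 4096;
}

int
main(void)
{
	struct fict_page *fp;

	/* Plain malloc suffices; the initializer zeroes for us. */
	fp = malloc(16 * sizeof(*fp));
	if (fp == NULL)
		return (1);
	init_range(fp, 16);
	free(fp);
	return (0);
}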
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -69,7 +69,6 @@ extern int vm_phys_nsegs;
 /*
  * The following functions are only to be used by the virtual memory system.
  */
-void vm_phys_add_page(vm_paddr_t pa);
 void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end);
 vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary);
@@ -83,6 +82,7 @@ vm_page_t vm_phys_fictitious_to_vm_page(vm_paddr_t pa);
 void vm_phys_free_contig(vm_page_t m, u_long npages);
 void vm_phys_free_pages(vm_page_t m, int order);
 void vm_phys_init(void);
+void vm_phys_init_page(vm_paddr_t pa);
 vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
 vm_page_t vm_phys_scan_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary, int options);
|
Loading…
x
Reference in New Issue
Block a user