Define every architecture as either VM_PHYSSEG_DENSE or
VM_PHYSSEG_SPARSE depending on whether the physical address space is
densely or sparsely populated with memory.  The effect of this
definition is to determine which of two implementations of
vm_page_array and PHYS_TO_VM_PAGE() is used.  The legacy
implementation is obtained by defining VM_PHYSSEG_DENSE, and a new
implementation that trades off time for space is obtained by defining
VM_PHYSSEG_SPARSE.  For now, all architectures except for ia64 and
sparc64 define VM_PHYSSEG_DENSE.  Defining VM_PHYSSEG_SPARSE on ia64
allows the entirety of my Itanium 2's memory to be used.  Previously,
only the first 1 GB could be used.  Defining VM_PHYSSEG_SPARSE on
sparc64 allows USIIIi-based systems to boot without crashing.
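
To make the space/time tradeoff concrete, here is a small userland sketch
(illustration only, not kernel code): the memory layout and the assumed
sizeof(struct vm_page) are invented, and the real kernel sizes the array in
vm_page_startup() rather than like this.  It compares how many vm_page
structures a dense and a sparse vm_page_array would need for a machine whose
RAM sits partly far above the bottom of the physical address space.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define VM_PAGE_BYTES	104ULL	/* assumed sizeof(struct vm_page), illustration only */

int
main(void)
{
	/* Hypothetical phys_avail[]-style layout: start/end pairs, zero-terminated. */
	uint64_t phys_avail[] = {
		0x0ULL,		 0x40000000ULL,		/* [0, 1 GB) */
		0x4000000000ULL, 0x4100000000ULL,	/* [256 GB, 260 GB) */
		0, 0
	};
	uint64_t high_water = 0x4100000000ULL;
	uint64_t dense, sparse = 0;
	int i;

	/* Dense: one vm_page for every page frame below high_water (first_page is 0 here). */
	dense = high_water / PAGE_SIZE;

	/* Sparse: one vm_page only for page frames that actually exist. */
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		sparse += (phys_avail[i + 1] - phys_avail[i]) / PAGE_SIZE;

	printf("dense:  %ju entries, ~%ju MB of vm_page structs\n",
	    (uintmax_t)dense, (uintmax_t)(dense * VM_PAGE_BYTES >> 20));
	printf("sparse: %ju entries, ~%ju MB of vm_page structs\n",
	    (uintmax_t)sparse, (uintmax_t)(sparse * VM_PAGE_BYTES >> 20));
	return (0);
}

With these invented numbers the dense array needs several gigabytes of
vm_page structures to describe 5 GB of RAM, while the sparse array needs on
the order of a hundred megabytes; that is the space the sparse implementation
saves, at the cost of a slower PHYS_TO_VM_PAGE() lookup.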

This change is a combination of Nathan Whitehorn's patch and my own
work in perforce.

Discussed with: kmacy, marius, Nathan Whitehorn
PR:		112194
Alan Cox 2007-05-05 19:50:28 +00:00
parent 4887800305
commit 04a18977c8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=169291
10 changed files with 63 additions and 17 deletions

View File

@@ -87,6 +87,11 @@
  */
 #define UMA_MD_SMALL_ALLOC
 
+/*
+ * The physical address space is densely populated.
+ */
+#define VM_PHYSSEG_DENSE
+
 /*
  * Virtual addresses of things. Derived from the page directory and
  * page table indexes from pmap.h for precision.

View File

@@ -72,6 +72,11 @@
 
 #define VM_PHYSSEG_NOADD
 
+/*
+ * The physical address space is densely populated.
+ */
+#define VM_PHYSSEG_DENSE
+
 /*
  * we support 2 free lists:
  *

View File

@@ -81,6 +81,11 @@
 
 #define MAXSLP 20
 
+/*
+ * The physical address space is densely populated.
+ */
+#define VM_PHYSSEG_DENSE
+
 /*
  * Kernel physical load address.
  */

View File

@@ -701,21 +701,6 @@ ia64_init(void)
 		if (md->md_type != EFI_MD_TYPE_FREE)
 			continue;
 
-		/*
-		 * Wimp out for now since we do not DTRT here with
-		 * pci bus mastering (no bounce buffering, for example).
-		 */
-		if (pfn0 >= ia64_btop(0x100000000UL)) {
-			printf("Skipping memory chunk start 0x%lx\n",
-			    md->md_phys);
-			continue;
-		}
-		if (pfn1 >= ia64_btop(0x100000000UL)) {
-			printf("Skipping memory chunk end 0x%lx\n",
-			    md->md_phys + md->md_pages * 4096);
-			continue;
-		}
-
 		/*
 		 * We have a memory descriptor that describes conventional
 		 * memory that is for general use. We must determine if the

View File

@@ -112,6 +112,11 @@
  */
 #define UMA_MD_SMALL_ALLOC
 
+/*
+ * The physical address space is sparsely populated.
+ */
+#define VM_PHYSSEG_SPARSE
+
 /*
  * Manipulating region bits of an address.
  */

View File

@@ -107,6 +107,11 @@ struct pmap_physseg {
 
 #define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
 #define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
 
+/*
+ * The physical address space is densely populated.
+ */
+#define VM_PHYSSEG_DENSE
+
 #define VM_NFREELIST 1
 #define VM_FREELIST_DEFAULT 0

View File

@@ -77,6 +77,11 @@
  */
 #define MAXSLP 20
 
+/*
+ * The physical address space is sparsely populated.
+ */
+#define VM_PHYSSEG_SPARSE
+
 /*
  * Address space layout.
  *

View File

@@ -77,6 +77,11 @@
  */
 #define MAXSLP 20
 
+/*
+ * The physical address space is densely populated.
+ */
+#define VM_PHYSSEG_DENSE
+
 /*
  * Address space layout.
  *

View File

@@ -298,7 +298,15 @@ vm_page_startup(vm_offset_t vaddr)
 	 * page).
 	 */
 	first_page = low_water / PAGE_SIZE;
+#ifdef VM_PHYSSEG_SPARSE
+	page_range = 0;
+	for (i = 0; phys_avail[i + 1] != 0; i += 2)
+		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
+#elif defined(VM_PHYSSEG_DENSE)
 	page_range = high_water / PAGE_SIZE - first_page;
+#else
+#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
+#endif
 	npages = (total - (page_range * sizeof(struct vm_page)) -
 	    (end - new_end)) / PAGE_SIZE;
 	end = new_end;
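
For the hypothetical two-segment layout used in the sketch after the commit
message ([0, 1 GB) plus [256 GB, 260 GB), 4 KB pages), the sparse branch above
would give page_range = atop(1 GB) + atop(4 GB) = 262144 + 1048576 = 1310720
pages, whereas the dense formula high_water / PAGE_SIZE - first_page would
give atop(260 GB) = 68157440 pages, and npages (and the vm_page_array
allocation) would be sized accordingly.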

View File

@@ -240,6 +240,9 @@ extern struct pq_coloring page_queue_coloring;
 #define ACT_MAX 64
 
 #ifdef _KERNEL
+
+#include <vm/vm_param.h>
+
 /*
  * Each pageable resident page falls into one of four lists:
  *
@@ -275,8 +278,23 @@ extern long first_page; /* first physical page number */
 
 #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
 
-#define PHYS_TO_VM_PAGE(pa) \
-		(&vm_page_array[atop(pa) - first_page ])
+static __inline vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
+
+static __inline vm_page_t
+PHYS_TO_VM_PAGE(vm_paddr_t pa)
+{
+#ifdef VM_PHYSSEG_SPARSE
+	int i, j = 0;
+
+	for (i = 0; phys_avail[i + 1] <= pa || phys_avail[i] > pa; i += 2)
+		j += atop(phys_avail[i + 1] - phys_avail[i]);
+	return (&vm_page_array[j + atop(pa - phys_avail[i])]);
+#elif defined(VM_PHYSSEG_DENSE)
+	return (&vm_page_array[atop(pa) - first_page]);
+#else
+#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
+#endif
+}
 
 extern struct mtx vm_page_queue_mtx;
 #define vm_page_lock_queues()   mtx_lock(&vm_page_queue_mtx)
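
As a worked illustration of the sparse lookup above (userland sketch only:
the phys_avail[] contents are invented, atop() is reduced to a 4 KB page
shift, and, like the kernel code, it assumes pa falls inside some segment):

#include <stdint.h>
#include <stdio.h>

#define atop(x)	((uint64_t)(x) >> 12)	/* bytes -> 4 KB pages */

int
main(void)
{
	/* Hypothetical segments: [0, 1 GB) and [256 GB, 260 GB), zero-terminated. */
	uint64_t phys_avail[] = {
		0x0ULL,		 0x40000000ULL,
		0x4000000000ULL, 0x4100000000ULL,
		0, 0
	};
	uint64_t pa = 0x4000000000ULL + 2 * 4096;	/* two pages into the 2nd segment */
	uint64_t j = 0;
	int i;

	/* Skip the segments that do not contain pa, counting their pages. */
	for (i = 0; phys_avail[i + 1] <= pa || phys_avail[i] > pa; i += 2)
		j += atop(phys_avail[i + 1] - phys_avail[i]);

	/* Prints 262146: 262144 pages in the first segment plus offset 2. */
	printf("vm_page_array index = %ju\n",
	    (uintmax_t)(j + atop(pa - phys_avail[i])));
	return (0);
}

The walk is linear in the number of phys_avail ranges, which is the time the
sparse case trades for the space saved in vm_page_array; the dense case is a
single subtraction, atop(pa) - first_page.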