- Rework pmap_map() to take advantage of direct-mapped segments on
  supported architectures such as the alpha.  This allows us to save
  on kernel virtual address space, TLB entries, and (on the ia64) VHPT
  entries.  pmap_map() now modifies the passed-in virtual address on
  architectures that do not support direct-mapped segments to point to
  the next available virtual address.  It also returns the actual
  address that the request was mapped to.  (A caller-side sketch of the
  new interface follows below.)
- On the IA64 don't use a special zone of PV entries needed for early
  calls to pmap_kenter() during pmap_init().  This gets us in trouble
  because we end up trying to use the zone allocator before it is
  initialized.  Instead, with the pmap_map() change, the number of needed
  PV entries is small enough that we can get by with a static pool that is
  used until pmap_init() is complete.
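
A rough caller-side sketch of the reworked pmap_map() interface (the
variable names and physical range here are hypothetical; the real
caller is vm_page_startup(), shown in the last hunks below):

	vm_offset_t vaddr, mapped;

	vaddr = suggested_kva;			/* hypothetical KVA hint */
	mapped = pmap_map(&vaddr, phys_start, phys_end,
	    VM_PROT_READ | VM_PROT_WRITE);
	/*
	 * Always use 'mapped' to access the memory: on a direct-mapped
	 * architecture it lies in the direct-mapped segment and 'vaddr'
	 * is left unchanged; on other architectures the range is entered
	 * at the old 'vaddr', and 'vaddr' now points at the first KVA
	 * past the mapping, ready for the next consumer.
	 */
	bzero((caddr_t)mapped, phys_end - phys_start);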

Submitted by:		dfr
Debugging help:		peter
Tested by:		me
John Baldwin 2001-03-06 06:06:42 +00:00
parent b67cb27739
commit 968950e5d1
6 changed files with 73 additions and 70 deletions

View File

@@ -884,18 +884,18 @@ pmap_kremove(vm_offset_t va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
* The value passed in '*virt' is a suggested virtual address for
* the mapping. Architectures which can support a direct-mapped
* physical to virtual region can return the appropriate address
* within that region, leaving '*virt' unchanged. Other
* architectures should map the pages starting at '*virt' and
* update '*virt' with the first usable address after the mapped
* region.
*/
vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
while (start < end) {
pmap_kenter(virt, start);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
return ALPHA_PHYS_TO_K0SEG(start);
}
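
For context, the alpha's K0SEG segment maps all of physical memory at
a fixed virtual offset, which is why the function can now simply hand
back a K0SEG address without consuming KVA, PTEs, or TLB entries.  A
simplified sketch of the idea (paraphrased, not the verbatim alpha
headers; the base value is an assumption):

	/* K0SEG direct mapping: offset the physical address into K0SEG. */
	#define ALPHA_K0SEG_BASE	0xfffffc0000000000UL	/* assumed */
	#define ALPHA_PHYS_TO_K0SEG(pa)	((vm_offset_t)(pa) | ALPHA_K0SEG_BASE)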

View File

@@ -711,22 +711,30 @@ pmap_kremove(va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
* The value passed in '*virt' is a suggested virtual address for
* the mapping. Architectures which can support a direct-mapped
* physical to virtual region can return the appropriate address
* within that region, leaving '*virt' unchanged. Other
* architectures should map the pages starting at '*virt' and
* update '*virt' with the first usable address after the mapped
* region.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
vm_offset_t virt;
vm_offset_t *virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
vm_offset_t sva = *virt;
vm_offset_t va = sva;
while (start < end) {
pmap_kenter(virt, start);
virt += PAGE_SIZE;
pmap_kenter(va, start);
va += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
*virt = va;
return (sva);
}

View File

@@ -711,22 +711,30 @@ pmap_kremove(va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
* The value passed in '*virt' is a suggested virtual address for
* the mapping. Architectures which can support a direct-mapped
* physical to virtual region can return the appropriate address
* within that region, leaving '*virt' unchanged. Other
* architectures should map the pages starting at '*virt' and
* update '*virt' with the first usable address after the mapped
* region.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
vm_offset_t virt;
vm_offset_t *virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
vm_offset_t sva = *virt;
vm_offset_t va = sva;
while (start < end) {
pmap_kenter(virt, start);
virt += PAGE_SIZE;
pmap_kenter(va, start);
va += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
*virt = va;
return (sva);
}

View File

@@ -220,12 +220,9 @@ static int pmap_ridbits = 18;
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static vm_zone_t pvbootzone;
static struct vm_zone pvbootzone_store;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;
static struct pv_entry *pvbootinit;
static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static pv_entry_t get_pv_entry __P((void));
@@ -271,7 +268,6 @@ void
pmap_bootstrap()
{
int i;
int boot_pvs;
/*
* Setup RIDs. We use the bits above pmap_ridbits for a
@@ -318,19 +314,6 @@ pmap_bootstrap()
ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
/*
* We need some PVs to cope with pmap_kenter() calls prior to
* pmap_init(). This is all a bit flaky and needs to be
* rethought, probably by avoiding the zone allocator
* entirely.
*/
boot_pvs = 32768;
pvbootzone = &pvbootzone_store;
pvbootinit = (struct pv_entry *)
pmap_steal_memory(boot_pvs * sizeof (struct pv_entry));
zbootinit(pvbootzone, "PV ENTRY", sizeof (struct pv_entry),
pvbootinit, boot_pvs);
/*
* Set up proc0's PCB.
*/
@@ -752,8 +735,23 @@ free_pv_entry(pv_entry_t pv)
static pv_entry_t
get_pv_entry(void)
{
if (!pvinit)
return zalloc(pvbootzone);
/*
* We can get called a few times really early before
* pmap_init() has finished allocating the pvzone (mostly as a
* result of the call to kmem_alloc() in pmap_init()). We allow
* a small number of entries to be allocated statically to
* cover this.
*/
if (!pvinit) {
#define PV_BOOTSTRAP_NEEDED 512
static struct pv_entry pvbootentries[PV_BOOTSTRAP_NEEDED];
static int pvbootnext = 0;
if (pvbootnext == PV_BOOTSTRAP_NEEDED)
panic("get_pv_entry: called too many times"
" before pmap_init is finished");
return &pvbootentries[pvbootnext++];
}
pv_entry_count++;
if (pv_entry_high_water &&
@@ -1115,22 +1113,18 @@ pmap_kremove(vm_offset_t va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
* The value passed in '*virt' is a suggested virtual address for
* the mapping. Architectures which can support a direct-mapped
* physical to virtual region can return the appropriate address
* within that region, leaving '*virt' unchanged. Other
* architectures should map the pages starting at '*virt' and
* update '*virt' with the first usable address after the mapped
* region.
*/
vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
/*
* XXX We should really try to use larger pagesizes here to
* cut down the number of PVs used.
*/
while (start < end) {
pmap_kenter(virt, start);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
return IA64_PHYS_TO_RR7(start);
}
/*

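Similarly, the ia64 treats region 7 as a direct physical mapping, so
IA64_PHYS_TO_RR7() just offsets the physical address into that region,
and the mapping costs no VHPT entries, as the commit message notes.  A
simplified sketch of the idea (paraphrased, not the verbatim ia64
headers):

	/* Region bases are selected by the top three address bits. */
	#define IA64_RR_BASE(n)		(((u_int64_t)(n)) << 61)
	#define IA64_PHYS_TO_RR7(pa)	((pa) | IA64_RR_BASE(7))
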
View File

@@ -110,7 +110,7 @@ boolean_t pmap_is_modified __P((vm_page_t m));
boolean_t pmap_ts_referenced __P((vm_page_t m));
void pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
void pmap_kremove __P((vm_offset_t));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
vm_offset_t pmap_map __P((vm_offset_t *, vm_offset_t, vm_offset_t, int));
void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
int pagelimit));

View File

@@ -180,7 +180,7 @@ vm_offset_t
vm_page_startup(starta, enda, vaddr)
register vm_offset_t starta;
vm_offset_t enda;
register vm_offset_t vaddr;
vm_offset_t vaddr;
{
register vm_offset_t mapped;
register struct vm_page **bucket;
@@ -242,8 +242,6 @@ vm_page_startup(starta, enda, vaddr)
*
* Note: This computation can be tweaked if desired.
*/
vm_page_buckets = (struct vm_page **)vaddr;
bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
while (vm_page_bucket_count < atop(total))
@@ -257,12 +255,12 @@ vm_page_startup(starta, enda, vaddr)
*/
new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
new_end = trunc_page(new_end);
mapped = round_page(vaddr);
vaddr = pmap_map(mapped, new_end, end,
mapped = pmap_map(&vaddr, new_end, end,
VM_PROT_READ | VM_PROT_WRITE);
vaddr = round_page(vaddr);
bzero((caddr_t) mapped, vaddr - mapped);
bzero((caddr_t) mapped, end - new_end);
vm_page_buckets = (struct vm_page **)mapped;
bucket = vm_page_buckets;
for (i = 0; i < vm_page_bucket_count; i++) {
*bucket = NULL;
bucket++;
@@ -281,20 +279,15 @@ vm_page_startup(starta, enda, vaddr)
(end - new_end)) / PAGE_SIZE;
end = new_end;
/*
* Initialize the mem entry structures now, and put them in the free
* queue.
*/
vm_page_array = (vm_page_t) vaddr;
mapped = vaddr;
/*
* Validate these addresses.
*/
new_end = trunc_page(end - page_range * sizeof(struct vm_page));
mapped = pmap_map(mapped, new_end, end,
mapped = pmap_map(&vaddr, new_end, end,
VM_PROT_READ | VM_PROT_WRITE);
vm_page_array = (vm_page_t) mapped;
/*
* Clear all of the page structures
@@ -321,7 +314,7 @@ vm_page_startup(starta, enda, vaddr)
pa += PAGE_SIZE;
}
}
return (mapped);
return (vaddr);
}
/*