Back out the pmap_map() change for now; it isn't completely stable on the i386.
parent 1b6c0f0436
commit 4a01ebd482

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=73903
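
For orientation: the change being backed out had converted pmap_map() from taking the kernel virtual address by value to taking a pointer to a *suggested* address, so that direct-mapped architectures could ignore the suggestion entirely. The standalone sketch below contrasts the two conventions. It is illustrative only: the names pmap_map_byref/pmap_map_byval are hypothetical, the 8 KB page size and 64-bit vm_offset_t are assumptions, and the prot argument is omitted.

#include <stdio.h>

typedef unsigned long vm_offset_t;      /* assumption: 64-bit platform */
#define PAGE_SIZE 8192UL                /* hypothetical page size */

/* Stub: the real pmap_kenter() wires one physical page at 'va'. */
static void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
        printf("map pa %#lx at va %#lx\n", pa, va);
}

/*
 * Interface being backed out: '*virt' is only a suggestion. A generic
 * pmap consumes KVA and advances '*virt'; a direct-mapped pmap would
 * instead return an address inside its direct map and leave '*virt'
 * untouched.
 */
static vm_offset_t
pmap_map_byref(vm_offset_t *virt, vm_offset_t start, vm_offset_t end)
{
        vm_offset_t sva = *virt, va = sva;

        while (start < end) {
                pmap_kenter(va, start);
                va += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        *virt = va;             /* first usable KVA after the mapping */
        return (sva);           /* where the range actually lives */
}

/*
 * Restored interface: KVA is passed by value; the return value is the
 * first usable address after the mapped region.
 */
static vm_offset_t
pmap_map_byval(vm_offset_t virt, vm_offset_t start, vm_offset_t end)
{
        while (start < end) {
                pmap_kenter(virt, start);
                virt += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        return (virt);
}

int
main(void)
{
        vm_offset_t va = 0x400000, base;

        base = pmap_map_byref(&va, 0, 2 * PAGE_SIZE);
        printf("byref: base %#lx, next free %#lx\n", base, va);

        va = pmap_map_byval(base, 0, 2 * PAGE_SIZE);
        printf("byval: next free %#lx\n", va);
        return (0);
}
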
@@ -884,18 +884,18 @@ pmap_kremove(vm_offset_t va)
  * Used to map a range of physical addresses into kernel
  * virtual address space.
  *
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
  */
 vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
 {
-        return ALPHA_PHYS_TO_K0SEG(start);
+        while (start < end) {
+                pmap_kenter(virt, start);
+                virt += PAGE_SIZE;
+                start += PAGE_SIZE;
+        }
+        return (virt);
 }
 
 
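The hunk above shows the payoff of the pointer interface: an architecture with a permanent physical-to-virtual window can satisfy pmap_map() without consuming any kernel virtual address space, which is what the backed-out 'return ALPHA_PHYS_TO_K0SEG(start)' did. A minimal standalone sketch of that shortcut, with an illustrative base constant (not the real K0SEG definition) and a 64-bit platform assumed:

typedef unsigned long vm_offset_t;
#define DIRECT_MAP_BASE 0xfffffc0000000000UL    /* illustrative constant */

/*
 * Direct-map variant of the by-reference interface: return an address
 * inside the permanent physical window and leave '*virt' untouched,
 * so the caller's KVA cursor does not move.
 */
static vm_offset_t
pmap_map_direct(vm_offset_t *virt, vm_offset_t start, vm_offset_t end)
{
        (void)virt;             /* suggestion deliberately unused */
        (void)end;              /* the whole range is already visible */
        return (DIRECT_MAP_BASE + start);  /* cf. ALPHA_PHYS_TO_K0SEG(start) */
}

int
main(void)
{
        vm_offset_t va = 0, kva = pmap_map_direct(&va, 0x2000, 0x4000);
        return (kva == DIRECT_MAP_BASE + 0x2000 && va == 0) ? 0 : 1;
}
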
@@ -711,30 +711,22 @@ pmap_kremove(va)
  * Used to map a range of physical addresses into kernel
  * virtual address space.
  *
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
  */
 vm_offset_t
 pmap_map(virt, start, end, prot)
-        vm_offset_t *virt;
+        vm_offset_t virt;
         vm_offset_t start;
         vm_offset_t end;
         int prot;
 {
-        vm_offset_t sva = *virt;
-        vm_offset_t va = sva;
         while (start < end) {
-                pmap_kenter(va, start);
-                va += PAGE_SIZE;
+                pmap_kenter(virt, start);
+                virt += PAGE_SIZE;
                 start += PAGE_SIZE;
         }
-        *virt = va;
-        return (sva);
+        return (virt);
 }
 
 
@@ -711,30 +711,22 @@ pmap_kremove(va)
  * Used to map a range of physical addresses into kernel
  * virtual address space.
  *
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
  */
 vm_offset_t
 pmap_map(virt, start, end, prot)
-        vm_offset_t *virt;
+        vm_offset_t virt;
         vm_offset_t start;
         vm_offset_t end;
         int prot;
 {
-        vm_offset_t sva = *virt;
-        vm_offset_t va = sva;
         while (start < end) {
-                pmap_kenter(va, start);
-                va += PAGE_SIZE;
+                pmap_kenter(virt, start);
+                virt += PAGE_SIZE;
                 start += PAGE_SIZE;
         }
-        *virt = va;
-        return (sva);
+        return (virt);
 }
 
 
@@ -220,9 +220,12 @@ static int pmap_ridbits = 18;
 static vm_zone_t pvzone;
 static struct vm_zone pvzone_store;
 static struct vm_object pvzone_obj;
+static vm_zone_t pvbootzone;
+static struct vm_zone pvbootzone_store;
 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
 static int pmap_pagedaemon_waken = 0;
 static struct pv_entry *pvinit;
+static struct pv_entry *pvbootinit;
 
 static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
 static pv_entry_t get_pv_entry __P((void));
@@ -268,6 +271,7 @@ void
 pmap_bootstrap()
 {
         int i;
+        int boot_pvs;
 
         /*
          * Setup RIDs. We use the bits above pmap_ridbits for a
@@ -314,6 +318,19 @@ pmap_bootstrap()
         ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
         ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
 
+        /*
+         * We need some PVs to cope with pmap_kenter() calls prior to
+         * pmap_init(). This is all a bit flaky and needs to be
+         * rethought, probably by avoiding the zone allocator
+         * entirely.
+         */
+        boot_pvs = 32768;
+        pvbootzone = &pvbootzone_store;
+        pvbootinit = (struct pv_entry *)
+                pmap_steal_memory(boot_pvs * sizeof (struct pv_entry));
+        zbootinit(pvbootzone, "PV ENTRY", sizeof (struct pv_entry),
+                  pvbootinit, boot_pvs);
+
         /*
          * Set up proc0's PCB.
          */
@@ -735,23 +752,8 @@ free_pv_entry(pv_entry_t pv)
 static pv_entry_t
 get_pv_entry(void)
 {
-        /*
-         * We can get called a few times really early before
-         * pmap_init() has finished allocating the pvzone (mostly as a
-         * result of the call to kmem_alloc() in pmap_init(). We allow
-         * a small number of entries to be allocated statically to
-         * cover this.
-         */
-        if (!pvinit) {
-#define PV_BOOTSTRAP_NEEDED 512
-                static struct pv_entry pvbootentries[PV_BOOTSTRAP_NEEDED];
-                static int pvbootnext = 0;
-
-                if (pvbootnext == PV_BOOTSTRAP_NEEDED)
-                        panic("get_pv_entry: called too many times"
-                              " before pmap_init is finished");
-                return &pvbootentries[pvbootnext++];
-        }
+        if (!pvinit)
+                return zalloc(pvbootzone);
 
         pv_entry_count++;
         if (pv_entry_high_water &&
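Both sides of the get_pv_entry() hunk above solve the same chicken-and-egg problem: pmap_kenter() needs PV entries before pmap_init() has created the real pvzone. The backed-out code fell back to a static array guarded by a panic; the restored code draws from pvbootzone, pre-seeded with zbootinit() in pmap_bootstrap() (earlier hunk). A standalone sketch of the static-pool fallback pattern, with hypothetical names and abort()/malloc() standing in for the kernel's panic() and zone allocator:

#include <stdio.h>
#include <stdlib.h>

struct pv_entry { struct pv_entry *next; };

#define PV_BOOTSTRAP_NEEDED 512
static struct pv_entry pvbootentries[PV_BOOTSTRAP_NEEDED];
static int pvbootnext;
static int pmap_initialized;    /* stands in for 'pvinit != NULL' */

static struct pv_entry *
get_pv_entry(void)
{
        if (!pmap_initialized) {
                /* Early callers are served from a fixed boot pool. */
                if (pvbootnext == PV_BOOTSTRAP_NEEDED) {
                        fprintf(stderr, "get_pv_entry: boot pool exhausted\n");
                        abort();        /* the kernel would panic() here */
                }
                return (&pvbootentries[pvbootnext++]);
        }
        /* Once initialization completes, use the normal allocator. */
        return (malloc(sizeof(struct pv_entry)));
}

int
main(void)
{
        struct pv_entry *pv = get_pv_entry();   /* first boot-pool entry */
        return (pv == &pvbootentries[0] ? 0 : 1);
}
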
@@ -1113,18 +1115,22 @@ pmap_kremove(vm_offset_t va)
  * Used to map a range of physical addresses into kernel
  * virtual address space.
  *
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
  */
 vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
 {
-        return IA64_PHYS_TO_RR7(start);
+        /*
+         * XXX We should really try to use larger pagesizes here to
+         * cut down the number of PVs used.
+         */
+        while (start < end) {
+                pmap_kenter(virt, start);
+                virt += PAGE_SIZE;
+                start += PAGE_SIZE;
+        }
+        return (virt);
 }
 
 /*
@@ -110,7 +110,7 @@ boolean_t pmap_is_modified __P((vm_page_t m));
 boolean_t pmap_ts_referenced __P((vm_page_t m));
 void pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
 void pmap_kremove __P((vm_offset_t));
-vm_offset_t pmap_map __P((vm_offset_t *, vm_offset_t, vm_offset_t, int));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
 void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
             vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
             int pagelimit));
@@ -180,7 +180,7 @@ vm_offset_t
 vm_page_startup(starta, enda, vaddr)
         register vm_offset_t starta;
         vm_offset_t enda;
-        vm_offset_t vaddr;
+        register vm_offset_t vaddr;
 {
         register vm_offset_t mapped;
         register struct vm_page **bucket;
@@ -242,6 +242,8 @@ vm_page_startup(starta, enda, vaddr)
          *
          * Note: This computation can be tweaked if desired.
          */
+        vm_page_buckets = (struct vm_page **)vaddr;
+        bucket = vm_page_buckets;
         if (vm_page_bucket_count == 0) {
                 vm_page_bucket_count = 1;
                 while (vm_page_bucket_count < atop(total))
@@ -255,12 +257,12 @@ vm_page_startup(starta, enda, vaddr)
          */
         new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
         new_end = trunc_page(new_end);
-        mapped = pmap_map(&vaddr, new_end, end,
+        mapped = round_page(vaddr);
+        vaddr = pmap_map(mapped, new_end, end,
             VM_PROT_READ | VM_PROT_WRITE);
-        bzero((caddr_t) mapped, end - new_end);
+        vaddr = round_page(vaddr);
+        bzero((caddr_t) mapped, vaddr - mapped);
 
-        vm_page_buckets = (struct vm_page **)mapped;
-        bucket = vm_page_buckets;
         for (i = 0; i < vm_page_bucket_count; i++) {
                 *bucket = NULL;
                 bucket++;
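The vm_page_startup() hunks show the caller-side bookkeeping the two interfaces demand. With the pointer interface, pmap_map() advances vaddr itself and the bzero() length is simply the physical extent (end - new_end); with the restored by-value interface the caller page-rounds its window and measures what was consumed (vaddr - mapped). A small standalone check of that arithmetic, assuming a hypothetical 4 KB page size:

#include <assert.h>

typedef unsigned long vm_offset_t;
#define PAGE_SIZE 4096UL
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Restored by-value pmap_map(), with the actual mapping stubbed out. */
static vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end)
{
        while (start < end) {
                /* pmap_kenter(virt, start) would go here */
                virt += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        return (virt);
}

int
main(void)
{
        vm_offset_t vaddr = 0xc0000123UL;       /* arbitrary, unaligned KVA */
        vm_offset_t new_end = 0x00800000UL, end = 0x00803000UL;
        vm_offset_t mapped;

        mapped = round_page(vaddr);             /* window base, as in the hunk */
        vaddr = pmap_map(mapped, new_end, end);
        vaddr = round_page(vaddr);

        /* The zeroed window covers exactly the three pages just mapped. */
        assert(vaddr - mapped == end - new_end);
        return (0);
}
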
@@ -279,15 +281,20 @@ vm_page_startup(starta, enda, vaddr)
             (end - new_end)) / PAGE_SIZE;
 
         end = new_end;
 
         /*
          * Initialize the mem entry structures now, and put them in the free
          * queue.
          */
+        vm_page_array = (vm_page_t) vaddr;
+        mapped = vaddr;
+
+        /*
+         * Validate these addresses.
+         */
         new_end = trunc_page(end - page_range * sizeof(struct vm_page));
-        mapped = pmap_map(&vaddr, new_end, end,
+        mapped = pmap_map(mapped, new_end, end,
             VM_PROT_READ | VM_PROT_WRITE);
-        vm_page_array = (vm_page_t) mapped;
 
         /*
          * Clear all of the page structures
@@ -314,7 +321,7 @@ vm_page_startup(starta, enda, vaddr)
                         pa += PAGE_SIZE;
                 }
         }
-        return (vaddr);
+        return (mapped);
 }
 
 /*