- Use direct mapped addresses for the message buffer, for the crash dump
  mappings, and for pmap_map which is used to map the vm_page structures.
- Don't allocate kva space for any of the above.
jake 2002-12-27 01:50:29 +00:00
parent 49979268c1
commit 073c2d289b
3 changed files with 12 additions and 62 deletions
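Context for the diff below: the sparc64 direct map is a fixed one-to-one virtual window onto physical memory, so a physical address can be turned into a usable kernel virtual address by plain arithmetic, with no kva reservation, no TSB entry and no TLB demap. The following is a minimal userland sketch of that idea only; DIRECT_BASE and PHYS_TO_DIRECT are illustrative stand-ins, not the kernel's actual VM_MIN_DIRECT_ADDRESS or TLB_PHYS_TO_DIRECT definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative base of the direct-mapped region (assumed value). */
#define	DIRECT_BASE		0xfffff80000000000UL
/* Sketch of a phys-to-direct translation: just OR in the window base. */
#define	PHYS_TO_DIRECT(pa)	((uint64_t)(pa) | DIRECT_BASE)

int
main(void)
{
	uint64_t pa = 0x12345000UL;	/* some physical page address */

	/* A dump routine could address such a chunk without mapping it. */
	printf("pa %#jx -> direct va %#jx\n", (uintmax_t)pa,
	    (uintmax_t)PHYS_TO_DIRECT(pa));
	return (0);
}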


@@ -35,12 +35,14 @@
 #include <sys/kerneldump.h>
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/pmap.h>
 #include <machine/metadata.h>
 #include <machine/kerneldump.h>
 #include <machine/ofw_mem.h>
+#include <machine/tsb.h>
+#include <machine/tlb.h>
 CTASSERT(sizeof(struct kerneldumpheader) == DEV_BSIZE);
@@ -132,9 +134,9 @@ reg_write(struct dumperinfo *di, vm_offset_t pa, vm_size_t size)
 static int
 blk_dump(struct dumperinfo *di, vm_offset_t pa, vm_size_t size)
 {
-	vm_size_t pos, npg, rsz;
-	void *va;
-	int c, counter, error, i, twiddle;
+	vm_size_t pos, rsz;
+	vm_offset_t va;
+	int c, counter, error, twiddle;
 	printf(" chunk at %#lx: %ld bytes ", (u_long)pa, (long)size);
@@ -145,10 +147,8 @@ blk_dump(struct dumperinfo *di, vm_offset_t pa, vm_size_t size)
 		printf("%c\b", "|/-\\"[twiddle++ & 3]);
 		rsz = size - pos;
 		rsz = (rsz > MAXDUMPSZ) ? MAXDUMPSZ : rsz;
-		npg = rsz >> PAGE_SHIFT;
-		for (i = 0; i < npg; i++)
-			va = pmap_kenter_temporary(pa + pos + i * PAGE_SIZE, i);
-		error = di->dumper(di->priv, va, 0, dumplo, rsz);
+		va = TLB_PHYS_TO_DIRECT(pa + pos);
+		error = di->dumper(di->priv, (void *)va, 0, dumplo, rsz);
 		if (error)
 			break;
 		dumplo += rsz;


@@ -198,7 +198,6 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 	phandle_t root;
 	struct pcpu *pc;
 	vm_offset_t end;
-	vm_offset_t va;
 	caddr_t kmdp;
 	u_int clock;
 	char *env;
@@ -339,10 +338,8 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 	cpu_setregs(pc);
 	/*
-	 * Map and initialize the message buffer (after setting trap table).
+	 * Initialize the message buffer (after setting trap table).
 	 */
-	va = (vm_offset_t)msgbufp;
-	pmap_map(&va, msgbuf_phys, msgbuf_phys + MSGBUF_SIZE, 0);
 	msgbufinit(msgbufp, MSGBUF_SIZE);
 	mutex_init();


@@ -145,8 +145,6 @@ vm_offset_t kernel_vm_end;
 vm_offset_t vm_max_kernel_address;
-static vm_offset_t crashdumpmap;
 /*
  * Kernel pmap.
  */
@@ -328,9 +326,10 @@ pmap_bootstrap(vm_offset_t ekva)
 	bzero(tsb_kernel, tsb_kernel_size);
 	/*
-	 * Allocate the message buffer.
+	 * Allocate and map the message buffer.
 	 */
 	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
+	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
 	/*
 	 * Patch the virtual address and the tsb mask into the trap table.
@@ -396,18 +395,6 @@ pmap_bootstrap(vm_offset_t ekva)
 	pmap_temp_map_2 = virtual_avail;
 	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
-	/*
-	 * Allocate virtual address space for the message buffer.
-	 */
-	msgbufp = (struct msgbuf *)virtual_avail;
-	virtual_avail += round_page(MSGBUF_SIZE);
-	/*
-	 * Allocate virtual address space to map pages during a kernel dump.
-	 */
-	crashdumpmap = virtual_avail;
-	virtual_avail += MAXDUMPPGS * PAGE_SIZE;
 	/*
 	 * Allocate a kernel stack with guard page for thread0 and map it into
 	 * the kernel tsb.
@@ -837,25 +824,6 @@ pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags)
 	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
 }
-/*
- * Make a temporary mapping for a physical address. This is only intended
- * to be used for panic dumps. Caching issues can be ignored completely here,
- * because pages mapped this way are only read.
- */
-void *
-pmap_kenter_temporary(vm_offset_t pa, int i)
-{
-	struct tte *tp;
-	vm_offset_t va;
-	va = crashdumpmap + i * PAGE_SIZE;
-	tlb_page_demap(kernel_pmap, va);
-	tp = tsb_kvtotte(va);
-	tp->tte_vpn = TV_VPN(va, TS_8K);
-	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_CP | TD_CV | TD_P;
-	return ((void *)crashdumpmap);
-}
 /*
  * Remove a wired page from kernel virtual address space.
  */
@@ -897,25 +865,10 @@ pmap_kremove_flags(vm_offset_t va)
  * unchanged.
  */
 vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
+pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
-	struct tte *tp;
-	vm_offset_t sva;
-	vm_offset_t va;
-	vm_offset_t pa;
-	pa = pa_start;
-	sva = *virt;
-	va = sva;
-	for (; pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) {
-		tp = tsb_kvtotte(va);
-		tp->tte_vpn = TV_VPN(va, TS_8K);
-		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW |
-		    TD_CP | TD_CV | TD_P | TD_W;
-	}
-	tlb_range_demap(kernel_pmap, sva, sva + (pa_end - pa_start) - 1);
-	*virt = va;
-	return (sva);
+	return (TLB_PHYS_TO_DIRECT(start));
 }
 /*