1. Convert the tstate saved in the pcb to a pstate and test for PSTATE_PEF
   to determine if a process is using floating point, in order to avoid
   sign extending a 13 bit immediate (see the sketch below).
2. We don't need to context switch cwp anymore, it is better to just
   fiddle the saved tstate on return from traps.  See exception.s 1.10
   and 1.12.
3. Completely remove pcb_cwp.
4. Implement vmapbuf, vunmapbuf and vm_fault_quick.  Completely remove
   TODOs from vm_machdep.c (yay!).

Submitted by:	tmm (1, 3, 4)
Obtained from:	existing archs (4)
jake 2001-11-18 03:28:28 +00:00
parent b1b4ae1667
commit 195f88af01
4 changed files with 128 additions and 50 deletions
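
For context on item 1 above, here is a minimal sketch, assuming the standard
SPARC V9 layout (PSTATE.PEF is bit 4, and TSTATE carries the saved PSTATE at
bits 8..19); the constant values are illustrative, not copied from the tree.

    /* Illustrative values only -- assumed from the SPARC V9 spec. */
    #define PSTATE_PEF              0x010   /* FPU enable bit within PSTATE */
    #define TSTATE_PSTATE_SHIFT     8       /* PSTATE field at TSTATE bits 8..19 */
    #define TSTATE_PEF              (PSTATE_PEF << TSTATE_PSTATE_SHIFT)     /* 0x1000 */

    /*
     * A simm13 operand is sign extended from bit 12, so TSTATE_PEF (0x1000)
     * cannot be used directly as an andcc immediate, while PSTATE_PEF (0x010)
     * can.  Shifting the saved tstate down first keeps the constant small:
     *
     *      srlx    %l1, TSTATE_PSTATE_SHIFT, %l1
     *      andcc   %l1, PSTATE_PEF, %l1
     */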


@@ -32,19 +32,11 @@
#include <machine/fp.h>
#include <machine/frame.h>
/*
* XXX: MAXWIN should probably be done dynamically, pcb_wscratch is therefore
* at the end of the pcb.
*/
#define MAXWIN 8
/* Used in pcb_fcwp to mark the wscratch stack as empty. */
#define PCB_CWP_EMPTY 0xff
#define MAXWIN 8
/* NOTE: pcb_fpstate must be aligned on a 64 byte boundary. */
struct pcb {
struct fpstate pcb_fpstate;
u_long pcb_cwp;
u_long pcb_fp;
u_long pcb_pc;
u_long pcb_y;


@@ -113,7 +113,8 @@ ENTRY(cpu_switch)
rd %fprs, %l3
stx %l3, [%l2 + PCB_FPSTATE + FP_FPRS]
ldx [%l1 + TF_TSTATE], %l1
andcc %l1, TSTATE_PEF, %l1
srlx %l1, TSTATE_PSTATE_SHIFT, %l1
andcc %l1, PSTATE_PEF, %l1
be,pt %xcc, 1f
nop
savefp %l2 + PCB_FPSTATE, %l4, %l3
@@ -124,8 +125,6 @@ ENTRY(cpu_switch)
*/
1: flushw
wrpr %g0, 0, %cleanwin
rdpr %cwp, %l3
stx %l3, [%l2 + PCB_CWP]
stx %fp, [%l2 + PCB_FP]
stx %i7, [%l2 + PCB_PC]
@@ -135,7 +134,7 @@ ENTRY(cpu_switch)
*/
.Lsw1: ldx [%o0 + TD_PCB], %o1
#if KTR_COMPILE & KTR_PROC
CATR(KTR_PROC, "cpu_switch: to=%p pc=%#lx fp=%#lx sp=%#lx cwp=%#lx"
CATR(KTR_PROC, "cpu_switch: to=%p pc=%#lx fp=%#lx sp=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
stx %o0, [%g1 + KTR_PARM1]
ldx [%o1 + PCB_PC], %g2
@@ -144,17 +143,7 @@ ENTRY(cpu_switch)
stx %g2, [%g1 + KTR_PARM3]
sub %g2, CCFSZ, %g2
stx %g2, [%g1 + KTR_PARM4]
ldx [%o1 + PCB_CWP], %g2
stx %g2, [%g1 + KTR_PARM5]
9:
#endif
#if 1
mov %o0, %g4
mov %l0, %g5
ldx [%o1 + PCB_CWP], %o2
wrpr %o2, %cwp
mov %g4, %o0
mov %g5, %l0
#endif
ldx [%o0 + TD_PCB], %o1
ldx [%o1 + PCB_FP], %fp
@@ -187,7 +176,8 @@ ENTRY(cpu_switch)
*/
ldx [%o0 + TD_FRAME], %o4
ldx [%o4 + TF_TSTATE], %o4
andcc %o4, TSTATE_PEF, %o4
srlx %o4, TSTATE_PSTATE_SHIFT, %o4
andcc %o4, PSTATE_PEF, %o4
be,pt %xcc, 2f
nop
restrfp %o1 + PCB_FPSTATE, %o4
@@ -274,7 +264,8 @@ ENTRY(savectx)
ldx [PCPU(CURTHREAD)], %l0
ldx [%l0 + TD_FRAME], %l0
ldx [%l0 + TF_TSTATE], %l0
andcc %l0, TSTATE_PEF, %l0
srlx %l0, TSTATE_PSTATE_SHIFT, %l0
andcc %l0, PSTATE_PEF, %l0
be,pt %xcc, 1f
stx %fp, [%i0 + PCB_FP]
add %i0, PCB_FPSTATE, %o0


@@ -57,7 +57,10 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <machine/cache.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/md_var.h>
@@ -108,7 +111,6 @@ cpu_fork(struct thread *td1, struct proc *p2, int flags)
__asm __volatile("flushw");
/* Copy the pcb (this will copy the windows saved in the pcb, too). */
bcopy(td1->td_pcb, pcb, sizeof(*pcb));
pcb->pcb_cwp = 2;
/*
* Create a new fresh stack for the new process.
@@ -127,7 +129,6 @@ cpu_fork(struct thread *td1, struct proc *p2, int flags)
fp->f_local[0] = (u_long)fork_return;
fp->f_local[1] = (u_long)td2;
fp->f_local[2] = (u_long)tf;
pcb->pcb_cwp = 0;
pcb->pcb_fp = (u_long)fp - SPOFF;
pcb->pcb_pc = (u_long)fork_trampoline - 8;
@@ -157,7 +158,7 @@ cpu_reset(void)
bspec[0] = '\0';
bspec[sizeof(bspec) - 1] = '\0';
}
openfirmware_exit(&args);
}
@@ -188,30 +189,133 @@ int
is_physical_memory(vm_offset_t addr)
{
TODO;
/* There is no device memory in the midst of the normal RAM. */
return (1);
}
void
swi_vm(void *v)
{
TODO;
/*
* Nothing to do here yet - busdma bounce buffers are not yet
* implemented.
*/
}
/*
* quick version of vm_fault
*/
int
vm_fault_quick(caddr_t v, int prot)
{
TODO;
return (0);
int r;
if (prot & VM_PROT_WRITE)
r = subyte(v, fubyte(v));
else
r = fubyte(v);
return(r);
}
/*
* Map an IO request into kernel virtual address space.
*
* All requests are (re)mapped into kernel VA space.
* Notice that we use b_bufsize for the size of the buffer
* to be mapped. b_bcount might be modified by the driver.
*/
void
vmapbuf(struct buf *bp)
{
TODO;
caddr_t addr, kva;
vm_offset_t pa;
int pidx;
struct vm_page *m;
pmap_t pmap;
GIANT_REQUIRED;
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
pmap = &curproc->p_vmspace->vm_pmap;
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
addr < bp->b_data + bp->b_bufsize; addr += PAGE_SIZE, pidx++) {
/*
* Do the vm_fault if needed; do the copy-on-write thing
* when reading stuff off device into memory.
*/
vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
(bp->b_iocmd == BIO_READ) ? (VM_PROT_READ | VM_PROT_WRITE) :
VM_PROT_READ);
pa = trunc_page(pmap_extract(pmap, (vm_offset_t)addr));
if (pa == 0)
panic("vmapbuf: page not present");
m = PHYS_TO_VM_PAGE(pa);
vm_page_hold(m);
bp->b_pages[pidx] = m;
}
if (pidx > btoc(MAXPHYS))
panic("vmapbuf: mapped more than MAXPHYS");
pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
kva = bp->b_saveaddr;
bp->b_npages = pidx;
bp->b_saveaddr = bp->b_data;
bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
if (CACHE_BADALIAS(trunc_page(bp->b_data),
trunc_page(bp->b_saveaddr))) {
/*
* bp->data (the virtual address the buffer got mapped to in the
* kernel) is an illegal alias to the user address.
* If the kernel had mapped this buffer previously (during a
* past IO operation) at this address, there might still be
* stale but valid tagged data in the cache, so flush it.
* XXX: the kernel address should be selected such that this
* cannot happen.
* XXX: pmap_kenter() maps physically uncacheable right now, so
* this cannot happen.
*/
dcache_inval(pmap, (vm_offset_t)bp->b_data,
(vm_offset_t)bp->b_data + bp->b_bufsize - 1);
}
}
/*
* Free the io map PTEs associated with this IO operation.
* We also invalidate the TLB entries and restore the original b_addr.
*/
void
vunmapbuf(struct buf *bp)
{
TODO;
int pidx;
int npages;
GIANT_REQUIRED;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
npages = bp->b_npages;
pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
npages);
for (pidx = 0; pidx < npages; pidx++)
vm_page_unhold(bp->b_pages[pidx]);
if (CACHE_BADALIAS(trunc_page(bp->b_data),
trunc_page(bp->b_saveaddr))) {
/*
* bp->data (the virtual address the buffer got mapped to in the
* kernel) is an illegal alias to the user address. In this
* case, D$ of the user address needs to be flushed to avoid the
* user reading stale data.
* XXX: the kernel address should be selected such that this
* cannot happen.
*/
dcache_inval(&curproc->p_vmspace->vm_pmap,
(vm_offset_t)bp->b_saveaddr, (vm_offset_t)bp->b_saveaddr +
bp->b_bufsize - 1);
}
bp->b_data = bp->b_saveaddr;
}
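
As a closing note on item 4: vmapbuf()/vunmapbuf() are driven by the raw-device
I/O path, which brackets the driver's strategy call with them.  The sketch below
shows only that calling pattern; it is not part of this commit, raw_transfer()
is an invented name, and the includes are a guess at what such a caller needs.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/bio.h>
    #include <sys/buf.h>

    /*
     * Hypothetical caller: mark the buffer as backed by user pages, map it
     * into kernel VA, hand it to the driver, then tear the mapping down.
     */
    static void
    raw_transfer(struct buf *bp, void (*strategy)(struct buf *))
    {
            bp->b_flags |= B_PHYS;  /* b_data points at a user buffer */
            vmapbuf(bp);            /* fault in, hold and map the pages */
            (*strategy)(bp);        /* driver sees a kernel mapping in b_data */
            /* ... wait for the transfer to complete ... */
            vunmapbuf(bp);          /* unmap, unhold and restore b_data */
    }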