- Catch up to the VM mutex changes.

- Sort includes in a few places.
jhb 2001-05-30 00:03:13 +00:00
parent 9a8e4d5a40
commit a877c444cf
8 changed files with 51 additions and 44 deletions
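Most of the diff applies one pattern: code that calls into the pmap/VM layer now brackets those calls with the global vm_mtx from the VM mutex work instead of relying on Giant. A minimal sketch of that pattern (not taken from the commit itself; mtx_lock()/mtx_unlock() and vm_mtx are the real mutex(9) names, the function is a placeholder):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

extern struct mtx vm_mtx;		/* global VM mutex */

static void
example_vm_consumer(void)
{

	mtx_lock(&vm_mtx);
	/* ... pmap_*() / vm_*() calls that now expect vm_mtx held ... */
	mtx_unlock(&vm_mtx);
}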

View File

@@ -29,6 +29,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

View File

@@ -177,6 +177,7 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
mtx_lock(&vm_mtx);
identifycpu();
/* startrtclock(); */
@@ -300,6 +301,7 @@ cpu_startup(dummy)
&maxaddr, mb_map_size);
mb_map->system_map = 1;
}
mtx_unlock(&vm_mtx);
/*
* Initialize callouts

View File

@@ -49,12 +49,14 @@
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <machine/frame.h>
@@ -187,13 +189,19 @@ mmrw(dev_t dev, struct uio *uio, int flags)
*/
addr = trunc_page(v);
eaddr = round_page(v + c);
mtx_lock(&vm_mtx);
for (; addr < eaddr; addr += PAGE_SIZE)
if (pmap_extract(kernel_pmap, addr) == 0)
if (pmap_extract(kernel_pmap, addr) == 0) {
mtx_unlock(&vm_mtx);
return EFAULT;
}
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ?
VM_PROT_READ : VM_PROT_WRITE))
VM_PROT_READ : VM_PROT_WRITE)) {
mtx_unlock(&vm_mtx);
return (EFAULT);
}
mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)v, c, uio);
}
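The mmrw() hunk above shows the part of the conversion that is easiest to get wrong: once vm_mtx is held, every early return has to drop it first. Condensed to that shape, with a hypothetical helper name but the same pmap_extract()/kernel_pmap check as the hunk:

/* Sketch: verify a kernel VA range is mapped while holding vm_mtx. */
static int
check_mapped(vm_offset_t addr, vm_offset_t eaddr)
{
	vm_offset_t va;

	mtx_lock(&vm_mtx);
	for (va = addr; va < eaddr; va += PAGE_SIZE)
		if (pmap_extract(kernel_pmap, va) == 0) {
			mtx_unlock(&vm_mtx);	/* unlock before bailing out */
			return (EFAULT);
		}
	mtx_unlock(&vm_mtx);
	return (0);
}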

View File

@@ -94,18 +94,19 @@
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

View File

@@ -26,18 +26,19 @@
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/tty.h>
#include <sys/proc.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/tty.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

View File

@@ -21,11 +21,12 @@
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/linker.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

View File

@@ -318,7 +318,6 @@ trap(int vector, int imm, struct trapframe *framep)
vm_prot_t ftype = 0;
int rv;
mtx_lock(&Giant);
/*
* If it was caused by fuswintr or suswintr,
* just punt. Note that we check the faulting
@@ -333,7 +332,6 @@ trap(int vector, int imm, struct trapframe *framep)
p->p_addr->u_pcb.pcb_accessaddr == va) {
framep->tf_cr_iip = p->p_addr->u_pcb.pcb_onfault;
p->p_addr->u_pcb.pcb_onfault = 0;
mtx_unlock(&Giant);
goto out;
}
@@ -435,12 +433,9 @@ trap(int vector, int imm, struct trapframe *framep)
} else if (rv == KERN_PROTECTION_FAILURE)
rv = KERN_INVALID_ADDRESS;
}
if (rv == KERN_SUCCESS) {
mtx_unlock(&Giant);
if (rv == KERN_SUCCESS)
goto out;
}
mtx_unlock(&Giant);
ucode = va;
i = SIGSEGV;
#ifdef DEBUG

View File

@@ -315,11 +315,14 @@ void
cpu_wait(p)
struct proc *p;
{
mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
mtx_unlock(&vm_mtx);
}
/*
@@ -371,6 +374,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -386,6 +390,7 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -406,6 +411,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -413,6 +419,7 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -470,14 +477,17 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
if (mtx_trylock(&vm_mtx) == 0)
return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
}
#ifdef SMP
if (try_mplock()) {
#endif
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -486,13 +496,7 @@ vm_page_zero_idle()
TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
m->queue = PQ_NONE;
splx(s);
#if 0
rel_mplock();
#endif
pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
get_mplock();
#endif
(void)splvm();
vm_page_flag_set(m, PG_ZERO);
m->queue = PQ_FREE + m->pc;
@@ -506,14 +510,8 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
#ifdef SMP
rel_mplock();
#endif
mtx_unlock(&vm_mtx);
return (1);
#ifdef SMP
}
#endif
return (0);
}
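The vm_page_zero_idle() hunk above replaces the old MP-lock juggling with a conditional acquire: idle-time page zeroing only proceeds if vm_mtx can be taken without blocking, and every bail-out path releases it before returning. Stripped to its control flow (zero_idle_pass() is a hypothetical name; vm_page_zero_count, ZIDLE_HI(), and cnt.v_free_count are used as in the hunk):

/* Sketch of the mtx_trylock() pattern in the idle zero-page loop. */
static int
zero_idle_pass(void)
{

	if (mtx_trylock(&vm_mtx) == 0)
		return (0);		/* contended: skip this pass */
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
		mtx_unlock(&vm_mtx);	/* enough pre-zeroed pages already */
		return (0);
	}
	/* ... dequeue a free page, pmap_zero_page() it, requeue it ... */
	mtx_unlock(&vm_mtx);
	return (1);
}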
/*