Catch these files up to their i386 neighbors to make alpha boot,
prior to the vm_mtx commit.
This commit is contained in:
gallatin 2001-05-21 16:04:24 +00:00
parent b719eb0861
commit 1c57c3b027
5 changed files with 60 additions and 20 deletions

View File

@ -257,6 +257,7 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
mtx_lock(&vm_mtx);
identifycpu();
/* startrtclock(); */
@ -366,6 +367,7 @@ cpu_startup(dummy)
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.

View File

@ -49,6 +49,8 @@
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/proc.h>
@ -188,13 +190,19 @@ mmrw(dev_t dev, struct uio *uio, int flags)
*/
addr = trunc_page(v);
eaddr = round_page(v + c);
mtx_lock(&vm_mtx);
for (; addr < eaddr; addr += PAGE_SIZE)
if (pmap_extract(kernel_pmap, addr) == 0)
if (pmap_extract(kernel_pmap, addr) == 0) {
mtx_unlock(&vm_mtx);
return EFAULT;
}
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ?
VM_PROT_READ : VM_PROT_WRITE))
VM_PROT_READ : VM_PROT_WRITE)) {
mtx_unlock(&vm_mtx);
return (EFAULT);
}
mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)v, c, uio);
continue;
}

View File

@ -275,11 +275,14 @@ void
cpu_wait(p)
struct proc *p;
{
mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
mtx_unlock(&vm_mtx);
}
/*
@ -331,6 +334,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@ -346,6 +350,7 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@ -366,6 +371,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@ -373,6 +379,7 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@ -430,12 +437,17 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
if (mtx_trylock(&vm_mtx) == 0)
return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
}
if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@ -464,10 +476,8 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
mtx_unlock(&Giant);
mtx_unlock(&vm_mtx);
return (1);
}
return (0);
}
/*

View File

@ -275,11 +275,14 @@ void
cpu_wait(p)
struct proc *p;
{
mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
mtx_unlock(&vm_mtx);
}
/*
@ -331,6 +334,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@ -346,6 +350,7 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@ -366,6 +371,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@ -373,6 +379,7 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@ -430,12 +437,17 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
if (mtx_trylock(&vm_mtx) == 0)
return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
}
if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@ -464,10 +476,8 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
mtx_unlock(&Giant);
mtx_unlock(&vm_mtx);
return (1);
}
return (0);
}
/*

View File

@ -275,11 +275,14 @@ void
cpu_wait(p)
struct proc *p;
{
mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
mtx_unlock(&vm_mtx);
}
/*
@ -331,6 +334,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@ -346,6 +350,7 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@ -366,6 +371,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@ -373,6 +379,7 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@ -430,12 +437,17 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
if (mtx_trylock(&vm_mtx) == 0)
return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
mtx_unlock(&vm_mtx);
return(0);
}
if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@ -464,10 +476,8 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
mtx_unlock(&Giant);
mtx_unlock(&vm_mtx);
return (1);
}
return (0);
}
/*