Add a special page zero entry point intended to be called via the
single-threaded VM pagezero kthread outside of Giant.  For some platforms,
this is really easy since it can just use the direct mapped region.  For
others, IPI sending is involved or there are other issues, so grab Giant
when needed.
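
As a condensed illustration (assembled from the vm_zeroidle.c and pmap.c
diffs below, not additional code in this commit), the MI idle-zero loop now
drops Giant around the call to the new MD entry point, and each pmap decides
for itself how much locking that entry point needs:

    /* MI side, vm_zeroidle.c: only the zeroing itself runs outside Giant. */
    mtx_unlock(&vm_page_queue_free_mtx);
    mtx_unlock(&Giant);
    pmap_zero_page_idle(m);
    mtx_lock(&Giant);
    mtx_lock(&vm_page_queue_free_mtx);

    /* MD side, direct-mapped platforms (alpha shown): no Giant needed. */
    void
    pmap_zero_page_idle(vm_page_t m)
    {
    	bzero((caddr_t)ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
    }

On i386, by contrast, the temporary CMAP3 mapping has to be invalidated and
the TLB-shootdown IPI path is not yet MPSAFE, so Giant is taken just around
invltlb_1pg() (see the i386 pmap.c diff below).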

We still have preemption issues to deal with, but Alan Cox has an
interesting suggestion on how to minimize the problem on x86.

Use Luigi's hack for preserving the (lack of) priority.
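
Concretely (see the vm_zeroidle.c diff below), the thread's own priority is
captured right after rtp_to_pri() drops it to the idle class, and that value
is passed to tsleep() in place of PPAUSE, so waking up to zero pages does not
bump the thread back above idle priority:

    mtx_lock_spin(&sched_lock);
    rtp_to_pri(&rtp, td->td_ksegrp);
    pri = td->td_priority;		/* remember the idle-class priority */
    mtx_unlock_spin(&sched_lock);
    ...
    tsleep(&zero_state, pri, "pgzero", hz * 300);	/* sleep/wake at that priority */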

Turn the idle zeroing back on since it can now actually do something useful
outside of Giant in many cases.
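
The knob itself is unchanged: vm.idlezero_enable remains a writable sysctl
and a loader tunable (the CTLFLAG_RW SYSCTL_INT and TUNABLE_INT in the
vm_zeroidle.c diff below), so it can still be disabled with, for example,
sysctl vm.idlezero_enable=0 at runtime or vm.idlezero_enable="0" in
loader.conf; only the default flips from 0 to 1.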
Peter Wemm 2002-07-08 04:24:26 +00:00
parent c00f7bc28b
commit a58b3a6878
10 changed files with 162 additions and 11 deletions

View File

@@ -2525,6 +2525,21 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	bzero((char *)(caddr_t)va + off, size);
 }
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents. This is for the vm_pagezero idle process.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m));
+	bzero((caddr_t) va, PAGE_SIZE);
+}
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using

View File

@@ -175,9 +175,9 @@ static int pmap_pagedaemon_waken = 0;
  * All those kernel PT submaps that BSD is so fond of
  */
 pt_entry_t *CMAP1 = 0;
-static pt_entry_t *CMAP2, *ptmmap;
+static pt_entry_t *CMAP2, *CMAP3, *ptmmap;
 caddr_t CADDR1 = 0, ptvmmap = 0;
-static caddr_t CADDR2;
+static caddr_t CADDR2, CADDR3;
 static pt_entry_t *msgbufmap;
 struct msgbuf *msgbufp = 0;
@@ -326,9 +326,11 @@ pmap_bootstrap(firstaddr, loadaddr)
 	/*
 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
+	 * CMAP3 is used for the idle process page zeroing.
 	 */
 	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
 	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
+	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
 	/*
 	 * Crashdump maps.
@@ -2685,6 +2687,38 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	*CMAP2 = 0;
 }
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents. This
+ * is intended to be called from the vm_pagezero process only and
+ * outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+	if (*CMAP3)
+		panic("pmap_zero_page: CMAP3 busy");
+	*CMAP3 = PG_V | PG_RW | phys | PG_A | PG_M;
+#ifdef SMP
+	mtx_lock(&Giant);	/* IPI sender not MPSAFE */
+#endif
+	invltlb_1pg((vm_offset_t)CADDR3);
+#ifdef SMP
+	mtx_unlock(&Giant);
+#endif
+#if defined(I686_CPU)
+	if (cpu_class == CPUCLASS_686)
+		i686_pagezero(CADDR3);
+	else
+#endif
+		bzero(CADDR3, PAGE_SIZE);
+	*CMAP3 = 0;
+}
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using

View File

@@ -175,9 +175,9 @@ static int pmap_pagedaemon_waken = 0;
  * All those kernel PT submaps that BSD is so fond of
  */
 pt_entry_t *CMAP1 = 0;
-static pt_entry_t *CMAP2, *ptmmap;
+static pt_entry_t *CMAP2, *CMAP3, *ptmmap;
 caddr_t CADDR1 = 0, ptvmmap = 0;
-static caddr_t CADDR2;
+static caddr_t CADDR2, CADDR3;
 static pt_entry_t *msgbufmap;
 struct msgbuf *msgbufp = 0;
@@ -326,9 +326,11 @@ pmap_bootstrap(firstaddr, loadaddr)
 	/*
 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
+	 * CMAP3 is used for the idle process page zeroing.
 	 */
 	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
 	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
+	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
 	/*
 	 * Crashdump maps.
@@ -2685,6 +2687,38 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	*CMAP2 = 0;
 }
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents. This
+ * is intended to be called from the vm_pagezero process only and
+ * outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+	if (*CMAP3)
+		panic("pmap_zero_page: CMAP3 busy");
+	*CMAP3 = PG_V | PG_RW | phys | PG_A | PG_M;
+#ifdef SMP
+	mtx_lock(&Giant);	/* IPI sender not MPSAFE */
+#endif
+	invltlb_1pg((vm_offset_t)CADDR3);
+#ifdef SMP
+	mtx_unlock(&Giant);
+#endif
+#if defined(I686_CPU)
+	if (cpu_class == CPUCLASS_686)
+		i686_pagezero(CADDR3);
+	else
+#endif
+		bzero(CADDR3, PAGE_SIZE);
+	*CMAP3 = 0;
+}
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using

View File

@@ -2035,6 +2035,22 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	bzero((char *)(caddr_t)va + off, size);
 }
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents. This is for the vm_idlezero process.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
+	bzero((caddr_t) va, PAGE_SIZE);
+}
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using

View File

@@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	TODO;
 }
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
+	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
+	mtx_lock(&Giant);
+	pmap_zero_page(m);
+	mtx_unlock(&Giant);
+}
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested. If specified the page

View File

@@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	TODO;
 }
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
+	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
+	mtx_lock(&Giant);
+	pmap_zero_page(m);
+	mtx_unlock(&Giant);
+}
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested. If specified the page

View File

@@ -909,6 +909,17 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	TODO;
 }
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
+	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
+	mtx_lock(&Giant);
+	pmap_zero_page(m);
+	mtx_unlock(&Giant);
+}
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested. If specified the page

View File

@@ -1429,6 +1429,22 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	aszero(ASI_PHYS_USE_EC, pa + off, size);
 }
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+	CTR1(KTR_PMAP, "pmap_zero_page_idle: pa=%#lx", pa);
+#ifdef SMP
+	mtx_lock(&Giant);
+#endif
+	dcache_inval_phys(pa, pa + PAGE_SIZE - 1);
+#ifdef SMP
+	mtx_unlock(&Giant);
+#endif
+	aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
+}
 /*
  * Copy a page of physical memory by temporarily mapping it into the tlb.
  */

View File

@@ -126,6 +126,7 @@ void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
 void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
 void pmap_zero_page(vm_page_t);
 void pmap_zero_page_area(vm_page_t, int off, int size);
+void pmap_zero_page_idle(vm_page_t);
 void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
 int pmap_mincore(pmap_t pmap, vm_offset_t addr);
 void pmap_new_thread(struct thread *td);

View File

@@ -30,7 +30,7 @@ static int cnt_prezero;
 SYSCTL_INT(_vm_stats_misc, OID_AUTO,
 	cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-static int idlezero_enable = 0;
+static int idlezero_enable = 1;
 SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RW, &idlezero_enable, 0, "");
 TUNABLE_INT("vm.idlezero_enable", &idlezero_enable);
@@ -83,9 +83,9 @@ vm_page_zero_idle(void)
 	TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
 	m->queue = PQ_NONE;
 	mtx_unlock(&vm_page_queue_free_mtx);
-	/* maybe drop out of Giant here */
-	pmap_zero_page(m);
-	/* and return here */
+	mtx_unlock(&Giant);
+	pmap_zero_page_idle(m);
+	mtx_lock(&Giant);
 	mtx_lock(&vm_page_queue_free_mtx);
 	vm_page_flag_set(m, PG_ZERO);
 	m->queue = PQ_FREE + m->pc;
@@ -109,7 +109,7 @@ void
 vm_page_zero_idle_wakeup(void)
 {
-	if (vm_page_zero_check())
+	if (idlezero_enable && vm_page_zero_check())
 		wakeup(&zero_state);
 }
@@ -119,17 +119,19 @@ vm_pagezero(void)
 	struct thread *td = curthread;
 	struct rtprio rtp;
 	int pages = 0;
+	int pri;
 	rtp.prio = RTP_PRIO_MAX;
 	rtp.type = RTP_PRIO_IDLE;
 	mtx_lock_spin(&sched_lock);
 	rtp_to_pri(&rtp, td->td_ksegrp);
+	pri = td->td_priority;
 	mtx_unlock_spin(&sched_lock);
 	for (;;) {
 		if (vm_page_zero_check()) {
 			pages += vm_page_zero_idle();
-			if (pages > idlezero_maxrun) {
+			if (pages > idlezero_maxrun || kserunnable()) {
 				mtx_lock_spin(&sched_lock);
 				td->td_proc->p_stats->p_ru.ru_nvcsw++;
 				mi_switch();
@@ -137,7 +139,7 @@ vm_pagezero(void)
 				pages = 0;
 			}
 		} else {
-			tsleep(&zero_state, PPAUSE, "pgzero", hz * 300);
+			tsleep(&zero_state, pri, "pgzero", hz * 300);
 			pages = 0;
 		}
 	}