- Now that both schedulers support temporary cpu pinning, use this rather
  than the switchin functions to guarantee that we're operating with the
  correct tlb entry.
- Remove the post copy/zero tlb invalidations.  Invalidating an entry that
  is known to exist is cheaper, which is why the invalidation used to be
  done after use.  However, some architectures implement speculative page
  table prefetching, so we cannot guarantee that the invalidated entry is
  still invalid when we re-enter any of these functions.  As a result we
  must always invalidate before use to be safe.  (A sketch of the resulting
  pattern follows below.)
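Distilled from the pmap_zero_page() hunk in this diff, the new pattern looks
as follows.  This is a minimal annotated sketch: the identifiers (CMAP2,
CADDR2, invlcaddr(), pagezero()) come straight from the diff, but the
explanatory comments are a gloss, not part of the commit.

/* Kernel-context sketch; CMAP2 is pmap.c's private scratch PTE and
 * CADDR2 the virtual address it maps. */
void
pmap_zero_page(vm_page_t m)
{

        mtx_lock(&CMAPCADDR12_lock);
        if (*CMAP2)
                panic("pmap_zero_page: CMAP2 busy");
        sched_pin();            /* temporary cpu pinning; replaces the
                                 * pcb_switchout hooks removed below */
        *CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
        invlcaddr(CADDR2);      /* invalidate BEFORE use: a speculative
                                 * page table walk may have re-cached a
                                 * translation for CADDR2 at any point,
                                 * so a flush done after the previous
                                 * use guarantees nothing by now */
        pagezero(CADDR2);       /* the tlb entry is known-correct here */
        *CMAP2 = 0;             /* no post-use flush; the next caller
                                 * invalidates before touching CADDR2 */
        sched_unpin();
        mtx_unlock(&CMAPCADDR12_lock);
}

invlcaddr() is presumably the pmap-local flush helper: invlpg() on CPUs that
have it, or a full invltlb() on 386-class CPUs, matching the I386_CPU block
visible in the pmap_copy_page() hunk.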
Jeff Roberson 2004-01-25 08:04:45 +00:00
parent e17c57b14b
commit f67fdc737e


@@ -117,6 +117,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sx.h>
 #include <sys/user.h>
 #include <sys/vmmeter.h>
+#include <sys/sched.h>
 #include <sys/sysctl.h>
 #ifdef SMP
 #include <sys/smp.h>
@@ -2340,38 +2341,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 	}
 }
 
-#ifdef SMP
-/*
- * pmap_zpi_switchout*()
- *
- * These functions allow us to avoid doing IPIs alltogether in certain
- * temporary page-mapping situations (page zeroing).  Instead to deal
- * with being preempted and moved onto a different cpu we invalidate
- * the page when the scheduler switches us in.  This does not occur
- * very often so we remain relatively optimal with very little effort.
- */
-static void
-pmap_zpi_switchout12(void)
-{
-	invlpg((u_int)CADDR1);
-	invlpg((u_int)CADDR2);
-}
-
-static void
-pmap_zpi_switchout2(void)
-{
-	invlpg((u_int)CADDR2);
-}
-
-static void
-pmap_zpi_switchout3(void)
-{
-	invlpg((u_int)CADDR3);
-}
-#endif
-
 static __inline void
 pagezero(void *page)
 {
@@ -2409,19 +2378,12 @@ pmap_zero_page(vm_page_t m)
 	mtx_lock(&CMAPCADDR12_lock);
 	if (*CMAP2)
 		panic("pmap_zero_page: CMAP2 busy");
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = pmap_zpi_switchout2;
-#endif
+	sched_pin();
 	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
-#ifdef SMP
-	invlpg((u_int)CADDR2);
-#endif
+	invlcaddr(CADDR2);
 	pagezero(CADDR2);
 	*CMAP2 = 0;
-	invlcaddr(CADDR2);
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = NULL;
-#endif
+	sched_unpin();
 	mtx_unlock(&CMAPCADDR12_lock);
 }
 
@@ -2438,22 +2400,15 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	mtx_lock(&CMAPCADDR12_lock);
 	if (*CMAP2)
 		panic("pmap_zero_page: CMAP2 busy");
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = pmap_zpi_switchout2;
-#endif
+	sched_pin();
 	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
-#ifdef SMP
-	invlpg((u_int)CADDR2);
-#endif
+	invlcaddr(CADDR2);
 	if (off == 0 && size == PAGE_SIZE)
 		pagezero(CADDR2);
 	else
 		bzero((char *)CADDR2 + off, size);
 	*CMAP2 = 0;
-	invlcaddr(CADDR2);
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = NULL;
-#endif
+	sched_unpin();
 	mtx_unlock(&CMAPCADDR12_lock);
 }
 
@@ -2469,19 +2424,12 @@ pmap_zero_page_idle(vm_page_t m)
 
 	if (*CMAP3)
 		panic("pmap_zero_page: CMAP3 busy");
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = pmap_zpi_switchout3;
-#endif
+	sched_pin();
 	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
-#ifdef SMP
-	invlpg((u_int)CADDR3);
-#endif
+	invlcaddr(CADDR3);
 	pagezero(CADDR3);
 	*CMAP3 = 0;
-	invlcaddr(CADDR3);
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = NULL;
-#endif
+	sched_unpin();
 }
 
 /*
@@ -2499,27 +2447,19 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
 		panic("pmap_copy_page: CMAP1 busy");
 	if (*CMAP2)
 		panic("pmap_copy_page: CMAP2 busy");
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = pmap_zpi_switchout12;
-#endif
-	*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
-	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
-#ifdef SMP
-	invlpg((u_int)CADDR1);
-	invlpg((u_int)CADDR2);
-#endif
-	bcopy(CADDR1, CADDR2, PAGE_SIZE);
-	*CMAP1 = 0;
-	*CMAP2 = 0;
+	sched_pin();
 #ifdef I386_CPU
 	invltlb();
 #else
 	invlpg((u_int)CADDR1);
 	invlpg((u_int)CADDR2);
 #endif
-#ifdef SMP
-	curthread->td_pcb->pcb_switchout = NULL;
-#endif
+	*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
+	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
+	bcopy(CADDR1, CADDR2, PAGE_SIZE);
+	*CMAP1 = 0;
+	*CMAP2 = 0;
+	sched_unpin();
 	mtx_unlock(&CMAPCADDR12_lock);
 }
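One ordering detail in the pmap_copy_page() hunk is worth spelling out: the
new code flushes while CMAP1/CMAP2 are still zero, before installing the new
PTEs, rather than installing and then flushing as the zeroing paths do.  An
annotated reading (the code is from the hunk above; the comments are a
gloss, not the commit's):

	sched_pin();			/* no migration from here on */
#ifdef I386_CPU
	invltlb();			/* the 80386 has no invlpg */
#else
	invlpg((u_int)CADDR1);		/* flush while both PTEs are still
					 * zero: a speculative walk finds
					 * nothing valid to re-cache */
	invlpg((u_int)CADDR2);
#endif
	*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
	bcopy(CADDR1, CADDR2, PAGE_SIZE); /* first touch walks the fresh PTEs */
	*CMAP1 = 0;
	*CMAP2 = 0;
	sched_unpin();

Either order is safe: once the stale entries are flushed, any speculative
reload can only pick up the just-installed, correct translations.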