- add ranged shootdowns when fewer than 64 mappings are being invalidated

Kip Macy 2006-12-25 02:05:52 +00:00
parent 0321d7f9a8
commit 35d16ac000
3 changed files with 48 additions and 21 deletions
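In effect: when a range spans fewer than 64 pages (64 * 8KB = 512KB with sun4v's 8K base pages), it is now invalidated page by page, locally with invlpg() and on remote CPUs with a single tl_invlrng IPI that carries the start address and page count packed into one argument, rather than demapping a whole context or the entire TLB.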

View File

@@ -104,7 +104,7 @@ extern char tl_ipi_level[];
 extern char tl_invltlb[];
 extern char tl_invlctx[];
 extern char tl_invlpg[];
-extern char tl_ipi_tlb_range_demap[];
+extern char tl_invlrng[];
 extern char tl_tsbupdate[];
 extern char tl_ttehashupdate[];

View File

@@ -452,6 +452,35 @@ ENTRY(tl_invlpg)
 	membar	#Sync
 END(tl_invlpg)
 
+ENTRY(tl_invlrng)
+	sethi	%hi(PAGE_SIZE), %g5
+	dec	%g5
+	and	%g1, %g5, %g4
+	andn	%g1, %g5, %g1
+	dec	%g4
+1:	mov	%o0, %g5
+	mov	%o1, %g6
+	mov	%o2, %g7
+	mov	MAP_ITLB|MAP_DTLB, %o2
+	mov	%g1, %o0
+	mov	%g2, %o1
+	ta	MMU_UNMAP_ADDR
+	brnz,a,pn %o0, interrupt_panic_bad_hcall
+	  mov	MMU_UNMAP_ADDR, %o1
+	brnz,pt	%g4, 1b
+	  dec	%g4
+	mov	%g5, %o0
+	mov	%g6, %o1
+	mov	%g7, %o2
+	ba,pt	%xcc, set_ackmask
+	  membar #Sync
+END(tl_invlrng)
+
 ENTRY(tl_tsbupdate)
 	/* compare current context with one to be updated */
 	mov	MMU_CID_S, %g4
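The new handler receives one packed word in %g1: the bits below PAGE_SIZE hold the page count and the page-aligned upper bits hold the starting virtual address, with the context ID in %g2. Below is a rough C rendering of that decode, with hv_mmu_unmap_addr() as a hypothetical stand-in for the MMU_UNMAP_ADDR trap and the MAP_* values assumed. Note one quirk of the committed assembly: %g1 is never advanced between iterations, so the loop as written re-demaps the starting page; the sketch shows the apparent intent of stepping through the range.

	#include <stdlib.h>

	#define PAGE_SHIFT	13			/* sun4v 8K base pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define MAP_DTLB	0x1			/* assumed flag values */
	#define MAP_ITLB	0x2

	static long
	hv_mmu_unmap_addr(unsigned long va, unsigned long ctx, unsigned long flags)
	{
		/* stand-in for the "ta MMU_UNMAP_ADDR" hypercall; always succeeds here */
		(void)va; (void)ctx; (void)flags;
		return (0);
	}

	static void
	invlrng(unsigned long arg, unsigned long ctx)	/* arg = %g1, ctx = %g2 */
	{
		unsigned long count = arg & (PAGE_SIZE - 1);	/* and  %g1, %g5, %g4 */
		unsigned long va = arg & ~(PAGE_SIZE - 1);	/* andn %g1, %g5, %g1 */

		while (count-- > 0) {
			if (hv_mmu_unmap_addr(va, ctx, MAP_ITLB | MAP_DTLB) != 0)
				abort();	/* interrupt_panic_bad_hcall in the asm */
			va += PAGE_SIZE;	/* the committed asm omits this step */
		}
	}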

View File

@@ -1509,9 +1509,9 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va, int cleartsb)
 void
 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int cleartsb)
 {
-	vm_offset_t tva;
-#ifdef SMP
+	vm_offset_t tva, invlrngva;
 	char *func;
+#ifdef SMP
 	cpumask_t active;
 #endif
 	if ((eva - sva) == PAGE_SIZE) {
@@ -1520,8 +1520,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int cleartsb)
 	}
 
-	if (sva >= eva)
-		panic("invalidating negative or zero range sva=0x%lx eva=0x%lx", sva, eva);
+	KASSERT(sva < eva, ("invalidating negative or zero range sva=0x%lx eva=0x%lx", sva, eva));
 
 	if (cleartsb == TRUE)
 		tsb_clear_range(&pmap->pm_tsb, sva, eva);
@@ -1530,18 +1529,18 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int cleartsb)
 	if ((eva - sva) < PAGE_SIZE*64) {
 		for (tva = sva; tva < eva; tva += PAGE_SIZE_8K)
 			invlpg(tva, pmap->pm_context);
-	} else if (pmap->pm_context)
-		invlctx(pmap->pm_context);
-	else
-		invltlb();
-#ifdef SMP
-	if (pmap == kernel_pmap)
-		func = tl_invltlb;
-	else
+		func = tl_invlrng;
+	} else if (pmap->pm_context) {
 		func = tl_invlctx;
-
-	active = pmap_ipi(pmap, (void *)func, pmap->pm_context, 0);
+		invlctx(pmap->pm_context);
+	} else {
+		func = tl_invltlb;
+		invltlb();
+	}
+#ifdef SMP
+	invlrngva = sva | ((eva - sva) >> PAGE_SHIFT);
+	active = pmap_ipi(pmap, (void *)func, pmap->pm_context, invlrngva);
 	active &= ~pmap->pm_active;
 	atomic_clear_int(&pmap->pm_tlbactive, active);
 #endif
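The packing on the sending side works because sva is page-aligned (its low PAGE_SHIFT bits are zero) and the ranged path is only taken for fewer than 64 pages, so the count needs just 6 of those 13 free bits. A minimal sketch of the encoding, mirroring invlrngva = sva | ((eva - sva) >> PAGE_SHIFT) above:

	#include <assert.h>

	#define PAGE_SHIFT	13		/* sun4v 8K base pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	static unsigned long
	invlrng_pack(unsigned long sva, unsigned long eva)
	{
		unsigned long npages = (eva - sva) >> PAGE_SHIFT;

		assert((sva & (PAGE_SIZE - 1)) == 0);	/* alignment frees the low bits */
		assert(npages > 0 && npages < 64);	/* count fits below PAGE_SIZE */
		return (sva | npages);
	}

For example, invlrng_pack(0x2000, 0x2000 + 4 * PAGE_SIZE) yields 0x2004; the receiver (tl_invlrng above) recovers the two fields by masking with PAGE_SIZE - 1.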
@@ -1552,8 +1551,8 @@ void
 pmap_invalidate_all(pmap_t pmap)
 {
-	if (pmap == kernel_pmap)
-		panic("invalidate_all called on kernel_pmap");
+	KASSERT(pmap != kernel_pmap,
+	    ("invalidate_all called on kernel_pmap"));
 
 	tsb_clear(&pmap->pm_tsb);
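The panic-to-KASSERT conversions invert the tested condition: FreeBSD's KASSERT(expr, (fmt, ...)) from sys/systm.h, compiled in under options INVARIANTS, panics when expr is false. So a guard keeps its meaning only if the assertion states the condition that must hold:

	/* before: explicit check, always compiled in */
	if (pmap == kernel_pmap)
		panic("invalidate_all called on kernel_pmap");

	/* after: equivalent assertion, active under INVARIANTS only */
	KASSERT(pmap != kernel_pmap,
	    ("invalidate_all called on kernel_pmap"));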
@@ -1802,14 +1801,13 @@ void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
-	tte_t otte_data;
+	tte_t otte;
 
-	otte_data = 0;
+	otte = 0;
 	va = sva;
 	while (count-- > 0) {
 		otte |= tte_hash_update(kernel_pmap->pm_hash, va,
-		    VM_PAGE_TO_PHYS(*m),
-		    pa | TTE_KERNEL | VTD_8K);
+		    VM_PAGE_TO_PHYS(*m) | TTE_KERNEL | VTD_8K);
 		va += PAGE_SIZE;
 		m++;
 	}