There is no reason why we need to pin the underlying thread to its current
processor in pmap_invalidate_{all,page,range}().  These functions are using
an instruction that broadcasts the TLB invalidation to every processor, so
even if a thread migrates in the middle of one of these functions every
processor will still perform the required TLB invalidations.

Reviewed by:	andrew, markj
MFC after:	10 days
Differential Revision:	https://reviews.freebsd.org/D22502
This commit is contained in:
Alan Cox 2019-11-27 20:33:49 +00:00
parent e7065dd1e8
commit 7b3c31acbe

View File

@ -1043,7 +1043,6 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
uint64_t r;
sched_pin();
dsb(ishst);
if (pmap == kernel_pmap) {
r = atop(va);
@ -1054,11 +1053,10 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
}
dsb(ish);
isb();
sched_unpin();
}
static __inline void
pmap_invalidate_range_nopin(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
uint64_t end, r, start;
@ -1079,21 +1077,11 @@ pmap_invalidate_range_nopin(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
isb();
}
/*
 * Invalidate the TLB entries for the given virtual address range in pmap.
 * Thin wrapper that pins the thread to its current CPU around the call to
 * pmap_invalidate_range_nopin().
 *
 * NOTE(review): this is the wrapper being deleted by this commit — the
 * commit message argues the pinning is unnecessary because the underlying
 * invalidation broadcasts to every processor, so a migration mid-call is
 * harmless.
 */
static __inline void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
sched_pin();
pmap_invalidate_range_nopin(pmap, sva, eva);
sched_unpin();
}
static __inline void
pmap_invalidate_all(pmap_t pmap)
{
uint64_t r;
sched_pin();
dsb(ishst);
if (pmap == kernel_pmap) {
__asm __volatile("tlbi vmalle1is");
@ -1103,7 +1091,6 @@ pmap_invalidate_all(pmap_t pmap)
}
dsb(ish);
isb();
sched_unpin();
}
/*
@ -3114,7 +3101,7 @@ pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
* lookup the physical address.
*/
pmap_clear_bits(pte, ATTR_DESCR_VALID);
pmap_invalidate_range_nopin(pmap, va, va + size);
pmap_invalidate_range(pmap, va, va + size);
/* Create the new mapping */
pmap_store(pte, newpte);