amd64: Change SFENCE to locked op for synchronizing with CLFLUSHOPT on Intel.
Reviewed by:	cem, jhb
Discussed with:	alc, scottph
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D22007
commit d70bab39f2
parent 98158c753d
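For context on the replacement: on FreeBSD/amd64, atomic_thread_fence_seq_cst() is implemented as a locked read-modify-write rather than MFENCE, and per the Intel SDM a locked operation, like SFENCE, orders CLFLUSHOPT. Below is a minimal sketch contrasting the two fence flavors; the inline asm illustrates typical lowering and is not the kernel's exact implementation.

/* Illustrative only: assumes amd64 and GCC/clang inline asm. */
static inline void
fence_sfence(void)
{
	/* SFENCE orders prior stores (and CLFLUSHOPT) before later stores. */
	__asm __volatile("sfence" ::: "memory");
}

static inline void
fence_locked(void)
{
	/*
	 * A locked no-op RMW acts as a full store/load barrier; per the
	 * Intel SDM it also orders CLFLUSHOPT, which is what this commit
	 * relies on.  Adding zero leaves the memory unchanged.
	 */
	__asm __volatile("lock; addl $0,(%%rsp)" ::: "memory", "cc");
}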
@@ -3053,16 +3053,16 @@ pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
 		/*
-		 * Do per-cache line flush.  Use the sfence
+		 * Do per-cache line flush.  Use a locked
 		 * instruction to insure that previous stores are
 		 * included in the write-back.  The processor
 		 * propagates flush to other processors in the cache
 		 * coherence domain.
 		 */
-		sfence();
+		atomic_thread_fence_seq_cst();
 		for (; sva < eva; sva += cpu_clflush_line_size)
 			clflushopt(sva);
-		sfence();
+		atomic_thread_fence_seq_cst();
 	} else {
 		/*
 		 * Writes are ordered by CLFLUSH on Intel CPUs.
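A hypothetical userspace analog of the pattern in this hunk, using the _mm_clflushopt intrinsic (compile with -mclflushopt); flush_range and CACHE_LINE are my own names, with 64 assumed as a stand-in for cpu_clflush_line_size.

#include <immintrin.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define	CACHE_LINE	64	/* assumption: stand-in for cpu_clflush_line_size */

static void
flush_range(const void *buf, size_t len)
{
	uintptr_t p = (uintptr_t)buf & ~(uintptr_t)(CACHE_LINE - 1);
	uintptr_t end = (uintptr_t)buf + len;

	/* Order earlier stores before the weakly ordered CLFLUSHOPTs. */
	atomic_thread_fence(memory_order_seq_cst);
	for (; p < end; p += CACHE_LINE)
		_mm_clflushopt((void *)p);
	/* Complete the flushes before anything that depends on them. */
	atomic_thread_fence(memory_order_seq_cst);
}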
@@ -3104,7 +3104,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
 		pmap_invalidate_cache();
 	else {
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		for (i = 0; i < count; i++) {
@@ -3118,7 +3118,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
 			}
 		}
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 	}
@@ -3139,10 +3139,10 @@ pmap_flush_cache_range(vm_offset_t sva, vm_offset_t eva)
 	if (pmap_kextract(sva) == lapic_paddr)
 		return;
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 	for (; sva < eva; sva += cpu_clflush_line_size)
 		clwb(sva);
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 void
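pmap_flush_cache_range uses CLWB, which writes a dirty line back while possibly leaving it cached, yet it is weakly ordered like CLFLUSHOPT and needs the same fencing. A short sketch reusing the headers and the CACHE_LINE assumption from the previous example (compile with -mclwb); writeback_range is a hypothetical name.

static void
writeback_range(const void *buf, size_t len)
{
	uintptr_t p = (uintptr_t)buf & ~(uintptr_t)(CACHE_LINE - 1);
	uintptr_t end = (uintptr_t)buf + len;

	atomic_thread_fence(memory_order_seq_cst);
	for (; p < end; p += CACHE_LINE)
		_mm_clwb((void *)p);	/* write back, may keep line cached */
	atomic_thread_fence(memory_order_seq_cst);
}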
@@ -3175,7 +3175,7 @@ pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
 		sched_pin();
 		pte_store(pte, spa | pte_bits);
 		invlpg(vaddr);
-		/* XXXKIB sfences inside flush_cache_range are excessive */
+		/* XXXKIB atomic inside flush_cache_range are excessive */
 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
 		sched_unpin();
 	}
@@ -9504,10 +9504,10 @@ pmap_large_map_wb_fence_mfence(void)
 }
 
 static void
-pmap_large_map_wb_fence_sfence(void)
+pmap_large_map_wb_fence_atomic(void)
 {
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 static void
@@ -9522,7 +9522,7 @@ DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (void))
 		return (pmap_large_map_wb_fence_mfence);
 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
 	    CPUID_STDEXT_CLFLUSHOPT)) == 0)
-		return (pmap_large_map_wb_fence_sfence);
+		return (pmap_large_map_wb_fence_atomic);
 	else
 		/* clflush is strongly enough ordered */
 		return (pmap_large_map_wb_fence_nop);
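DEFINE_IFUNC resolves the fence implementation once at boot from CPU features. A hypothetical userspace equivalent using the GNU ifunc attribute and CPUID; all names and the written-out feature masks are my own assumptions, and the branch structure mirrors the kernel code above.

#include <cpuid.h>
#include <immintrin.h>
#include <stdatomic.h>

/* CPUID.(EAX=7,ECX=0):EBX feature bits, written out as an assumption
 * rather than taken from a header. */
#define	STDEXT_CLFLUSHOPT	(1u << 23)
#define	STDEXT_CLWB		(1u << 24)

static void wb_fence_mfence(void) { _mm_mfence(); }
static void wb_fence_atomic(void) { atomic_thread_fence(memory_order_seq_cst); }
static void wb_fence_nop(void) { }

static int
is_genuine_intel(void)
{
	unsigned a, b, c, d;

	if (__get_cpuid(0, &a, &b, &c, &d) == 0)
		return (0);
	/* "GenuineIntel" spells itself across EBX, EDX, ECX. */
	return (b == 0x756e6547 && d == 0x49656e69 && c == 0x6c65746e);
}

/* Resolver runs once at program load, like the kernel ifunc at boot. */
static void (*resolve_wb_fence(void))(void)
{
	unsigned a, b, c, d;

	if (!is_genuine_intel())
		return (wb_fence_mfence);
	if (__get_cpuid_count(7, 0, &a, &b, &c, &d) == 0 ||
	    (b & (STDEXT_CLWB | STDEXT_CLFLUSHOPT)) == 0)
		return (wb_fence_atomic);
	/* clflush is strongly enough ordered */
	return (wb_fence_nop);
}

void wb_fence(void) __attribute__((ifunc("resolve_wb_fence")));

Callers simply invoke wb_fence(); the dynamic linker binds it to the resolved variant at load time.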