From d70bab39f2e510348574c12afe8eac1c43e839a0 Mon Sep 17 00:00:00 2001
From: Konstantin Belousov <kib@FreeBSD.org>
Date: Sun, 10 Nov 2019 09:41:29 +0000
Subject: [PATCH] amd64: Change SFENCE to locked op for synchronizing with
 CLFLUSHOPT on Intel.

Reviewed by:	cem, jhb
Discussed with:	alc, scottph
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D22007
---
 sys/amd64/amd64/pmap.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c2dea273add2..226e2c3cbf12 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3053,16 +3053,16 @@ pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
 
 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
 		/*
-		 * Do per-cache line flush.  Use the sfence
+		 * Do per-cache line flush.  Use a locked
 		 * instruction to insure that previous stores are
 		 * included in the write-back.  The processor
 		 * propagates flush to other processors in the cache
 		 * coherence domain.
 		 */
-		sfence();
+		atomic_thread_fence_seq_cst();
 		for (; sva < eva; sva += cpu_clflush_line_size)
 			clflushopt(sva);
-		sfence();
+		atomic_thread_fence_seq_cst();
 	} else {
 		/*
 		 * Writes are ordered by CLFLUSH on Intel CPUs.
@@ -3104,7 +3104,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
 		pmap_invalidate_cache();
 	else {
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		for (i = 0; i < count; i++) {
@@ -3118,7 +3118,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
 			}
 		}
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 	}
@@ -3139,10 +3139,10 @@ pmap_flush_cache_range(vm_offset_t sva, vm_offset_t eva)
 	if (pmap_kextract(sva) == lapic_paddr)
 		return;
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 	for (; sva < eva; sva += cpu_clflush_line_size)
 		clwb(sva);
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 void
@@ -3175,7 +3175,7 @@ pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
 		sched_pin();
 		pte_store(pte, spa | pte_bits);
 		invlpg(vaddr);
-		/* XXXKIB sfences inside flush_cache_range are excessive */
+		/* XXXKIB atomic inside flush_cache_range are excessive */
 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
 		sched_unpin();
 	}
@@ -9504,10 +9504,10 @@ pmap_large_map_wb_fence_mfence(void)
 }
 
 static void
-pmap_large_map_wb_fence_sfence(void)
+pmap_large_map_wb_fence_atomic(void)
 {
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 static void
@@ -9522,7 +9522,7 @@ DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (void))
 		return (pmap_large_map_wb_fence_mfence);
 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
 	    CPUID_STDEXT_CLFLUSHOPT)) == 0)
-		return (pmap_large_map_wb_fence_atomic);
+		return (pmap_large_map_wb_fence_atomic);
 	else
 		/* clflush is strongly enough ordered */
 		return (pmap_large_map_wb_fence_nop);
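
For readers outside the kernel tree, the following standalone C sketch (not part of the patch) illustrates the pattern the change adopts: bracketing a CLFLUSHOPT loop with sequentially consistent fences, which CLFLUSHOPT is ordered against, instead of SFENCE. The function name flush_range, the fixed CACHE_LINE_SIZE, and the userspace <stdatomic.h>/<immintrin.h> intrinsics are illustrative assumptions; the kernel code itself uses atomic_thread_fence_seq_cst(), clflushopt(), and cpu_clflush_line_size as shown in the diff above.

#include <stddef.h>
#include <stdatomic.h>
#include <immintrin.h>		/* _mm_clflushopt(); build with -mclflushopt */

#define	CACHE_LINE_SIZE	64	/* assumed; the kernel uses cpu_clflush_line_size */

static void
flush_range(void *buf, size_t len)
{
	char *p, *end;

	p = buf;
	end = p + len;

	/* Order earlier stores before the per-line flushes. */
	atomic_thread_fence(memory_order_seq_cst);
	for (; p < end; p += CACHE_LINE_SIZE)
		_mm_clflushopt(p);	/* write back one cache line */
	/* Ensure the flushes complete before anything that follows. */
	atomic_thread_fence(memory_order_seq_cst);
}

On x86-64 the compiler emits MFENCE or a locked read-modify-write for these C11 fences; both order CLFLUSHOPT, whereas the kernel primitive atomic_thread_fence_seq_cst() used in the patch is the cheaper locked-op form.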