Add an argument to the x86 pmap_invalidate_cache_range() to request
forced invalidation of the cache range regardless of the presence of
the self-snoop feature. Some recent Intel GPUs are not coherent in
some modes, and dirty lines in the CPU cache must be flushed before
the pages are transferred to the GPU domain.

Reviewed by:	alc (previous version)
Tested by:	pho (amd64)
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
commit 30a51a18f4
parent 877e018654
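A minimal caller-side sketch of the new interface (the helper and its names are hypothetical, not part of this diff): passing TRUE bypasses the self-snoop early-out so that dirty lines reach memory before a non-coherent GPU reads them, while the FALSE calls made throughout this commit preserve the previous behavior exactly.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Illustrative only: flush a CPU-dirtied kernel mapping before the
 * backing pages are handed to a non-coherent GPU.  TRUE forces the
 * flush even when the CPU advertises self-snoop (CPUID_SS).
 */
static void
flush_for_gpu(vm_offset_t kva, vm_size_t len)
{

	pmap_invalidate_cache_range(kva, kva + len, TRUE);
}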
@@ -1710,16 +1710,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 #define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-	KASSERT((sva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: sva not page-aligned"));
-	KASSERT((eva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	if (force) {
+		sva &= ~(vm_offset_t)cpu_clflush_line_size;
+	} else {
+		KASSERT((sva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: sva not page-aligned"));
+		KASSERT((eva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	}
 
-	if (cpu_feature & CPUID_SS)
-		; /* If "Self Snoop" is supported, do nothing. */
+	if ((cpu_feature & CPUID_SS) != 0 && !force)
+		; /* If "Self Snoop" is supported and allowed, do nothing. */
 	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
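For context, a sketch of the CLFLUSH branch this function falls into when the self-snoop early-out is skipped (an assumption reconstructed from the visible PMAP_CLFLUSH_THRESHOLD and cpu_clflush_line_size logic; the branch body itself is elided from the hunk above):

	/*
	 * Serialize, flush each line in [sva, eva), then serialize
	 * again so the flushes complete before the caller proceeds.
	 */
	mfence();
	for (; sva < eva; sva += cpu_clflush_line_size)
		clflush(sva);
	mfence();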
@@ -6222,7 +6226,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + tmpsize);
+	pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
 	return ((void *)(va + offset));
 }
 
@@ -6558,7 +6562,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (error);
 }
@@ -394,7 +394,8 @@ void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
 void	pmap_invalidate_all(pmap_t);
 void	pmap_invalidate_cache(void);
 void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+	    boolean_t force);
 void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
 #endif /* _KERNEL */
 
@@ -366,7 +366,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		goto err_unpin;
 	pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
 	pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
-	    (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+	    (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
 
 	pc->obj = obj;
 	ring->private = pc;
@@ -1014,7 +1014,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
 	    1);
 	pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
-	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
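The two drm2/i915 call sites above are setup paths for coherent ring-buffer pages, so they pass FALSE and keep the prior semantics; the forced variant is presumably reserved for the GEM domain-transfer path described in the commit message, which these hunks do not show.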
@@ -1172,16 +1172,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 #define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-	KASSERT((sva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: sva not page-aligned"));
-	KASSERT((eva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	if (force) {
+		sva &= ~(vm_offset_t)cpu_clflush_line_size;
+	} else {
+		KASSERT((sva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: sva not page-aligned"));
+		KASSERT((eva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	}
 
-	if (cpu_feature & CPUID_SS)
-		; /* If "Self Snoop" is supported, do nothing. */
+	if ((cpu_feature & CPUID_SS) != 0 && !force)
+		; /* If "Self Snoop" is supported and allowed, do nothing. */
 	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
@@ -5164,7 +5168,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + size);
+	pmap_invalidate_cache_range(va, va + size, FALSE);
 	return ((void *)(va + offset));
 }
 
@@ -5370,7 +5374,7 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (0);
 }
@@ -813,7 +813,7 @@ sf_buf_invalidate(struct sf_buf *sf)
 	 * settings are recalculated.
 	 */
 	pmap_qenter(sf->kva, &m, 1);
-	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
+	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
 }
 
 /*
@@ -458,7 +458,8 @@ void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
 void	pmap_invalidate_all(pmap_t);
 void	pmap_invalidate_cache(void);
 void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+	    boolean_t force);
 
 #endif /* _KERNEL */
 