Make linuxkpi's alloc_pages() consistently return wired pages.

Previously it returned wired pages only on platforms without a direct
map.  The new behaviour also more closely matches Linux's semantics.

Since some DRM v5.0 code assumes the old behaviour, use a
LINUXKPI_VERSION guard to preserve that until the out-of-tree module
is updated.

Reviewed by:	hselasky, kib (earlier versions), johalun
MFC after:	1 week
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D20502
commit 1ef5e651fd
parent c080655467
Author: Mark Johnston
Date:   2019-06-06 16:09:19 +00:00

2 changed files with 23 additions and 5 deletions
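
As a point of reference, the sketch below (not part of this commit; the
consumer functions are hypothetical) shows what the new semantics mean for
callers: pages returned by alloc_page()/alloc_pages() are now wired on
every platform, and linux_free_pages() unwires them again when they are
released.

#include <linux/gfp.h>

/*
 * Hypothetical LinuxKPI consumer: allocate a zeroed, order-2 run of
 * pages.  After this change the pages come back wired everywhere,
 * not only on platforms without a direct map.
 */
static struct page *
example_alloc_buffer(void)
{

	return (alloc_pages(GFP_KERNEL | __GFP_ZERO, 2));
}

/*
 * __free_pages() lands in linux_free_pages(), which now unwires each
 * page before freeing it, so the consumer does not manage wiring.
 */
static void
example_free_buffer(struct page *p)
{

	__free_pages(p, 2);
}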


@@ -52,12 +52,15 @@
 #define __GFP_RETRY_MAYFAIL 0
 #define __GFP_MOVABLE 0
 #define __GFP_COMP 0
 #define __GFP_KSWAPD_RECLAIM 0
 #define __GFP_IO 0
 #define __GFP_NO_KSWAPD 0
 #define __GFP_WAIT M_WAITOK
 #define __GFP_DMA32 (1U << 24) /* LinuxKPI only */
+#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION == 50000
+#define __GFP_NOTWIRED (1U << 25)
+#endif
 #define __GFP_BITS_SHIFT 25
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
 #define __GFP_NOFAIL M_WAITOK
@@ -74,7 +77,7 @@
 #define GFP_TEMPORARY M_NOWAIT
 #define GFP_NATIVE_MASK (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_ZERO)
 #define GFP_TRANSHUGE 0
 #define GFP_TRANSHUGE_LIGHT 0
 CTASSERT((__GFP_DMA32 & GFP_NATIVE_MASK) == 0);
 CTASSERT((__GFP_BITS_MASK & GFP_NATIVE_MASK) == GFP_NATIVE_MASK);
@@ -98,6 +101,9 @@ static inline struct page *
 alloc_page(gfp_t flags)
 {
+#ifdef __GFP_NOTWIRED
+	flags |= __GFP_NOTWIRED;
+#endif
 	return (linux_alloc_pages(flags, 0));
 }
@@ -105,6 +111,9 @@ static inline struct page *
 alloc_pages(gfp_t flags, unsigned int order)
 {
+#ifdef __GFP_NOTWIRED
+	flags |= __GFP_NOTWIRED;
+#endif
 	return (linux_alloc_pages(flags, order));
 }
@@ -112,6 +121,9 @@ static inline struct page *
 alloc_pages_node(int node_id, gfp_t flags, unsigned int order)
 {
+#ifdef __GFP_NOTWIRED
+	flags |= __GFP_NOTWIRED;
+#endif
 	return (linux_alloc_pages(flags, order));
 }
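
For completeness, a hedged sketch of the compatibility path added above,
assuming the out-of-tree DRM v5.0 module builds with LINUXKPI_VERSION set
to 50000 (defining the macro directly in a source file here is purely
illustrative; a real module would pass it via its build flags):

#define	LINUXKPI_VERSION	50000	/* assumed to come from the module build */
#include <linux/gfp.h>

static struct page *
legacy_drm_alloc(void)
{

	/*
	 * With the guard satisfied, __GFP_NOTWIRED is defined, the
	 * alloc_page*() wrappers OR it into the flags, and
	 * linux_alloc_pages() clears VM_ALLOC_WIRED again, so this
	 * call still returns unwired pages, preserving the behaviour
	 * the DRM v5.0 code expects until it is updated.
	 */
	return (alloc_pages(GFP_KERNEL, 0));
}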


@@ -91,9 +91,14 @@ linux_alloc_pages(gfp_t flags, unsigned int order)
 	if (PMAP_HAS_DMAP) {
 		unsigned long npages = 1UL << order;
-		int req = (flags & M_ZERO) ? (VM_ALLOC_ZERO | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_NORMAL) : (VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL);
+		int req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_NORMAL;
+#ifdef __GFP_NOTWIRED
+		if ((flags & __GFP_NOTWIRED) != 0)
+			req &= ~VM_ALLOC_WIRED;
+#endif
+		if ((flags & M_ZERO) != 0)
+			req |= VM_ALLOC_ZERO;
 		if (order == 0 && (flags & GFP_DMA32) == 0) {
 			page = vm_page_alloc(NULL, 0, req);
 			if (page == NULL)
@@ -154,7 +159,8 @@ linux_free_pages(vm_page_t page, unsigned int order)
 			vm_page_t pgo = page + x;
 			vm_page_lock(pgo);
-			vm_page_free(pgo);
+			if (vm_page_unwire_noq(pgo))
+				vm_page_free(pgo);
 			vm_page_unlock(pgo);
 		}
 	} else {