MFC r278153,284416: ttm memory allocation improvements

If vm_page_alloc_contig() fails in the ttm page allocators, do what
other callers of vm_page_alloc_contig() do: retry after
vm_pageout_grow_cache().
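
A condensed sketch of that retry pattern, pulled out into a hypothetical
standalone helper for illustration (the committed change is written
inline in ttm_bo_global_init(), in the first hunk below):

	/* Hypothetical helper; mirrors the committed retry logic. */
	static vm_page_t
	dummy_read_page_alloc(void)
	{
		vm_page_t p;
		int tries;

		for (tries = 0; ; tries++) {
			p = vm_page_alloc_contig(NULL, 0,
			    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ, 1, 0,
			    VM_MAX_ADDRESS, PAGE_SIZE, 0,
			    VM_MEMATTR_UNCACHEABLE);
			if (p != NULL || tries >= 1)
				return (p); /* success, or the single retry is spent */
			/* Free up cached pages, then retry the allocation once. */
			vm_pageout_grow_cache(tries, 0, VM_MAX_ADDRESS);
		}
	}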

ttm_vm_page_alloc: use vm_page_alloc for pages without dma32 restriction
This change re-organizes the code a little bit to extract the common
pieces of ttm_alloc_new_pages() and ttm_get_pages() into dedicated
functions. Also, for requests without address restrictions the regular
vm_page_alloc() is used.
Lastly, when vm_page_alloc_contig() fails we call VM_WAIT before calling
vm_pageout_grow_cache(), to ensure that there are enough free pages at
all.
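
A usage sketch of the resulting helper (a hypothetical call site,
assuming the tt_wc write-combined state from enum ttm_caching_state):

	/* Hypothetical call site, for illustration only. */
	static int
	example_alloc_dma32_page(vm_page_t *out)
	{
		/*
		 * TTM_PAGE_FLAG_DMA32 routes to vm_page_alloc_contig() and
		 * can still fail after its retries; without the flag the
		 * request is served by vm_page_alloc(), which only sleeps
		 * and never fails.
		 */
		*out = ttm_vm_page_alloc(
		    TTM_PAGE_FLAG_ZERO_ALLOC | TTM_PAGE_FLAG_DMA32, tt_wc);
		return (*out == NULL ? -ENOMEM : 0);
	}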

Note: no MFC to stable/9 because it lacks vm_pageout_grow_cache().
Committed by:	avg
Date:		2015-07-01 11:28:42 +00:00
Commit:		3a7588b223 (parent b6a3e408bb)

2 changed files with 74 additions and 21 deletions

sys/dev/drm2/ttm/ttm_bo.c

@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
 #include <dev/drm2/ttm/ttm_module.h>
 #include <dev/drm2/ttm/ttm_bo_driver.h>
 #include <dev/drm2/ttm/ttm_placement.h>
+#include <vm/vm_pageout.h>
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -1489,15 +1490,23 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 	    container_of(ref, struct ttm_bo_global_ref, ref);
 	struct ttm_bo_global *glob = ref->object;
 	int ret;
+	int tries;
 
 	sx_init(&glob->device_list_mutex, "ttmdlm");
 	mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
 	glob->mem_glob = bo_ref->mem_glob;
 
+	tries = 0;
+retry:
 	glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
 	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ,
 	    1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
 	if (unlikely(glob->dummy_read_page == NULL)) {
+		if (tries < 1) {
+			vm_pageout_grow_cache(tries, 0, VM_MAX_ADDRESS);
+			tries++;
+			goto retry;
+		}
 		ret = -ENOMEM;
 		goto out_no_drp;
 	}

sys/dev/drm2/ttm/ttm_page_alloc.c

@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
 #include <dev/drm2/drmP.h>
 #include <dev/drm2/ttm/ttm_bo_driver.h>
 #include <dev/drm2/ttm/ttm_page_alloc.h>
+#include <vm/vm_pageout.h>
 
 #define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(vm_page_t))
 #define SMALL_ALLOCATION	16
@@ -154,6 +155,66 @@ ttm_caching_state_to_vm(enum ttm_caching_state cstate)
 	panic("caching state %d\n", cstate);
 }
+static vm_page_t
+ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
+{
+	vm_page_t p;
+	int tries;
+
+	for (tries = 0; ; tries++) {
+		p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
+		    PAGE_SIZE, 0, memattr);
+		if (p != NULL || tries > 2)
+			return (p);
+
+		/*
+		 * Before growing the cache see if this is just a normal
+		 * memory shortage.
+		 */
+		VM_WAIT;
+		vm_pageout_grow_cache(tries, 0, 0xffffffff);
+	}
+}
+
+static vm_page_t
+ttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
+{
+	vm_page_t p;
+
+	while (1) {
+		p = vm_page_alloc(NULL, 0, req);
+		if (p != NULL)
+			break;
+		VM_WAIT;
+	}
+	pmap_page_set_memattr(p, memattr);
+	return (p);
+}
+
+static vm_page_t
+ttm_vm_page_alloc(int flags, enum ttm_caching_state cstate)
+{
+	vm_page_t p;
+	vm_memattr_t memattr;
+	int req;
+
+	memattr = ttm_caching_state_to_vm(cstate);
+	req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
+	if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
+		req |= VM_ALLOC_ZERO;
+
+	if ((flags & TTM_PAGE_FLAG_DMA32) != 0)
+		p = ttm_vm_page_alloc_dma32(req, memattr);
+	else
+		p = ttm_vm_page_alloc_any(req, memattr);
+
+	if (p != NULL) {
+		p->oflags &= ~VPO_UNMANAGED;
+		p->flags |= PG_FICTITIOUS;
+	}
+
+	return (p);
+}
 
 static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
 {
@@ -472,23 +533,16 @@ static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
 	vm_page_t *caching_array;
 	vm_page_t p;
 	int r = 0;
-	unsigned i, cpages, aflags;
+	unsigned i, cpages;
 	unsigned max_cpages = min(count,
 	    (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
 
-	aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
-	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
-	    VM_ALLOC_ZERO : 0);
-
 	/* allocate array for page caching change */
 	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
 	    M_WAITOK | M_ZERO);
 
 	for (i = 0, cpages = 0; i < count; ++i) {
-		p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
-		    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
-		    VM_MAX_ADDRESS, PAGE_SIZE, 0,
-		    ttm_caching_state_to_vm(cstate));
+		p = ttm_vm_page_alloc(ttm_alloc_flags, cstate);
 		if (!p) {
 			printf("[TTM] Unable to get page %u\n", i);
@@ -505,8 +559,6 @@ static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
 			r = -ENOMEM;
 			goto out;
 		}
-		p->oflags &= ~VPO_UNMANAGED;
-		p->flags |= PG_FICTITIOUS;
 
 #ifdef CONFIG_HIGHMEM /* KIB: nop */
 		/* gfp flags of highmem page should never be dma32 so we
@@ -688,26 +740,18 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct pglist plist;
 	vm_page_t p = NULL;
-	int gfp_flags, aflags;
+	int gfp_flags;
 	unsigned count;
 	int r;
 
-	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
-	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
-
 	/* No pool for cached pages */
 	if (pool == NULL) {
 		for (r = 0; r < npages; ++r) {
-			p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
-			    (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
-			    VM_MAX_ADDRESS, PAGE_SIZE,
-			    0, ttm_caching_state_to_vm(cstate));
+			p = ttm_vm_page_alloc(flags, cstate);
 			if (!p) {
 				printf("[TTM] Unable to allocate page\n");
 				return -ENOMEM;
 			}
-			p->oflags &= ~VPO_UNMANAGED;
-			p->flags |= PG_FICTITIOUS;
 			pages[r] = p;
 		}
 		return 0;