ttm_vm_page_alloc: use vm_page_alloc for pages without dma32 restriction
This change reorganizes the code a little to extract the common pieces of
ttm_alloc_new_pages() and ttm_get_pages() into dedicated functions.  Also,
for requests without address restrictions the regular vm_page_alloc() is
now used.  Lastly, when vm_page_alloc_contig() fails, VM_WAIT is called
before vm_pageout_grow_cache() to make sure that there are enough free
pages at all.

Reviewed by:	kib
MFC after:	15 days
commit 1aac2948e9
parent 2a3ec90af9
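
For orientation, below is a condensed, annotated sketch of the allocation
strategy described in the commit message.  It mirrors the new helpers
introduced by the diff that follows (ttm_vm_page_alloc_dma32(),
ttm_vm_page_alloc_any() and ttm_vm_page_alloc()); the name
ttm_vm_page_alloc_sketch() is illustrative only and is not part of the
committed code.

/*
 * Illustrative summary only -- see the diff below for the committed code.
 */
static vm_page_t
ttm_vm_page_alloc_sketch(int flags, enum ttm_caching_state cstate)
{
        vm_memattr_t memattr;
        vm_page_t p;
        int req, tries;

        memattr = ttm_caching_state_to_vm(cstate);
        req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
        if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
                req |= VM_ALLOC_ZERO;

        if ((flags & TTM_PAGE_FLAG_DMA32) != 0) {
                /* DMA32-restricted request: contiguous allocation below 4GB. */
                for (tries = 0; ; tries++) {
                        p = vm_page_alloc_contig(NULL, 0, req, 1, 0,
                            0xffffffff, PAGE_SIZE, 0, memattr);
                        if (p != NULL || tries > 2)
                                break;
                        /* Rule out a plain memory shortage first ... */
                        VM_WAIT;
                        /* ... then grow the cache within the address range. */
                        vm_pageout_grow_cache(tries, 0, 0xffffffff);
                }
        } else {
                /* Unrestricted request: regular allocator, sleep on shortage. */
                while ((p = vm_page_alloc(NULL, 0, req)) == NULL)
                        VM_WAIT;
                pmap_page_set_memattr(p, memattr);
        }

        /* Same page-flag adjustments as in the committed helper below. */
        if (p != NULL) {
                p->oflags &= ~VPO_UNMANAGED;
                p->flags |= PG_FICTITIOUS;
        }
        return (p);
}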
@@ -155,6 +155,66 @@ ttm_caching_state_to_vm(enum ttm_caching_state cstate)
         panic("caching state %d\n", cstate);
 }
 
+static vm_page_t
+ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr)
+{
+        vm_page_t p;
+        int tries;
+
+        for (tries = 0; ; tries++) {
+                p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
+                    PAGE_SIZE, 0, memattr);
+                if (p != NULL || tries > 2)
+                        return (p);
+
+                /*
+                 * Before growing the cache see if this is just a normal
+                 * memory shortage.
+                 */
+                VM_WAIT;
+                vm_pageout_grow_cache(tries, 0, 0xffffffff);
+        }
+}
+
+static vm_page_t
+ttm_vm_page_alloc_any(int req, vm_memattr_t memattr)
+{
+        vm_page_t p;
+
+        while (1) {
+                p = vm_page_alloc(NULL, 0, req);
+                if (p != NULL)
+                        break;
+                VM_WAIT;
+        }
+        pmap_page_set_memattr(p, memattr);
+        return (p);
+}
+
+static vm_page_t
+ttm_vm_page_alloc(int flags, enum ttm_caching_state cstate)
+{
+        vm_page_t p;
+        vm_memattr_t memattr;
+        int req;
+
+        memattr = ttm_caching_state_to_vm(cstate);
+        req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
+        if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
+                req |= VM_ALLOC_ZERO;
+
+        if ((flags & TTM_PAGE_FLAG_DMA32) != 0)
+                p = ttm_vm_page_alloc_dma32(req, memattr);
+        else
+                p = ttm_vm_page_alloc_any(req, memattr);
+
+        if (p != NULL) {
+                p->oflags &= ~VPO_UNMANAGED;
+                p->flags |= PG_FICTITIOUS;
+        }
+        return (p);
+}
+
 static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
 {
 
@@ -461,14 +521,6 @@ static void ttm_handle_caching_state_failure(struct pglist *pages,
         }
 }
 
-static vm_paddr_t
-ttm_alloc_high_bound(int ttm_alloc_flags)
-{
-
-        return ((ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
-            VM_MAX_ADDRESS);
-}
-
 /**
  * Allocate new pages with correct caching.
  *
@@ -481,32 +533,17 @@ static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
         vm_page_t *caching_array;
         vm_page_t p;
         int r = 0;
-        unsigned i, cpages, aflags;
+        unsigned i, cpages;
         unsigned max_cpages = min(count,
             (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
-        int tries;
-
-        aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
-            ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
-            VM_ALLOC_ZERO : 0);
 
         /* allocate array for page caching change */
         caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
             M_WAITOK | M_ZERO);
 
         for (i = 0, cpages = 0; i < count; ++i) {
-                tries = 0;
-retry:
-                p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
-                    ttm_alloc_high_bound(ttm_alloc_flags),
-                    PAGE_SIZE, 0, ttm_caching_state_to_vm(cstate));
+                p = ttm_vm_page_alloc(ttm_alloc_flags, cstate);
                 if (!p) {
-                        if (tries < 3) {
-                                vm_pageout_grow_cache(tries, 0,
-                                    ttm_alloc_high_bound(ttm_alloc_flags));
-                                tries++;
-                                goto retry;
-                        }
                         printf("[TTM] Unable to get page %u\n", i);
 
                         /* store already allocated pages in the pool after
@@ -522,8 +559,6 @@ retry:
                         r = -ENOMEM;
                         goto out;
                 }
-                p->oflags &= ~VPO_UNMANAGED;
-                p->flags |= PG_FICTITIOUS;
 
 #ifdef CONFIG_HIGHMEM /* KIB: nop */
                 /* gfp flags of highmem page should never be dma32 so we
@@ -705,34 +740,18 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
         struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
         struct pglist plist;
         vm_page_t p = NULL;
-        int gfp_flags, aflags;
+        int gfp_flags;
         unsigned count;
         int r;
-        int tries;
-
-        aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
-            ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
 
         /* No pool for cached pages */
         if (pool == NULL) {
                 for (r = 0; r < npages; ++r) {
-                        tries = 0;
-retry:
-                        p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
-                            ttm_alloc_high_bound(flags), PAGE_SIZE,
-                            0, ttm_caching_state_to_vm(cstate));
+                        p = ttm_vm_page_alloc(flags, cstate);
                         if (!p) {
-                                if (tries < 3) {
-                                        vm_pageout_grow_cache(tries, 0,
-                                            ttm_alloc_high_bound(flags));
-                                        tries++;
-                                        goto retry;
-                                }
                                 printf("[TTM] Unable to allocate page\n");
                                 return -ENOMEM;
                         }
-                        p->oflags &= ~VPO_UNMANAGED;
-                        p->flags |= PG_FICTITIOUS;
                         pages[r] = p;
                 }
                 return 0;