Re-count available PV entries after reclaiming a PV chunk.
The call to reclaim_pv_chunk() in reserve_pv_entries() may free a PV chunk with free entries belonging to the current pmap. In this case we must account for the free entries that were reclaimed, or reserve_pv_entries() may return without having reserved the requested number of entries.

Reviewed by:	alc, kib
Tested by:	pho (previous version)
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D15911
parent d1f322bddb
commit c9e59b6914
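The defect is easy to reproduce outside the kernel. The sketch below is a user-space model, not kernel code: struct toy_pmap, chunk_alloc(), reclaim(), and the NPCPV value are invented stand-ins, and reclaim() hard-codes the problem case in which the reclaimed chunk belonged to the current pmap. Without the re-count, the loop exits believing it reserved 20 entries while only 18 are actually free; with it, at least 20 are reserved.

/*
 * Build and run: cc -o pv_sketch pv_sketch.c && ./pv_sketch
 */
#include <stdbool.h>
#include <stdio.h>

#define	NPCPV	8		/* entries per chunk; amd64's _NPCPV is 168 */

/* Toy pmap: just the number of free PV entries across its chunks. */
struct toy_pmap {
	int	free_entries;
};

static bool fail_next_alloc;	/* force one allocation failure */

/* Stand-in for vm_page_alloc(): fails once so the loop must reclaim. */
static bool
chunk_alloc(void)
{
	if (fail_next_alloc) {
		fail_next_alloc = false;
		return (false);
	}
	return (true);
}

/*
 * Stand-in for reclaim_pv_chunk(): models the problem case, in which
 * the reclaimed chunk belonged to the current pmap, so its free
 * entries vanish along with it.
 */
static void
reclaim(struct toy_pmap *pm)
{
	if (pm->free_entries >= NPCPV)
		pm->free_entries -= NPCPV;
}

/* The reservation loop; "recount" enables the fix from this commit. */
static void
reserve(struct toy_pmap *pm, int needed, bool recount)
{
	bool reclaimed;
	int avail;

retry:
	avail = pm->free_entries;	/* models the counting loop */
	for (reclaimed = false; avail < needed; avail += NPCPV) {
		if (!chunk_alloc()) {
			reclaim(pm);	/* the reclaim supplies the page */
			reclaimed = true;
		}
		pm->free_entries += NPCPV;	/* new chunk, all free */
		if (recount && reclaimed)
			goto retry;	/* the fix: re-count avail */
	}
}

int
main(void)
{
	struct toy_pmap pm;
	const int needed = 20;

	pm.free_entries = 10;
	fail_next_alloc = true;
	reserve(&pm, needed, false);
	printf("without fix: needed %d, have %d\n", needed, pm.free_entries);

	pm.free_entries = 10;
	fail_next_alloc = true;
	reserve(&pm, needed, true);
	printf("with fix:    needed %d, have %d\n", needed, pm.free_entries);
	return (0);
}

Since reclaim_pv_chunk() is already the allocation slow path, the extra pass over the chunk list after a successful reclaim costs little relative to the reclaim itself.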
@@ -3539,8 +3539,9 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 {
 	struct pch new_tail;
 	struct pv_chunk *pc;
-	int avail, free;
 	vm_page_t m;
+	int avail, free;
+	bool reclaimed;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
@@ -3568,13 +3569,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		if (avail >= needed)
 			break;
 	}
-	for (; avail < needed; avail += _NPCPV) {
+	for (reclaimed = false; avail < needed; avail += _NPCPV) {
 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 		    VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
 				goto retry;
+			reclaimed = true;
 		}
 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
@@ -3587,6 +3589,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+
+		/*
+		 * The reclaim might have freed a chunk from the current pmap.
+		 * If that chunk contained available entries, we need to
+		 * re-count the number of available entries.
+		 */
+		if (reclaimed)
+			goto retry;
 	}
 	if (!TAILQ_EMPTY(&new_tail)) {
 		mtx_lock(&pv_chunks_mutex);
The same change, applied by this commit to a second pmap implementation:

@@ -2083,8 +2083,9 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 {
 	struct pch new_tail;
 	struct pv_chunk *pc;
-	int avail, free;
 	vm_page_t m;
+	int avail, free;
+	bool reclaimed;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
@@ -2107,13 +2108,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		if (avail >= needed)
 			break;
 	}
-	for (; avail < needed; avail += _NPCPV) {
+	for (reclaimed = false; avail < needed; avail += _NPCPV) {
 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 		    VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
 				goto retry;
+			reclaimed = true;
 		}
 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
@@ -2126,6 +2128,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+
+		/*
+		 * The reclaim might have freed a chunk from the current pmap.
+		 * If that chunk contained available entries, we need to
+		 * re-count the number of available entries.
+		 */
+		if (reclaimed)
+			goto retry;
 	}
 	if (!TAILQ_EMPTY(&new_tail)) {
 		mtx_lock(&pv_chunks_mutex);