From c9e59b691404ba51907f478f2f571b37ccf9dce0 Mon Sep 17 00:00:00 2001
From: markj
Date: Sat, 23 Jun 2018 10:41:52 +0000
Subject: [PATCH] Re-count available PV entries after reclaiming a PV chunk.

The call to reclaim_pv_chunk() in reserve_pv_entries() may free a PV
chunk with free entries belonging to the current pmap. In this case we
must account for the free entries that were reclaimed, or
reserve_pv_entries() may return without having reserved the requested
number of entries.

Reviewed by:	alc, kib
Tested by:	pho (previous version)
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D15911
---
 sys/amd64/amd64/pmap.c | 14 ++++++++++++--
 sys/arm64/arm64/pmap.c | 14 ++++++++++++--
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c6bf923e85de..99ecdd61fb3f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3539,8 +3539,9 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 {
 	struct pch new_tail;
 	struct pv_chunk *pc;
-	int avail, free;
 	vm_page_t m;
+	int avail, free;
+	bool reclaimed;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
@@ -3568,13 +3569,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		if (avail >= needed)
 			break;
 	}
-	for (; avail < needed; avail += _NPCPV) {
+	for (reclaimed = false; avail < needed; avail += _NPCPV) {
 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 		    VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
 				goto retry;
+			reclaimed = true;
 		}
 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
@@ -3587,6 +3589,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+
+		/*
+		 * The reclaim might have freed a chunk from the current pmap.
+		 * If that chunk contained available entries, we need to
+		 * re-count the number of available entries.
+		 */
+		if (reclaimed)
+			goto retry;
 	}
 	if (!TAILQ_EMPTY(&new_tail)) {
 		mtx_lock(&pv_chunks_mutex);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index f4c0c4fe59bc..29724f17ef0a 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -2083,8 +2083,9 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 {
 	struct pch new_tail;
 	struct pv_chunk *pc;
-	int avail, free;
 	vm_page_t m;
+	int avail, free;
+	bool reclaimed;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
@@ -2107,13 +2108,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		if (avail >= needed)
 			break;
 	}
-	for (; avail < needed; avail += _NPCPV) {
+	for (reclaimed = false; avail < needed; avail += _NPCPV) {
 		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 		    VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
 				goto retry;
+			reclaimed = true;
 		}
 		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
@@ -2126,6 +2128,14 @@ reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
+
+		/*
+		 * The reclaim might have freed a chunk from the current pmap.
+		 * If that chunk contained available entries, we need to
+		 * re-count the number of available entries.
+		 */
+		if (reclaimed)
+			goto retry;
 	}
 	if (!TAILQ_EMPTY(&new_tail)) {
 		mtx_lock(&pv_chunks_mutex);
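For readers outside the kernel tree, the control flow that the patch introduces
can be illustrated with a minimal standalone sketch. Everything below (struct
pool, ENTRIES_PER_CHUNK, chunk_alloc(), pool_reclaim(), pool_reserve()) is
hypothetical and only mimics the shape of reserve_pv_entries(); it is not the
kernel code. The point it shows: when an allocation failure is satisfied by
reclaiming, the reclaim may have handed free entries back to the current pool,
so the reservation loop re-counts rather than trusting its local tally.

#include <stdbool.h>
#include <stdio.h>

#define	ENTRIES_PER_CHUNK	4

struct pool {
	int	avail;		/* entries currently free in this pool */
	int	chunks;		/* chunks owned by this pool */
};

/* Pretend page allocator: fail the first call to force the reclaim path. */
static bool
chunk_alloc(void)
{
	static int calls;

	return (calls++ != 0);
}

/*
 * Pretend reclaim: frees a chunk elsewhere and, as a side effect, returns
 * some free entries to the current pool (the case the patch accounts for).
 */
static bool
pool_reclaim(struct pool *p)
{

	p->avail += 2;		/* entries handed back to the current pool */
	return (true);
}

/* Reserve at least "needed" entries, re-counting after any reclaim. */
static void
pool_reserve(struct pool *p, int needed)
{
	bool reclaimed;
	int avail;

retry:
	/* Count what the pool already has available. */
	avail = p->avail;
	for (reclaimed = false; avail < needed; avail += ENTRIES_PER_CHUNK) {
		if (!chunk_alloc()) {
			if (!pool_reclaim(p))
				goto retry;
			reclaimed = true;
		}
		p->chunks++;
		p->avail += ENTRIES_PER_CHUNK;

		/*
		 * The reclaim may have changed p->avail behind the local
		 * count, so start over and re-count.
		 */
		if (reclaimed)
			goto retry;
	}
}

int
main(void)
{
	struct pool p = { .avail = 1, .chunks = 0 };

	pool_reserve(&p, 9);
	printf("chunks=%d avail=%d\n", p.chunks, p.avail);
	return (0);
}

Jumping back to retry mirrors the patch: rather than computing how many entries
the reclaim returned, the loop simply re-counts what is now available.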