Port r324665 and r325285 to arm64.

These changes ensure that reclaim_pv_chunk() can safely be
executed concurrently by multiple threads.

Reviewed by:	alc
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D16304
commit 1f15b0e6c0
parent 9b63ed8bae
Author:	Mark Johnston
Date:	2018-07-18 17:58:17 +00:00

2 changed files with 81 additions and 22 deletions
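
At its core, the port replaces a scan that removed each pv_chunk from the
shared LRU list while examining it with a marker-based scan: each reclaiming
thread brackets the chunks it intends to visit between two stack-allocated
dummy entries whose pc_pmap is NULL, advancing its marker past each chunk
examined, so several threads can traverse pv_chunks concurrently without
stealing entries from one another.  A minimal single-threaded sketch of the
same TAILQ marker pattern, with hypothetical names (node, payload, scan) and
the kernel's locking elided:

/*
 * Sketch of the marker-based LRU scan used by reclaim_pv_chunk();
 * not the kernel code itself.
 */
#include <sys/queue.h>
#include <stdio.h>

struct node {
	int		payload;	/* 0 identifies a marker */
	TAILQ_ENTRY(node) link;
};

static TAILQ_HEAD(, node) head = TAILQ_HEAD_INITIALIZER(head);

static void
scan(void)
{
	struct node marker_b = { 0 }, marker_end_b = { 0 };
	struct node *marker = &marker_b, *marker_end = &marker_end_b, *n;

	/* Bracket the entries that existed when the scan started. */
	TAILQ_INSERT_HEAD(&head, marker, link);
	TAILQ_INSERT_TAIL(&head, marker_end, link);
	while ((n = TAILQ_NEXT(marker, link)) != marker_end) {
		if (n->payload != 0)	/* skip other threads' markers */
			printf("visiting %d\n", n->payload);
		/* Advance: move our marker past the node just examined. */
		TAILQ_REMOVE(&head, marker, link);
		TAILQ_INSERT_AFTER(&head, n, marker, link);
	}
	TAILQ_REMOVE(&head, marker, link);
	TAILQ_REMOVE(&head, marker_end, link);
}

int
main(void)
{
	struct node a = { 1 }, b = { 2 }, c = { 3 };

	TAILQ_INSERT_TAIL(&head, &a, link);
	TAILQ_INSERT_TAIL(&head, &b, link);
	TAILQ_INSERT_TAIL(&head, &c, link);
	scan();
	return (0);
}

Because a reclaimer only ever dereferences the chunk immediately after its
own marker, other threads are free to insert, remove, or rotate neighbouring
chunks between iterations, provided the list lock is held across each
individual list operation.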

--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c

@@ -1827,11 +1827,11 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 static vm_page_t
 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
 {
-	struct pch new_tail;
-	struct pv_chunk *pc;
+	struct pv_chunk *pc, *pc_marker, *pc_marker_end;
+	struct pv_chunk_header pc_marker_b, pc_marker_end_b;
 	struct md_page *pvh;
 	pd_entry_t *pde;
-	pmap_t pmap;
+	pmap_t next_pmap, pmap;
 	pt_entry_t *pte, tpte;
 	pv_entry_t pv;
 	vm_offset_t va;
@@ -1839,31 +1839,65 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
 	struct spglist free;
 	uint64_t inuse;
 	int bit, field, freed, lvl;
+	static int active_reclaims = 0;
 
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
 	pmap = NULL;
 	m_pc = NULL;
 	SLIST_INIT(&free);
-	TAILQ_INIT(&new_tail);
+	bzero(&pc_marker_b, sizeof(pc_marker_b));
+	bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
+	pc_marker = (struct pv_chunk *)&pc_marker_b;
+	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
+
 	mtx_lock(&pv_chunks_mutex);
-	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) {
-		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+	active_reclaims++;
+	TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
+	TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
+	while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
+	    SLIST_EMPTY(&free)) {
+		next_pmap = pc->pc_pmap;
+		if (next_pmap == NULL) {
+			/*
+			 * The next chunk is a marker.  However, it is
+			 * not our marker, so active_reclaims must be
+			 * > 1.  Consequently, the next_chunk code
+			 * will not rotate the pv_chunks list.
+			 */
+			goto next_chunk;
+		}
 		mtx_unlock(&pv_chunks_mutex);
-		if (pmap != pc->pc_pmap) {
+
+		/*
+		 * A pv_chunk can only be removed from the pc_lru list
+		 * when both pv_chunks_mutex is owned and the
+		 * corresponding pmap is locked.
+		 */
+		if (pmap != next_pmap) {
 			if (pmap != NULL && pmap != locked_pmap)
 				PMAP_UNLOCK(pmap);
-			pmap = pc->pc_pmap;
+			pmap = next_pmap;
			/* Avoid deadlock and lock recursion. */
 			if (pmap > locked_pmap) {
 				RELEASE_PV_LIST_LOCK(lockp);
 				PMAP_LOCK(pmap);
-			} else if (pmap != locked_pmap &&
-			    !PMAP_TRYLOCK(pmap)) {
-				pmap = NULL;
-				TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 				mtx_lock(&pv_chunks_mutex);
 				continue;
+			} else if (pmap != locked_pmap) {
+				if (PMAP_TRYLOCK(pmap)) {
+					mtx_lock(&pv_chunks_mutex);
+					continue;
+				} else {
+					pmap = NULL; /* pmap is not locked */
+					mtx_lock(&pv_chunks_mutex);
+					pc = TAILQ_NEXT(pc_marker, pc_lru);
+					if (pc == NULL ||
+					    pc->pc_pmap != next_pmap)
+						continue;
+					goto next_chunk;
+				}
 			}
 		}
@@ -1908,9 +1942,8 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
 			}
 		}
 		if (freed == 0) {
-			TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 			mtx_lock(&pv_chunks_mutex);
-			continue;
+			goto next_chunk;
 		}
 		/* Every freed mapping is for a 4 KB page. */
 		pmap_resident_count_dec(pmap, freed);
@ -1927,16 +1960,36 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
dump_drop_page(m_pc->phys_addr);
mtx_lock(&pv_chunks_mutex);
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
break;
}
TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
mtx_lock(&pv_chunks_mutex);
/* One freed pv entry in locked_pmap is sufficient. */
if (pmap == locked_pmap)
break;
next_chunk:
TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
if (active_reclaims == 1 && pmap != NULL) {
/*
* Rotate the pv chunks list so that we do not
* scan the same pv chunks that could not be
* freed (because they contained a wired
* and/or superpage mapping) on every
* invocation of reclaim_pv_chunk().
*/
while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
MPASS(pc->pc_pmap != NULL);
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
}
}
}
TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
active_reclaims--;
mtx_unlock(&pv_chunks_mutex);
if (pmap != NULL && pmap != locked_pmap)
PMAP_UNLOCK(pmap);
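
The "Avoid deadlock and lock recursion." branch above applies the usual
two-lock ordering discipline: blocking on the next pmap's lock is safe only
when that pmap sorts after locked_pmap, otherwise only a trylock may be
attempted, with the scan resuming from the marker on failure.  A hedged
user-space sketch of that rule, using hypothetical names (lock_second, held,
want) rather than the kernel's PMAP_LOCK machinery:

/*
 * Like the kernel's "pmap > locked_pmap" test, this compares lock
 * addresses to impose a global acquisition order; a sketch only.
 */
#include <pthread.h>
#include <stdbool.h>

static bool
lock_second(pthread_mutex_t *held, pthread_mutex_t *want)
{
	if (want == held)
		return (true);	/* already owned; avoid recursion */
	if (want > held) {
		/* Consistent with the global order: blocking is safe. */
		pthread_mutex_lock(want);
		return (true);
	}
	/*
	 * Against the order: blocking could deadlock with a thread
	 * locking in the opposite direction, so only try, and let the
	 * caller back off and retry when the attempt fails.
	 */
	return (pthread_mutex_trylock(want) == 0);
}

This is also why the trylock-failure path above rechecks that the chunk
after the marker still belongs to next_pmap: pv_chunks_mutex was dropped in
the meantime, so the list may have changed underneath the scan.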

--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h

@@ -84,6 +84,7 @@ struct pmap {
 	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
 	struct vm_radix		pm_root;	/* spare page table pages */
 };
+typedef struct pmap *pmap_t;
 
 typedef struct pv_entry {
 	vm_offset_t		pv_va;	/* virtual address for mapping */
@@ -96,15 +97,20 @@ typedef struct pv_entry {
  */
 #define	_NPCM	3
 #define	_NPCPV	168
-struct pv_chunk {
-	struct pmap *		pc_pmap;
-	TAILQ_ENTRY(pv_chunk)	pc_list;
-	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */
+#define	PV_CHUNK_HEADER							\
+	pmap_t			pc_pmap;				\
+	TAILQ_ENTRY(pv_chunk)	pc_list;				\
+	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */	\
 	TAILQ_ENTRY(pv_chunk)	pc_lru;
-	struct pv_entry		pc_pventry[_NPCPV];
+
+struct pv_chunk_header {
+	PV_CHUNK_HEADER
 };
-typedef struct pmap *pmap_t;
+
+struct pv_chunk {
+	PV_CHUNK_HEADER
+	struct pv_entry		pc_pventry[_NPCPV];
+};
 
 struct thread;
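
The PV_CHUNK_HEADER refactoring lets reclaim_pv_chunk() place a small
struct pv_chunk_header on the stack and treat it as a struct pv_chunk marker
without paying for the 168-entry pc_pventry array.  A standalone sketch of
this shared-prefix idiom, with hypothetical names (item, owner, payload) in
place of the kernel definitions:

/*
 * Both structs begin with an identical member layout, so a pointer to
 * the short header form may be cast to the full type as long as only
 * the shared prefix fields are accessed; a sketch only.
 */
#include <assert.h>
#include <stddef.h>

#define	ITEM_HEADER							\
	void		*owner;		/* NULL identifies a marker */	\
	struct item	*next;

struct item_header {
	ITEM_HEADER
};

struct item {
	ITEM_HEADER
	char		payload[168];	/* only real items carry this */
};

int
main(void)
{
	struct item_header marker_storage = { NULL, NULL };
	struct item *marker = (struct item *)&marker_storage;

	/* The prefix fields line up in both types... */
	assert(offsetof(struct item, next) ==
	    offsetof(struct item_header, next));
	/* ...so a scan can recognize the marker without the full type. */
	assert(marker->owner == NULL);
	return (0);
}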