For i386 temporary mappings, unpin the thread before releasing
the cmap lock.  Releasing the lock first may result in the thread
being immediately rescheduled and bound to the same CPU, only to
unpin itself upon resuming execution.

Noted by:	skra (in review for armv6 equivalent)
MFC after:	1 week
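
To illustrate the ordering, here is a minimal sketch of the pin/lock
pattern these functions use, with the fixed teardown order.  The
struct and function names are hypothetical stand-ins (the real code
operates on the i386 per-CPU pcpu fields shown in the diff below):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/sched.h>

	struct cmap_pcpu {			/* hypothetical stand-in for struct pcpu */
		struct mtx	pc_cmap_lock;	/* serializes the per-CPU temporary PTEs */
	};

	static void
	cmap_use_sketch(struct cmap_pcpu *pc)
	{
		sched_pin();			/* the temporary mapping is per-CPU; stay on it */
		mtx_lock(&pc->pc_cmap_lock);
		/* ... install the temporary PTE and touch the page ... */
		sched_unpin();			/* unpin while still holding the lock ... */
		mtx_unlock(&pc->pc_cmap_lock);	/* ... so a preemption after unlock may migrate us */
	}

With the old order (unlock, then unpin), a preemption between the two
calls had to put the still-pinned thread back on the same CPU solely
so it could run sched_unpin().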
Author:	Jason A. Harmening
Date:	2017-01-14 09:56:01 +00:00
Parent:	ed2159c92c
Commit:	28699efd43
Notes:	svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=312153


@@ -4216,8 +4216,8 @@ pmap_zero_page(vm_page_t m)
 	invlcaddr(pc->pc_cmap_addr2);
 	pagezero(pc->pc_cmap_addr2);
 	*cmap_pte2 = 0;
-	mtx_unlock(&pc->pc_cmap_lock);
 	sched_unpin();
+	mtx_unlock(&pc->pc_cmap_lock);
 }
 
 /*
@@ -4244,8 +4244,8 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 	else
 		bzero(pc->pc_cmap_addr2 + off, size);
 	*cmap_pte2 = 0;
-	mtx_unlock(&pc->pc_cmap_lock);
 	sched_unpin();
+	mtx_unlock(&pc->pc_cmap_lock);
 }
 
 /*
@@ -4275,8 +4275,8 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
 	bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
 	*cmap_pte1 = 0;
 	*cmap_pte2 = 0;
-	mtx_unlock(&pc->pc_cmap_lock);
 	sched_unpin();
+	mtx_unlock(&pc->pc_cmap_lock);
 }
 
 int unmapped_buf_allowed = 1;
@@ -4323,8 +4323,8 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
 	}
 	*cmap_pte1 = 0;
 	*cmap_pte2 = 0;
-	mtx_unlock(&pc->pc_cmap_lock);
 	sched_unpin();
+	mtx_unlock(&pc->pc_cmap_lock);
 }
 
 /*
@@ -5310,8 +5310,8 @@ pmap_flush_page(vm_page_t m)
 		if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		*cmap_pte2 = 0;
-		mtx_unlock(&pc->pc_cmap_lock);
 		sched_unpin();
+		mtx_unlock(&pc->pc_cmap_lock);
 	} else
 		pmap_invalidate_cache();
 }