Introduce a new pmap function, pmap_extract_and_hold(). This function
atomically extracts and holds the physical page that is associated with the
given pmap and virtual address.  Such a function is needed to make the
memory mapping optimizations used by, for example, pipes and raw disk I/O
MP-safe.

Reviewed by:	tegge
This commit is contained in:
Alan Cox 2003-09-08 02:45:03 +00:00
parent a94100fa9b
commit ba2157f218
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=119869
6 changed files with 118 additions and 0 deletions

View File

@ -811,6 +811,29 @@ pmap_extract(pmap, va)
return 0;
}
/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page that is mapped at
 *	the given pmap/virtual-address pair; return NULL when no valid
 *	mapping exists.  The hold is taken under the page-queues lock,
 *	with Giant protecting the pmap lookup.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_page_t page = NULL;
	vm_paddr_t paddr;

	mtx_lock(&Giant);
	paddr = pmap_extract(pmap, va);
	if (paddr != 0) {
		page = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_hold(page);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (page);
}
/***************************************************
* Low level mapping routines.....

View File

@ -749,6 +749,30 @@ pmap_extract(pmap, va)
}
/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page that is mapped at
 *	the given pmap/virtual-address pair; return NULL when no valid
 *	mapping exists.  The hold is taken under the page-queues lock,
 *	with Giant protecting the pmap lookup.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_page_t page = NULL;
	vm_paddr_t paddr;

	mtx_lock(&Giant);
	paddr = pmap_extract(pmap, va);
	if (paddr != 0) {
		page = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_hold(page);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (page);
}
vm_paddr_t
pmap_kextract(vm_offset_t va)
{

View File

@ -861,6 +861,30 @@ pmap_extract(pmap, va)
}
/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page that is mapped at
 *	the given pmap/virtual-address pair; return NULL when no valid
 *	mapping exists.  The hold is taken under the page-queues lock,
 *	with Giant protecting the pmap lookup.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_page_t page = NULL;
	vm_paddr_t paddr;

	mtx_lock(&Giant);
	paddr = pmap_extract(pmap, va);
	if (paddr != 0) {
		page = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_hold(page);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (page);
}
/***************************************************
* Low level mapping routines.....
***************************************************/

View File

@ -1102,6 +1102,30 @@ pmap_extract(pmap, va)
return pmap_pte_pa(pte);
}
/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page that is mapped at
 *	the given pmap/virtual-address pair; return NULL when no valid
 *	mapping exists.  The hold is taken under the page-queues lock,
 *	with Giant protecting the pmap lookup.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_page_t page = NULL;
	vm_paddr_t paddr;

	mtx_lock(&Giant);
	paddr = pmap_extract(pmap, va);
	if (paddr != 0) {
		page = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_hold(page);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (page);
}
/***************************************************
* Low level mapping routines.....
***************************************************/

View File

@ -620,6 +620,28 @@ pmap_extract(pmap_t pm, vm_offset_t va)
return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
}
/*
 * Atomically extract and hold the physical page that is mapped at the
 * given pmap/virtual-address pair, or NULL when no valid mapping exists.
 * The hold is taken under the page-queues lock, with Giant protecting
 * the pmap lookup.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_page_t page = NULL;
	vm_paddr_t paddr;

	mtx_lock(&Giant);
	paddr = pmap_extract(pmap, va);
	if (paddr != 0) {
		page = PHYS_TO_VM_PAGE(paddr);
		vm_page_lock_queues();
		vm_page_hold(page);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (page);
}
/*
* Extract the physical page address associated with the given kernel virtual
* address.

View File

@ -106,6 +106,7 @@ void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_page_t mpte);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va);
void pmap_growkernel(vm_offset_t);
void pmap_init(vm_paddr_t, vm_paddr_t);
boolean_t pmap_is_modified(vm_page_t m);