Introduce a new pmap function, pmap_extract_and_hold(). This function
atomically extracts and holds the physical page that is associated with
the given pmap and virtual address. Such a function is needed to make
the memory mapping optimizations used by, for example, pipes and raw
disk I/O MP-safe.

Reviewed by:	tegge
commit a81d9ad0b9
parent ce0ede96f1
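For context, the call pattern a consumer of the new interface would follow looks roughly like the sketch below: look up and hold the page backing a user virtual address in one step, use it, then drop the hold. This is only an illustrative sketch, not code from this commit; the helper name hold_user_page_example, the EFAULT handling, and the choice of the current process's pmap are assumptions.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Illustrative (hypothetical) helper: hold the page backing a user
 * virtual address in the current process, use it, then drop the hold.
 */
static int
hold_user_page_example(vm_offset_t uaddr)
{
	pmap_t pmap = vmspace_pmap(curproc->p_vmspace);
	vm_page_t m;

	/*
	 * The lookup and the hold happen as a single atomic operation,
	 * so the page cannot be freed or replaced between the two steps.
	 */
	m = pmap_extract_and_hold(pmap, trunc_page(uaddr));
	if (m == NULL)
		return (EFAULT);	/* not resident; the caller must fault it in */

	/* ... use the held page, e.g. copy data to or from it ... */

	/* Drop the hold; the hold count is protected by the page queues lock. */
	vm_page_lock_queues();
	vm_page_unhold(m);
	vm_page_unlock_queues();
	return (0);
}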
@@ -811,6 +811,29 @@ pmap_extract(pmap, va)
	return 0;
}

/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	if ((pa = pmap_extract(pmap, va)) != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
@@ -749,6 +749,30 @@ pmap_extract(pmap, va)

}

/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	if ((pa = pmap_extract(pmap, va)) != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (m);
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{
@@ -861,6 +861,30 @@ pmap_extract(pmap, va)

}

/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	if ((pa = pmap_extract(pmap, va)) != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/
@@ -1102,6 +1102,30 @@ pmap_extract(pmap, va)
	return pmap_pte_pa(pte);
}

/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	if ((pa = pmap_extract(pmap, va)) != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/
@@ -620,6 +620,28 @@ pmap_extract(pmap_t pm, vm_offset_t va)
	return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	if ((pa = pmap_extract(pmap, va)) != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (m);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
@@ -106,6 +106,7 @@ void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
	    vm_page_t mpte);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va);
void pmap_growkernel(vm_offset_t);
void pmap_init(vm_paddr_t, vm_paddr_t);
boolean_t pmap_is_modified(vm_page_t m);