Implement and use a single optimized function for unholding a set of pages.

Reviewed by:	kib@
Alan Cox 2010-12-17 22:41:22 +00:00
parent d6ec8427bc
commit 8c22654d7e
8 changed files with 38 additions and 36 deletions
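Editor's note: the change is mechanical at every call site. Before this commit, each caller open-coded a per-page lock/unhold/unlock loop (visible in the sys_pipe.c and vfs_bio.c hunks below); afterward a single call does the work, and the new function additionally coalesces adjacent acquisitions of the same page lock. A minimal sketch of the shape of the change (`pages`, `npages`, and `i` are illustrative names, not taken from the diff):

	/* Before: per-page lock traffic, repeated at every call site. */
	for (i = 0; i < npages; i++) {
		vm_page_lock(pages[i]);
		vm_page_unhold(pages[i]);
		vm_page_unlock(pages[i]);
	}

	/* After: one call; the loop and the lock batching live in
	 * vm_page_unhold_pages() (see the sys/vm/vm_page.c hunk below). */
	vm_page_unhold_pages(pages, npages);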

sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c

@@ -454,7 +454,7 @@ t3_sosend(struct socket *so, struct uio *uio)
 	while (uiotmp.uio_resid > 0) {
 		rv = cxgb_vm_page_to_miov(toep, &uiotmp, &m);
 		if (rv) {
-			vm_fault_unhold_pages(toep->tp_pages, count);
+			vm_page_unhold_pages(toep->tp_pages, count);
 			return (rv);
 		}
 		uio->uio_resid -= m->m_pkthdr.len;
@@ -469,7 +469,7 @@ t3_sosend(struct socket *so, struct uio *uio)
 	 *
 	 */
 	cxgb_wait_dma_completion(toep);
-	vm_fault_unhold_pages(toep->tp_pages, count);
+	vm_page_unhold_pages(toep->tp_pages, count);
	/*
	 * If there is more data to send adjust local copy of iov
	 * to point to the start

sys/dev/cxgb/ulp/tom/cxgb_ddp.c

@@ -175,7 +175,7 @@ t3_pin_pages(bus_dma_tag_t tag, bus_dmamap_t dmamap, vm_offset_t addr,
 	*newgl = p;
 	return (0);
 unpin:
-	vm_fault_unhold_pages(p->dgl_pages, npages);
+	vm_page_unhold_pages(p->dgl_pages, npages);
 free_gl:
@@ -208,7 +208,7 @@ ddp_gl_free_pages(struct ddp_gather_list *gl, int dirty)
 	/*
 	 * XXX mark pages as dirty before unholding
 	 */
-	vm_fault_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
+	vm_page_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
 }
 
 void

sys/dev/cxgb/ulp/tom/cxgb_vm.c

@@ -150,16 +150,3 @@ vm_fault_hold_user_pages(vm_map_t map, vm_offset_t addr, vm_page_t *mp,
 	}
 	return (EFAULT);
 }
-
-void
-vm_fault_unhold_pages(vm_page_t *mp, int count)
-{
-
-	KASSERT(count >= 0, ("negative count %d", count));
-	while (count--) {
-		vm_page_lock(*mp);
-		vm_page_unhold(*mp);
-		vm_page_unlock(*mp);
-		mp++;
-	}
-}

sys/dev/cxgb/ulp/tom/cxgb_vm.h

@@ -34,6 +34,5 @@ POSSIBILITY OF SUCH DAMAGE.
 
 int vm_fault_hold_user_pages(vm_map_t map, vm_offset_t addr,
	vm_page_t *mp, int count, vm_prot_t prot);
-void vm_fault_unhold_pages(vm_page_t *mp, int count);
 
 #endif

sys/kern/sys_pipe.c

@@ -749,7 +749,7 @@ pipe_build_write_buffer(wpipe, uio)
 {
 	pmap_t pmap;
 	u_int size;
-	int i, j;
+	int i;
 	vm_offset_t addr, endaddr;
 
 	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
@@ -771,11 +771,7 @@ pipe_build_write_buffer(wpipe, uio)
 	 */
 race:
 	if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
-		for (j = 0; j < i; j++) {
-			vm_page_lock(wpipe->pipe_map.ms[j]);
-			vm_page_unhold(wpipe->pipe_map.ms[j]);
-			vm_page_unlock(wpipe->pipe_map.ms[j]);
-		}
+		vm_page_unhold_pages(wpipe->pipe_map.ms, i);
 		return (EFAULT);
 	}
 	wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
@@ -812,14 +808,9 @@
 static void
 pipe_destroy_write_buffer(wpipe)
 	struct pipe *wpipe;
 {
-	int i;
 
 	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
-	for (i = 0; i < wpipe->pipe_map.npages; i++) {
-		vm_page_lock(wpipe->pipe_map.ms[i]);
-		vm_page_unhold(wpipe->pipe_map.ms[i]);
-		vm_page_unlock(wpipe->pipe_map.ms[i]);
-	}
+	vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
 	wpipe->pipe_map.npages = 0;
 }

sys/kern/vfs_bio.c

@@ -3911,16 +3911,11 @@ vmapbuf(struct buf *bp)
 
 void
 vunmapbuf(struct buf *bp)
 {
-	int pidx;
 	int npages;
 
 	npages = bp->b_npages;
 	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
-	for (pidx = 0; pidx < npages; pidx++) {
-		vm_page_lock(bp->b_pages[pidx]);
-		vm_page_unhold(bp->b_pages[pidx]);
-		vm_page_unlock(bp->b_pages[pidx]);
-	}
+	vm_page_unhold_pages(bp->b_pages, npages);
 	bp->b_data = bp->b_saveaddr;
 }

sys/vm/vm_page.c

@@ -600,6 +600,35 @@ vm_page_unhold(vm_page_t mem)
 		vm_page_free_toq(mem);
 }
 
+/*
+ *	vm_page_unhold_pages:
+ *
+ *	Unhold each of the pages that is referenced by the given array.
+ */
+void
+vm_page_unhold_pages(vm_page_t *ma, int count)
+{
+	struct mtx *mtx, *new_mtx;
+
+	mtx = NULL;
+	for (; count != 0; count--) {
+		/*
+		 * Avoid releasing and reacquiring the same page lock.
+		 */
+		new_mtx = vm_page_lockptr(*ma);
+		if (mtx != new_mtx) {
+			if (mtx != NULL)
+				mtx_unlock(mtx);
+			mtx = new_mtx;
+			mtx_lock(mtx);
+		}
+		vm_page_unhold(*ma);
+		ma++;
+	}
+	if (mtx != NULL)
+		mtx_unlock(mtx);
+}
+
 /*
  *	vm_page_free:
  *
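Editor's note: the mtx/new_mtx dance above is the "optimized" part of this commit. vm_page_lockptr() maps each page to one of a small array of locks, so consecutive entries in the array often hash to the same lock; by keeping the current lock held until a page needs a different one, the loop collapses runs of identical locks into a single lock/unlock pair. Below is a minimal, runnable user-space analogue of that pattern using pthreads; all names in it (item, item_lockptr, item_unref_batch) are invented for illustration and are not kernel APIs.

	/*
	 * User-space sketch of the lock-batching pattern in
	 * vm_page_unhold_pages(): items hash to a small set of locks,
	 * and runs of items under the same lock reuse one acquisition.
	 */
	#include <pthread.h>
	#include <stdio.h>

	#define NLOCKS	4

	static pthread_mutex_t locks[NLOCKS] = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
	};

	struct item {
		int	id;
		int	refcnt;
	};

	/* Hash an item to one of a few locks, as vm_page_lockptr() does. */
	static pthread_mutex_t *
	item_lockptr(struct item *it)
	{

		return (&locks[it->id % NLOCKS]);
	}

	/* Drop one reference per item, coalescing adjacent identical locks. */
	static void
	item_unref_batch(struct item **ma, int count)
	{
		pthread_mutex_t *mtx, *new_mtx;

		mtx = NULL;
		for (; count != 0; count--) {
			new_mtx = item_lockptr(*ma);
			if (mtx != new_mtx) {
				if (mtx != NULL)
					pthread_mutex_unlock(mtx);
				mtx = new_mtx;
				pthread_mutex_lock(mtx);
			}
			(*ma)->refcnt--;
			ma++;
		}
		if (mtx != NULL)
			pthread_mutex_unlock(mtx);
	}

	int
	main(void)
	{
		struct item items[8];
		struct item *ma[8];
		int i;

		for (i = 0; i < 8; i++) {
			items[i].id = i / 2;	/* adjacent pairs share a lock */
			items[i].refcnt = 1;
			ma[i] = &items[i];
		}
		item_unref_batch(ma, 8);
		for (i = 0; i < 8; i++)
			printf("item %d refcnt %d\n", items[i].id, items[i].refcnt);
		return (0);
	}

In the worst case (every consecutive item under a different lock) this degenerates to one lock/unlock per item, i.e. the behavior of the removed cxgb helper; in the best case a whole array costs a single lock/unlock pair.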

sys/vm/vm_page.h

@@ -364,6 +364,7 @@ void vm_page_set_valid(vm_page_t m, int base, int size);
 void vm_page_sleep(vm_page_t m, const char *msg);
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
+void vm_page_unhold_pages(vm_page_t *ma, int count);
 void vm_page_unwire (vm_page_t, int);
 void vm_page_wire (vm_page_t);
 void vm_page_set_validclean (vm_page_t, int, int);