Use unlocked page lookup for inmem() to avoid object lock contention
Reviewed by:	kib, markj
Submitted by:	mlaier
Sponsored by:	Dell EMC
Differential Revision:	https://reviews.freebsd.org/D26653
parent 31deb3cc76
commit c2c6fb90e0
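For context on the interface change in the hunks below (inmem() now returns bool and is declared in sys/buf.h rather than being static to vfs_bio.c), here is a small hypothetical caller sketch. It is not part of the commit; the function name and the read-ahead idea are invented, and the include list is only approximate.

/*
 * Hypothetical caller, not part of this commit: skip read-ahead for a
 * logical block whose data is already cached.  Include list approximate.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>		/* bool inmem(struct vnode *, daddr_t) */
#include <sys/vnode.h>

static void
maybe_readahead(struct vnode *vp, daddr_t lblkno)
{
	/* inmem() still asserts that the vnode lock is held. */
	ASSERT_VOP_LOCKED(vp, "maybe_readahead");

	if (inmem(vp, lblkno))
		return;		/* block cached in a buf or in VM pages */

	/* ... schedule an asynchronous read-ahead of lblkno here ... */
}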
@@ -154,7 +154,6 @@ caddr_t __read_mostly unmapped_buf;
 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
 struct proc *bufdaemonproc;
 
-static int inmem(struct vnode *vp, daddr_t blkno);
 static void vm_hold_free_pages(struct buf *bp, int newbsize);
 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
 	    vm_offset_t to);
@@ -3585,48 +3584,54 @@ incore(struct bufobj *bo, daddr_t blkno)
  * associated VM object. This is like incore except
  * it also hunts around in the VM system for the data.
  */
-static int
+bool
 inmem(struct vnode * vp, daddr_t blkno)
 {
 	vm_object_t obj;
 	vm_offset_t toff, tinc, size;
-	vm_page_t m;
+	vm_page_t m, n;
 	vm_ooffset_t off;
+	int valid;
 
 	ASSERT_VOP_LOCKED(vp, "inmem");
 
 	if (incore(&vp->v_bufobj, blkno))
-		return 1;
+		return (true);
 	if (vp->v_mount == NULL)
-		return 0;
+		return (false);
 	obj = vp->v_object;
 	if (obj == NULL)
-		return (0);
+		return (false);
 
 	size = PAGE_SIZE;
 	if (size > vp->v_mount->mnt_stat.f_iosize)
 		size = vp->v_mount->mnt_stat.f_iosize;
 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 
-	VM_OBJECT_RLOCK(obj);
 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
-		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
-		if (!m)
-			goto notinmem;
+		m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+recheck:
+		if (m == NULL)
+			return (false);
+
 		tinc = size;
 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
-		if (vm_page_is_valid(m,
-		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
-			goto notinmem;
+		/*
+		 * Consider page validity only if page mapping didn't change
+		 * during the check.
+		 */
+		valid = vm_page_is_valid(m,
+		    (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
+		n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+		if (m != n) {
+			m = n;
+			goto recheck;
+		}
+		if (!valid)
+			return (false);
 	}
-	VM_OBJECT_RUNLOCK(obj);
-	return 1;
-
-notinmem:
-	VM_OBJECT_RUNLOCK(obj);
-	return (0);
+	return (true);
 }
 
 /*
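The heart of the change is the lockless recheck in the loop above: with VM_OBJECT_RLOCK() gone, the page returned by vm_page_lookup_unlocked() can be freed or replaced while vm_page_is_valid() runs, so the validity result is trusted only if a second lookup still returns the same page; otherwise the loop restarts with the newly found page. The following sketch distills that idiom without the block-size arithmetic of inmem(); the helper name is invented and the include list is approximate.

/*
 * Illustration only: trust a check made against an unlocked page lookup
 * only if the object still maps the same page once the check is done.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static bool
page_range_valid_unlocked(vm_object_t obj, vm_pindex_t pidx, int base, int len)
{
	vm_page_t m, n;
	int valid;

	m = vm_page_lookup_unlocked(obj, pidx);
	for (;;) {
		if (m == NULL)
			return (false);
		valid = vm_page_is_valid(m, base, len);
		/* A different page here means the result above may be stale. */
		n = vm_page_lookup_unlocked(obj, pidx);
		if (m == n)
			break;
		m = n;
	}
	return (valid != 0);
}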
@@ -549,6 +549,7 @@ int vfs_bio_awrite(struct buf *);
 void	vfs_busy_pages_acquire(struct buf *bp);
 void	vfs_busy_pages_release(struct buf *bp);
 struct	buf *incore(struct bufobj *, daddr_t);
+bool	inmem(struct vnode *, daddr_t);
 struct	buf *gbincore(struct bufobj *, daddr_t);
 struct	buf *gbincore_unlocked(struct bufobj *, daddr_t);
 struct	buf *getblk(struct vnode *, daddr_t, int, int, int, int);
@@ -1697,6 +1697,21 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 	return (vm_radix_lookup(&object->rtree, pindex));
 }
 
+/*
+ *	vm_page_lookup_unlocked:
+ *
+ *	Returns the page associated with the object/offset pair specified;
+ *	if none is found, NULL is returned.  The page may no longer be
+ *	present in the object at the time that this function returns.  Only
+ *	useful for opportunistic checks such as inmem().
+ */
+vm_page_t
+vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
+{
+
+	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
+}
+
 /*
  *	vm_page_relookup:
  *
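The new comment spells out the contract that makes the inmem() conversion safe: unlike vm_page_lookup(), which must be called with the object lock held, vm_page_lookup_unlocked() returns a snapshot that may already be stale by the time the caller looks at it, so it is suitable only for opportunistic checks. A minimal hypothetical use, assuming the same VM headers as the sketch above (the helper name is invented):

/*
 * Illustration only: a residency hint that callers must be able to
 * tolerate being wrong about.  Code that needs a stable answer should
 * use vm_page_lookup() with the object lock held instead.
 */
static bool
object_page_resident_hint(vm_object_t obj, vm_pindex_t pidx)
{

	return (vm_page_lookup_unlocked(obj, pidx) != NULL);
}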
@@ -700,6 +700,7 @@ int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
 void vm_page_invalid(vm_page_t m);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
+vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 void vm_page_pqbatch_drain(void);
 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);