Add a new populate() pager method and extend the device pager ops vector
with cdev_pg_populate() to provide device drivers access to it.  It
gives drivers fine control over page ownership and allows them to
implement arbitrary prefault policies.

The populate method is called on a page fault and is supposed to
populate the vm object with the page at the fault location and some
number of pages around it, at the pager's discretion.  The VM provides
the pager with hints about the currently mapped range of the object, so
that the pager can avoid instantiating pages that would go immediately
unused, should it decide so.  The VM also passes the fault type and the
map entry's maximum protection to the pager, allowing it to establish
the optimal ownership of the mapped pages up front.

Installed pages must contiguously fill the returned region, be fully
valid, and be exclusively busied.  Naturally, the pages must also be
compatible with the object's type.
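
For illustration, here is a minimal sketch of a driver-side populate()
method honoring this contract.  It is not part of this commit: it
assumes a hypothetical managed device pager whose pages are ordinary
managed pages filled from device backing store (real managed device
pagers often install fictitious pages instead), and the names
mydrv_pg_populate, mydrv_fill_page, and MYDRV_CLUSTER are invented for
the example.

/* Sketch only; assumed driver headers. */
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#define MYDRV_CLUSTER   16      /* prefault granularity, driver's choice */

static int
mydrv_pg_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
        vm_pindex_t base, end, i;
        vm_page_t m;

        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Prefault an aligned cluster around the faulting index,
         * clipped to the mapped-range hint passed in *first/*last and
         * to the object size.  This simple policy ignores fault_type
         * and max_prot.
         */
        base = MAX(rounddown(pidx, MYDRV_CLUSTER), *first);
        end = MIN(base + MYDRV_CLUSTER - 1, *last);
        end = MIN(end, object->size - 1);

        for (i = base; i <= end; i++) {
                /*
                 * vm_page_grab() returns the page exclusive-busied,
                 * allocating and sleeping for memory as needed.
                 */
                m = vm_page_grab(object, i, VM_ALLOC_NORMAL);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        mydrv_fill_page(object, m);     /* hypothetical I/O */
                        m->valid = VM_PAGE_BITS_ALL;
                }
        }

        /* Report the contiguous run of fully valid, xbusied pages. */
        *first = base;
        *last = end;
        return (VM_PAGER_OK);
}

On entry, *first and *last carry the mapped-range hint described above;
on return they must delimit the run of pages actually instantiated.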

After populate() returns successfully, the VM fault handler installs as
many of the instantiated pages into the process page tables as it deems
reasonable, while still obeying the correct semantics for COW and vm
map locking.

The method is opt-in: a pager sets the OBJ_POPULATE flag to indicate
that the method can be called.  If the pager's vm objects can be
shadowed, the pager must implement the traditional getpages() method in
addition to populate().  Populate() may also fall back to getpages() on
a per-call basis by returning the VM_PAGER_BAD error code.
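
Registration, sketched for the same hypothetical driver as above:
cdev_pager_allocate() sets OBJ_POPULATE automatically when
cdev_pg_populate is non-NULL (see the sys/vm/device_pager.c hunk
below), and cdev_pg_fault backs the traditional getpages() path.  A
populate() implementation may likewise begin with a policy check that
returns VM_PAGER_BAD to punt an individual fault to that path.

/* Illustrative only; the other mydrv_* callbacks are assumed to exist. */
static struct cdev_pager_ops mydrv_pager_ops = {
        .cdev_pg_ctor = mydrv_pg_ctor,
        .cdev_pg_dtor = mydrv_pg_dtor,
        .cdev_pg_fault = mydrv_pg_fault,        /* getpages() fallback */
        .cdev_pg_populate = mydrv_pg_populate,  /* sketched above */
};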

For now, the populate() method may only be used by managed device
pagers; this limitation exists only because there are currently no
unmanaged fault handlers that could use it.

KPI designed together with, and reviewed by:	alc
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	3 weeks
Konstantin Belousov	2016-12-08 11:26:11 +00:00
commit c42b43a054 (parent dc5401d240)
Notes: svn2git 2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=309710

4 changed files with 178 additions and 0 deletions

sys/vm/device_pager.c

@@ -63,6 +63,8 @@ static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dev_pager_free_page(vm_object_t object, vm_page_t m);
static int dev_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t, vm_pindex_t *first, vm_pindex_t *last);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
@@ -84,6 +86,7 @@ struct pagerops mgtdevicepagerops = {
        .pgo_getpages = dev_pager_getpages,
        .pgo_putpages = dev_pager_putpages,
        .pgo_haspage = dev_pager_haspage,
        .pgo_populate = dev_pager_populate,
};

static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
@@ -127,6 +130,8 @@ cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
        if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
                return (NULL);
        KASSERT(tp == OBJT_MGTDEVICE || ops->cdev_pg_populate == NULL,
            ("populate on unmanaged device pager"));

        /*
         * Offset should be page aligned.
@@ -179,6 +184,8 @@ cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
                        object->handle = handle;
                        TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
                            pager_object_list);
                        if (ops->cdev_pg_populate != NULL)
                                vm_object_set_flag(object, OBJ_POPULATE);
                }
        } else {
                if (pindex > object->size)
@@ -268,6 +275,8 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
        /* Since our haspage reports zero after/before, the count is 1. */
        KASSERT(count == 1, ("%s: count %d", __func__, count));
        VM_OBJECT_ASSERT_WLOCKED(object);
        if (object->un_pager.devp.ops->cdev_pg_fault == NULL)
                return (VM_PAGER_FAIL);
        error = object->un_pager.devp.ops->cdev_pg_fault(object,
            IDX_TO_OFF(ma[0]->pindex), PROT_READ, &ma[0]);
@@ -292,6 +301,18 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
        return (error);
}

static int
dev_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{

        VM_OBJECT_ASSERT_WLOCKED(object);
        if (object->un_pager.devp.ops->cdev_pg_populate == NULL)
                return (VM_PAGER_FAIL);
        return (object->un_pager.devp.ops->cdev_pg_populate(object, pidx,
            fault_type, max_prot, first, last));
}

static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)

sys/vm/vm_fault.c

@@ -289,6 +289,119 @@ vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
        return (KERN_SUCCESS);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        MPASS(fs->first_object->paging_in_progress > 0);

        if (!vm_map_trylock_read(fs->map)) {
                VM_OBJECT_WUNLOCK(fs->first_object);
                vm_map_lock_read(fs->map);
                VM_OBJECT_WLOCK(fs->first_object);
        }
        fs->lookup_still_valid = true;
}

static int
vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
        vm_page_t m;
        vm_pindex_t f_first, f_last, pidx;
        int rv;

        MPASS(fs->object == fs->first_object);
        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        MPASS(fs->first_object->paging_in_progress > 0);
        MPASS(fs->first_object->backing_object == NULL);
        MPASS(fs->lookup_still_valid);

        f_first = OFF_TO_IDX(fs->entry->offset);
        f_last = OFF_TO_IDX(fs->entry->offset + fs->entry->end -
            fs->entry->start) - 1;
        unlock_map(fs);
        unlock_vp(fs);

        /*
         * Call the pager (driver) populate() method.
         *
         * There is no guarantee that the method will be called again
         * if the current fault is for read, and a future fault is
         * for write.  Report the entry's maximum allowed protection
         * to the driver.
         */
        rv = vm_pager_populate(fs->first_object, fs->first_pindex,
            fault_type, fs->entry->max_protection, &f_first, &f_last);

        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        if (rv == VM_PAGER_BAD) {
                /*
                 * VM_PAGER_BAD is the backdoor for a pager to request
                 * normal fault handling.
                 */
                vm_fault_restore_map_lock(fs);
                if (fs->map->timestamp != fs->map_generation)
                        return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
                return (KERN_NOT_RECEIVER);
        }
        if (rv != VM_PAGER_OK)
                return (KERN_FAILURE); /* AKA SIGSEGV */

        /* Ensure that the driver is obeying the interface. */
        MPASS(f_first <= f_last);
        MPASS(fs->first_pindex <= f_last);
        MPASS(fs->first_pindex >= f_first);
        MPASS(f_last < fs->first_object->size);

        vm_fault_restore_map_lock(fs);
        if (fs->map->timestamp != fs->map_generation)
                return (KERN_RESOURCE_SHORTAGE); /* RetryFault */

        /* Clip pager response to fit into the vm_map_entry. */
        f_first = MAX(OFF_TO_IDX(fs->entry->offset), f_first);
        f_last = MIN(OFF_TO_IDX(fs->entry->end - fs->entry->start +
            fs->entry->offset), f_last);

        pidx = f_first;
        for (m = vm_page_lookup(fs->first_object, pidx); pidx <= f_last;
            pidx++, m = vm_page_next(m)) {
                /*
                 * Check each page to ensure that the driver is
                 * obeying the interface: the page must be installed
                 * in the object, fully valid, and exclusively busied.
                 */
                MPASS(m != NULL);
                MPASS(vm_page_xbusied(m));
                MPASS(m->valid == VM_PAGE_BITS_ALL);
                MPASS(m->object == fs->first_object);
                MPASS(m->pindex == pidx);

                vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags,
                    true);
                VM_OBJECT_WUNLOCK(fs->first_object);
                pmap_enter(fs->map->pmap, fs->entry->start + IDX_TO_OFF(pidx) -
                    fs->entry->offset, m, prot, fault_type | (wired ?
                    PMAP_ENTER_WIRED : 0), 0);
                VM_OBJECT_WLOCK(fs->first_object);
                if (pidx == fs->first_pindex)
                        vm_fault_fill_hold(m_hold, m);
                vm_page_lock(m);
                if ((fault_flags & VM_FAULT_WIRE) != 0) {
                        KASSERT(wired, ("VM_FAULT_WIRE && !wired"));
                        vm_page_wire(m);
                } else {
                        vm_page_activate(m);
                }
                vm_page_unlock(m);
                vm_page_xunbusy(m);
        }
        curthread->td_ru.ru_majflt++;
        return (KERN_SUCCESS);
}

/*
 * vm_fault:
 *
@@ -555,6 +668,30 @@ RetryFault:;
                        return (KERN_PROTECTION_FAILURE);
                }
                if (fs.object == fs.first_object &&
                    (fs.first_object->flags & OBJ_POPULATE) != 0 &&
                    fs.first_object->shadow_count == 0) {
                        rv = vm_fault_populate(&fs, vaddr, prot,
                            fault_type, fault_flags, wired, m_hold);
                        switch (rv) {
                        case KERN_SUCCESS:
                        case KERN_FAILURE:
                                unlock_and_deallocate(&fs);
                                return (rv);
                        case KERN_RESOURCE_SHORTAGE:
                                unlock_and_deallocate(&fs);
                                goto RetryFault;
                        case KERN_NOT_RECEIVER:
                                /*
                                 * Pager's populate() method
                                 * returned VM_PAGER_BAD.
                                 */
                                break;
                        default:
                                panic("inconsistent return codes");
                        }
                }

                /*
                 * Allocate a new page for this object/offset pair.
                 *

sys/vm/vm_object.h

@@ -181,6 +181,7 @@ struct vm_object {
 */
#define OBJ_FICTITIOUS  0x0001          /* (c) contains fictitious pages */
#define OBJ_UNMANAGED   0x0002          /* (c) contains unmanaged pages */
#define OBJ_POPULATE    0x0004          /* pager implements populate() */
#define OBJ_DEAD        0x0008          /* dead objects (during rundown) */
#define OBJ_NOSPLIT     0x0010          /* dont split this object */
#define OBJ_UMTXDEAD    0x0020          /* umtx pshared was terminated */
sys/vm/vm_pager.h

@@ -56,6 +56,8 @@ typedef int pgo_getpages_async_t(vm_object_t, vm_page_t *, int, int *, int *,
    pgo_getpages_iodone_t, void *);
typedef void pgo_putpages_t(vm_object_t, vm_page_t *, int, int, int *);
typedef boolean_t pgo_haspage_t(vm_object_t, vm_pindex_t, int *, int *);
typedef int pgo_populate_t(vm_object_t, vm_pindex_t, int, vm_prot_t,
    vm_pindex_t *, vm_pindex_t *);
typedef void pgo_pageunswapped_t(vm_page_t);

struct pagerops {
@@ -66,6 +68,7 @@ struct pagerops {
        pgo_getpages_async_t *pgo_getpages_async;       /* Get page asyncly. */
        pgo_putpages_t *pgo_putpages;                   /* Put (write) page. */
        pgo_haspage_t *pgo_haspage;                     /* Query page. */
        pgo_populate_t *pgo_populate;                   /* Bulk spec pagein. */
        pgo_pageunswapped_t *pgo_pageunswapped;
};
@@ -152,6 +155,19 @@ vm_pager_has_page(
        return (ret);
}

static __inline int
vm_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{

        MPASS((object->flags & OBJ_POPULATE) != 0);
        MPASS(pidx < object->size);
        MPASS(object->paging_in_progress > 0);
        return ((*pagertab[object->type]->pgo_populate)(object, pidx,
            fault_type, max_prot, first, last));
}

/*
 * vm_pager_page_unswapped
 *
@@ -177,6 +193,9 @@ vm_pager_page_unswapped(vm_page_t m)
struct cdev_pager_ops {
        int (*cdev_pg_fault)(vm_object_t vm_obj, vm_ooffset_t offset,
            int prot, vm_page_t *mres);
        int (*cdev_pg_populate)(vm_object_t vm_obj, vm_pindex_t pidx,
            int fault_type, vm_prot_t max_prot, vm_pindex_t *first,
            vm_pindex_t *last);
        int (*cdev_pg_ctor)(void *handle, vm_ooffset_t size, vm_prot_t prot,
            vm_ooffset_t foff, struct ucred *cred, u_short *color);
        void (*cdev_pg_dtor)(void *handle);