Implement global and per-uid accounting of anonymous memory. Add an
rlimit, RLIMIT_SWAP, that limits the amount of swap that may be reserved
for a given uid.
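
For illustration, a userland sketch (not part of this diff; note that the
per-uid limit is only enforced when the SWAP_RESERVE_RLIMIT_ON bit is set
in the vm.overcommit sysctl introduced below):

    #include <sys/resource.h>
    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct rlimit rl;

        /* Cap swap reservations charged to this uid at 512 MB. */
        rl.rlim_cur = rl.rlim_max = 512UL * 1024 * 1024;
        if (setrlimit(RLIMIT_SWAP, &rl) == -1)
            err(1, "setrlimit(RLIMIT_SWAP)");
        if (getrlimit(RLIMIT_SWAP, &rl) == -1)
            err(1, "getrlimit(RLIMIT_SWAP)");
        printf("RLIMIT_SWAP: %ju bytes\n", (uintmax_t)rl.rlim_cur);
        return (0);
    }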

The accounting information (charge) is associated with either the map
entry or the vm object backing the entry, provided the object is the
first one in the shadow chain and the entry does not require COW. The
charge is moved from the entry to the object when the object is
allocated, e.g. during mmap if the object is created immediately, or on
the first page fault on the entry. It moves back to the entry on fork,
due to the COW setup.
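
The invariant is that any given range is charged in exactly one place:
either the entry or the first object in its shadow chain. A minimal
sketch of the entry-to-object hand-off (hypothetical helper; the real
code open-codes this pattern in _vm_map_clip_start(), _vm_map_clip_end()
and vmspace_fork() below):

    /* Illustrative only; assumes entry is backed by object. */
    static void
    entry_charge_to_object(vm_map_entry_t entry, vm_object_t object)
    {

        if (entry->uip == NULL)
            return;
        VM_OBJECT_LOCK(object);
        KASSERT(object->uip == NULL,
            ("charge must be held in only one place"));
        object->uip = entry->uip;
        object->charge = entry->end - entry->start;
        VM_OBJECT_UNLOCK(object);
        entry->uip = NULL;
    }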

The per-entry granularity of accounting makes the charging fair for
processes that change uid during their lifetime, and decrements the
charge against the proper uid when a region is unmapped.

The vm_pager_allocate(9) interface is extended with a struct ucred *
argument, which is used to charge the appropriate uid when the
allocation is performed by the kernel, e.g. by md(4).
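
For reference, the extended prototype and a typical in-kernel call (the
call mirrors the md(4) hunk below; passing a NULL ucred skips the
accounting, as the tmpfs call site does for now):

    vm_object_t vm_pager_allocate(objtype_t type, void *handle,
        vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t off,
        struct ucred *cred);

    /* md(4): charge the swap reservation to the calling thread's uid. */
    sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
        VM_PROT_DEFAULT, 0, td->td_ucred);
    if (sc->object == NULL)
        return (ENOMEM);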

Several syscalls, fork(2) among them, may now return ENOMEM when the
global or per-uid limits are enforced.

In collaboration with:	pho
Reviewed by:	alc
Approved by:	re (kensmith)
Konstantin Belousov 2009-06-23 20:45:22 +00:00
parent 224fbf9fd6
commit 3364c323e6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=194766
29 changed files with 664 additions and 88 deletions

View File

@ -1042,18 +1042,18 @@ mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
if (mdio->md_fwheads != 0)
sc->fwheads = mdio->md_fwheads;
sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
VM_PROT_DEFAULT, 0);
VM_PROT_DEFAULT, 0, td->td_ucred);
if (sc->object == NULL)
return (ENOMEM);
sc->flags = mdio->md_options & MD_FORCE;
if (mdio->md_options & MD_RESERVE) {
if (swap_pager_reserve(sc->object, 0, npage) < 0) {
vm_object_deallocate(sc->object);
sc->object = NULL;
return (EDOM);
error = EDOM;
goto finish;
}
}
error = mdsetcred(sc, td->td_ucred);
finish:
if (error != 0) {
vm_object_deallocate(sc->object);
sc->object = NULL;

View File

@ -45,6 +45,7 @@
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#ifdef COMPAT_IA32
#include <sys/sysent.h>
@ -82,6 +83,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
vm_map_entry_t entry, tmp_entry;
struct vnode *vp;
char *fullpath, *freepath;
struct uidinfo *uip;
int error, vfslocked;
unsigned int last_timestamp;
#ifdef COMPAT_IA32
@ -134,6 +136,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
if (obj->shadow_count == 1)
privateresident = obj->resident_page_count;
}
uip = (entry->uip) ? entry->uip : (obj ? obj->uip : NULL);
resident = 0;
addr = entry->start;
@ -198,10 +201,11 @@ procfs_doprocmap(PFS_FILL_ARGS)
/*
* format:
* start, end, resident, private resident, cow, access, type.
* start, end, resident, private resident, cow, access, type,
* charged, charged uid.
*/
error = sbuf_printf(sb,
"0x%lx 0x%lx %d %d %p %s%s%s %d %d 0x%x %s %s %s %s\n",
"0x%lx 0x%lx %d %d %p %s%s%s %d %d 0x%x %s %s %s %s %s %d\n",
(u_long)e_start, (u_long)e_end,
resident, privateresident,
#ifdef COMPAT_IA32
@ -215,7 +219,8 @@ procfs_doprocmap(PFS_FILL_ARGS)
ref_count, shadow_count, flags,
(e_eflags & MAP_ENTRY_COW)?"COW":"NCOW",
(e_eflags & MAP_ENTRY_NEEDS_COPY)?"NC":"NNC",
type, fullpath);
type, fullpath,
uip ? "CH":"NCH", uip ? uip->ui_uid : -1);
if (freepath != NULL)
free(freepath, M_TEMP);

View File

@ -142,7 +142,8 @@ tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
case VREG:
nnode->tn_reg.tn_aobj =
vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0);
vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
NULL /* XXXKIB - tmpfs needs swap reservation */);
nnode->tn_reg.tn_aobj_pages = 0;
break;

View File

@ -214,6 +214,7 @@ fork1(td, flags, pages, procp)
struct thread *td2;
struct sigacts *newsigacts;
struct vmspace *vm2;
vm_ooffset_t mem_charged;
int error;
/* Can't copy and clear. */
@ -274,6 +275,7 @@ fork1(td, flags, pages, procp)
* however it proved un-needed and caused problems
*/
mem_charged = 0;
vm2 = NULL;
/* Allocate new proc. */
newproc = uma_zalloc(proc_zone, M_WAITOK);
@ -295,12 +297,24 @@ fork1(td, flags, pages, procp)
}
}
if ((flags & RFMEM) == 0) {
vm2 = vmspace_fork(p1->p_vmspace);
vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
if (vm2 == NULL) {
error = ENOMEM;
goto fail1;
}
}
if (!swap_reserve(mem_charged)) {
/*
* The swap reservation failed. The accounting
* from the entries of the copied vm2 will be
* substracted in vmspace_free(), so force the
* reservation there.
*/
swap_reserve_force(mem_charged);
error = ENOMEM;
goto fail1;
}
} else
vm2 = NULL;
#ifdef MAC
mac_proc_init(newproc);
#endif

View File

@ -1213,6 +1213,8 @@ uifind(uid)
} else {
refcount_init(&uip->ui_ref, 0);
uip->ui_uid = uid;
mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
MTX_DEF);
LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
}
}
@ -1269,6 +1271,10 @@ uifree(uip)
if (uip->ui_proccnt != 0)
printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
uip->ui_uid, uip->ui_proccnt);
if (uip->ui_vmsize != 0)
printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
uip->ui_uid, (unsigned long long)uip->ui_vmsize);
mtx_destroy(&uip->ui_vmsize_mtx);
free(uip, M_UIDINFO);
return;
}

View File

@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#ifdef COMPAT_IA32
#include <sys/procfs.h>
@ -270,7 +271,10 @@ proc_rwmem(struct proc *p, struct uio *uio)
*/
error = vm_fault(map, pageno, reqprot, fault_flags);
if (error) {
error = EFAULT;
if (error == KERN_RESOURCE_SHORTAGE)
error = ENOMEM;
else
error = EFAULT;
break;
}

View File

@ -770,13 +770,10 @@ shmget_allocate_segment(td, uap, mode)
* We make sure that we have allocated a pager before we need
* to.
*/
if (shm_use_phys) {
shm_object =
vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
} else {
shm_object =
vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
}
shm_object = vm_pager_allocate(shm_use_phys ? OBJT_PHYS : OBJT_SWAP,
0, size, VM_PROT_DEFAULT, 0, cred);
if (shm_object == NULL)
return (ENOMEM);
VM_OBJECT_LOCK(shm_object);
vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shm_object, OBJ_NOSPLIT);

View File

@ -110,7 +110,7 @@ static struct shmfd *shm_hold(struct shmfd *shmfd);
static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void shm_dotruncate(struct shmfd *shmfd, off_t length);
static int shm_dotruncate(struct shmfd *shmfd, off_t length);
static fo_rdwr_t shm_read;
static fo_rdwr_t shm_write;
@ -167,8 +167,7 @@ shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
if (error)
return (error);
#endif
shm_dotruncate(shmfd, length);
return (0);
return (shm_dotruncate(shmfd, length));
}
static int
@ -242,23 +241,26 @@ shm_close(struct file *fp, struct thread *td)
return (0);
}
static void
static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
vm_object_t object;
vm_page_t m;
vm_pindex_t nobjsize;
vm_ooffset_t delta;
object = shmfd->shm_object;
VM_OBJECT_LOCK(object);
if (length == shmfd->shm_size) {
VM_OBJECT_UNLOCK(object);
return;
return (0);
}
nobjsize = OFF_TO_IDX(length + PAGE_MASK);
/* Are we shrinking? If so, trim the end. */
if (length < shmfd->shm_size) {
delta = ptoa(object->size - nobjsize);
/* Toss in memory pages. */
if (nobjsize < object->size)
vm_object_page_remove(object, nobjsize, object->size,
@ -266,8 +268,11 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
/* Toss pages from swap. */
if (object->type == OBJT_SWAP)
swap_pager_freespace(object, nobjsize,
object->size - nobjsize);
swap_pager_freespace(object, nobjsize, delta);
/* Free the swap accounted for shm */
swap_release_by_uid(delta, object->uip);
object->charge -= delta;
/*
* If the last page is partially mapped, then zero out
@ -307,6 +312,15 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
vm_page_cache_free(object, OFF_TO_IDX(length),
nobjsize);
}
} else {
/* Attempt to reserve the swap */
delta = ptoa(nobjsize - object->size);
if (!swap_reserve_by_uid(delta, object->uip)) {
VM_OBJECT_UNLOCK(object);
return (ENOMEM);
}
object->charge += delta;
}
shmfd->shm_size = length;
mtx_lock(&shm_timestamp_lock);
@ -315,6 +329,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
mtx_unlock(&shm_timestamp_lock);
object->size = nobjsize;
VM_OBJECT_UNLOCK(object);
return (0);
}
/*
@ -332,7 +347,7 @@ shm_alloc(struct ucred *ucred, mode_t mode)
shmfd->shm_gid = ucred->cr_gid;
shmfd->shm_mode = mode;
shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
shmfd->shm_size, VM_PROT_DEFAULT, 0);
shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
VM_OBJECT_LOCK(shmfd->shm_object);
vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);

View File

@ -1830,6 +1830,8 @@ biba_priv_check(struct ucred *cred, int priv)
case PRIV_VM_MADV_PROTECT:
case PRIV_VM_MLOCK:
case PRIV_VM_MUNLOCK:
case PRIV_VM_SWAP_NOQUOTA:
case PRIV_VM_SWAP_NORLIMIT:
/*
* Allow some but not all network privileges. In general, dont allow

View File

@ -1822,6 +1822,8 @@ lomac_priv_check(struct ucred *cred, int priv)
case PRIV_VM_MADV_PROTECT:
case PRIV_VM_MLOCK:
case PRIV_VM_MUNLOCK:
case PRIV_VM_SWAP_NOQUOTA:
case PRIV_VM_SWAP_NORLIMIT:
/*
* Allow some but not all network privileges. In general, dont allow

View File

@ -283,6 +283,14 @@
#define PRIV_VM_MADV_PROTECT 360 /* Can set MADV_PROTECT. */
#define PRIV_VM_MLOCK 361 /* Can mlock(), mlockall(). */
#define PRIV_VM_MUNLOCK 362 /* Can munlock(), munlockall(). */
#define PRIV_VM_SWAP_NOQUOTA 363 /*
* Can override the global
* swap reservation limits.
*/
#define PRIV_VM_SWAP_NORLIMIT 364 /*
* Can override the per-uid
* swap reservation limits.
*/
/*
* Device file system privileges.

View File

@ -94,8 +94,9 @@ struct rusage {
#define RLIMIT_VMEM 10 /* virtual process size (inclusive of mmap) */
#define RLIMIT_AS RLIMIT_VMEM /* standard name for RLIMIT_VMEM */
#define RLIMIT_NPTS 11 /* pseudo-terminals */
#define RLIMIT_SWAP 12 /* swap used */
#define RLIM_NLIMITS 12 /* number of resource limits */
#define RLIM_NLIMITS 13 /* number of resource limits */
#define RLIM_INFINITY ((rlim_t)(((uint64_t)1 << 63) - 1))
/* XXX Missing: RLIM_SAVED_MAX, RLIM_SAVED_CUR */
@ -119,6 +120,7 @@ static char *rlimit_ident[RLIM_NLIMITS] = {
"sbsize",
"vmem",
"npts",
"swap",
};
#endif

View File

@ -86,9 +86,12 @@ struct plimit {
* (a) Constant from inception
* (b) Lockless, updated using atomics
* (c) Locked by global uihashtbl_mtx
* (d) Locked by the ui_vmsize_mtx
*/
struct uidinfo {
LIST_ENTRY(uidinfo) ui_hash; /* (c) hash chain of uidinfos */
struct mtx ui_vmsize_mtx;
vm_ooffset_t ui_vmsize; /* (d) swap reservation by uid */
long ui_sbsize; /* (b) socket buffer space consumed */
long ui_proccnt; /* (b) number of processes */
long ui_ptscnt; /* (b) number of pseudo-terminals */
@ -96,6 +99,9 @@ struct uidinfo {
u_int ui_ref; /* (b) reference count */
};
#define UIDINFO_VMSIZE_LOCK(ui) mtx_lock(&((ui)->ui_vmsize_mtx))
#define UIDINFO_VMSIZE_UNLOCK(ui) mtx_unlock(&((ui)->ui_vmsize_mtx))
struct proc;
struct rusage_ext;
struct thread;

View File

@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/mutex.h>
#include <vm/vm.h>
@ -53,7 +54,7 @@ __FBSDID("$FreeBSD$");
#include <vm/swap_pager.h>
static vm_object_t default_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t);
vm_ooffset_t, struct ucred *);
static void default_pager_dealloc(vm_object_t);
static int default_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void default_pager_putpages(vm_object_t, vm_page_t *, int,
@ -76,12 +77,28 @@ struct pagerops defaultpagerops = {
*/
static vm_object_t
default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset)
vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
struct uidinfo *uip;
if (handle != NULL)
panic("default_pager_alloc: handle specified");
return vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(round_page(offset + size)));
if (cred != NULL) {
uip = cred->cr_ruidinfo;
if (!swap_reserve_by_uid(size, uip))
return (NULL);
uihold(uip);
}
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(round_page(offset + size)));
if (cred != NULL) {
VM_OBJECT_LOCK(object);
object->uip = uip;
object->charge = size;
VM_OBJECT_UNLOCK(object);
}
return (object);
}
/*

View File

@ -54,7 +54,7 @@ __FBSDID("$FreeBSD$");
static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t);
vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
@ -97,7 +97,8 @@ dev_pager_init()
* MPSAFE
*/
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff)
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t foff, struct ucred *cred)
{
struct cdev *dev;
vm_object_t object, object1;

View File

@ -60,7 +60,7 @@ phys_pager_init(void)
*/
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t foff)
vm_ooffset_t foff, struct ucred *cred)
{
vm_object_t object, object1;
vm_pindex_t pindex;

View File

@ -86,6 +86,8 @@ __FBSDID("$FreeBSD$");
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
@ -152,6 +154,127 @@ static int nswapdev; /* Number of swap devices */
int swap_pager_avail;
static int swdev_syscall_active = 0; /* serialize swap(on|off) */
static vm_ooffset_t swap_total;
SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0, "");
static vm_ooffset_t swap_reserved;
SYSCTL_QUAD(_vm, OID_AUTO, swap_reserved, CTLFLAG_RD, &swap_reserved, 0, "");
static int overcommit = 0;
SYSCTL_INT(_vm, OID_AUTO, overcommit, CTLFLAG_RW, &overcommit, 0, "");
/* bits from overcommit */
#define SWAP_RESERVE_FORCE_ON (1 << 0)
#define SWAP_RESERVE_RLIMIT_ON (1 << 1)
#define SWAP_RESERVE_ALLOW_NONWIRED (1 << 2)
int
swap_reserve(vm_ooffset_t incr)
{
return (swap_reserve_by_uid(incr, curthread->td_ucred->cr_ruidinfo));
}
int
swap_reserve_by_uid(vm_ooffset_t incr, struct uidinfo *uip)
{
vm_ooffset_t r, s, max;
int res, error;
static int curfail;
static struct timeval lastfail;
if (incr & PAGE_MASK)
panic("swap_reserve: & PAGE_MASK");
res = 0;
error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA);
mtx_lock(&sw_dev_mtx);
r = swap_reserved + incr;
if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
s = cnt.v_page_count - cnt.v_free_reserved - cnt.v_wire_count;
s *= PAGE_SIZE;
} else
s = 0;
s += swap_total;
if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
(error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
res = 1;
swap_reserved = r;
}
mtx_unlock(&sw_dev_mtx);
if (res) {
PROC_LOCK(curproc);
UIDINFO_VMSIZE_LOCK(uip);
error = priv_check(curthread, PRIV_VM_SWAP_NORLIMIT);
max = (error != 0) ? lim_cur(curproc, RLIMIT_SWAP) : 0;
if (max != 0 && uip->ui_vmsize + incr > max &&
(overcommit & SWAP_RESERVE_RLIMIT_ON) != 0)
res = 0;
else
uip->ui_vmsize += incr;
UIDINFO_VMSIZE_UNLOCK(uip);
PROC_UNLOCK(curproc);
if (!res) {
mtx_lock(&sw_dev_mtx);
swap_reserved -= incr;
mtx_unlock(&sw_dev_mtx);
}
}
if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
curproc->p_pid, uip->ui_uid, incr);
}
return (res);
}
void
swap_reserve_force(vm_ooffset_t incr)
{
struct uidinfo *uip;
mtx_lock(&sw_dev_mtx);
swap_reserved += incr;
mtx_unlock(&sw_dev_mtx);
uip = curthread->td_ucred->cr_ruidinfo;
PROC_LOCK(curproc);
UIDINFO_VMSIZE_LOCK(uip);
uip->ui_vmsize += incr;
UIDINFO_VMSIZE_UNLOCK(uip);
PROC_UNLOCK(curproc);
}
void
swap_release(vm_ooffset_t decr)
{
struct uidinfo *uip;
PROC_LOCK(curproc);
uip = curthread->td_ucred->cr_ruidinfo;
swap_release_by_uid(decr, uip);
PROC_UNLOCK(curproc);
}
void
swap_release_by_uid(vm_ooffset_t decr, struct uidinfo *uip)
{
if (decr & PAGE_MASK)
panic("swap_release: & PAGE_MASK");
mtx_lock(&sw_dev_mtx);
if (swap_reserved < decr)
panic("swap_reserved < decr");
swap_reserved -= decr;
mtx_unlock(&sw_dev_mtx);
UIDINFO_VMSIZE_LOCK(uip);
if (uip->ui_vmsize < decr)
printf("negative vmsize for uid = %d\n", uip->ui_uid);
uip->ui_vmsize -= decr;
UIDINFO_VMSIZE_UNLOCK(uip);
}
static void swapdev_strategy(struct buf *, struct swdevt *sw);
#define SWM_FREE 0x02 /* free, period */
@ -198,7 +321,7 @@ static struct vm_object swap_zone_obj;
*/
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size,
vm_prot_t prot, vm_ooffset_t offset);
vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void swap_pager_dealloc(vm_object_t object);
static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
@ -440,13 +563,13 @@ swap_pager_swap_init(void)
*/
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset)
vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
vm_pindex_t pindex;
struct uidinfo *uip;
pindex = OFF_TO_IDX(offset + PAGE_MASK + size);
if (handle) {
mtx_lock(&Giant);
/*
@ -457,21 +580,41 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
sx_xlock(&sw_alloc_sx);
object = vm_pager_object_lookup(NOBJLIST(handle), handle);
if (object == NULL) {
if (cred != NULL) {
uip = cred->cr_ruidinfo;
if (!swap_reserve_by_uid(size, uip)) {
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
return (NULL);
}
uihold(uip);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
object->handle = handle;
VM_OBJECT_LOCK(object);
object->handle = handle;
if (cred != NULL) {
object->uip = uip;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
}
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
} else {
if (cred != NULL) {
uip = cred->cr_ruidinfo;
if (!swap_reserve_by_uid(size, uip))
return (NULL);
uihold(uip);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
VM_OBJECT_LOCK(object);
if (cred != NULL) {
object->uip = uip;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
}
@ -2039,6 +2182,7 @@ swaponsomething(struct vnode *vp, void *id, u_long nblks, sw_strategy_t *strateg
TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
nswapdev++;
swap_pager_avail += nblks;
swap_total += (vm_ooffset_t)nblks * PAGE_SIZE;
swp_sizecheck();
mtx_unlock(&sw_dev_mtx);
}
@ -2143,6 +2287,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
swap_pager_avail -= blist_fill(sp->sw_blist,
dvbase, dmmax);
}
swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
mtx_unlock(&sw_dev_mtx);
/*

View File

@ -133,5 +133,12 @@ struct kva_md_info {
extern struct kva_md_info kmi;
extern void vm_ksubmap_init(struct kva_md_info *);
struct uidinfo;
int swap_reserve(vm_ooffset_t incr);
int swap_reserve_by_uid(vm_ooffset_t incr, struct uidinfo *uip);
void swap_reserve_force(vm_ooffset_t incr);
void swap_release(vm_ooffset_t decr);
void swap_release_by_uid(vm_ooffset_t decr, struct uidinfo *uip);
#endif /* VM_H */

View File

@ -63,7 +63,7 @@ void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
struct vmspace *vmspace_fork(struct vmspace *);
struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
int vmspace_unshare(struct proc *);
void vmspace_exit(struct thread *);

View File

@ -1163,7 +1163,11 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
VM_OBJECT_LOCK(dst_object);
dst_entry->object.vm_object = dst_object;
dst_entry->offset = 0;
if (dst_entry->uip != NULL) {
dst_object->uip = dst_entry->uip;
dst_object->charge = dst_entry->end - dst_entry->start;
dst_entry->uip = NULL;
}
prot = dst_entry->max_protection;
/*

View File

@ -235,7 +235,8 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
*min = vm_map_min(parent);
ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
MAP_ACC_NO_CHARGE);
if (ret != KERN_SUCCESS)
panic("kmem_suballoc: bad status return of %d", ret);
*max = *min + size;
@ -422,6 +423,8 @@ kmem_alloc_wait(map, size)
vm_offset_t addr;
size = round_page(size);
if (!swap_reserve(size))
return (0);
for (;;) {
/*
@ -434,12 +437,14 @@ kmem_alloc_wait(map, size)
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {
vm_map_unlock(map);
swap_release(size);
return (0);
}
map->needs_wakeup = TRUE;
vm_map_unlock_and_wait(map, 0);
}
vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, MAP_ACC_CHARGED);
vm_map_unlock(map);
return (addr);
}

View File

@ -149,6 +149,10 @@ static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
#define ENTRY_CHARGED(e) ((e)->uip != NULL || \
((e)->object.vm_object != NULL && (e)->object.vm_object->uip != NULL && \
!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
/*
* PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
* stable.
@ -1076,6 +1080,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
vm_eflags_t protoeflags;
struct uidinfo *uip;
boolean_t charge_prev_obj;
VM_MAP_ASSERT_LOCKED(map);
@ -1103,6 +1109,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
return (KERN_NO_SPACE);
protoeflags = 0;
charge_prev_obj = FALSE;
if (cow & MAP_COPY_ON_WRITE)
protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
@ -1118,6 +1125,27 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
if (cow & MAP_DISABLE_COREDUMP)
protoeflags |= MAP_ENTRY_NOCOREDUMP;
uip = NULL;
KASSERT((object != kmem_object && object != kernel_object) ||
((object == kmem_object || object == kernel_object) &&
!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
("kmem or kernel object and cow"));
if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
goto charged;
if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
return (KERN_RESOURCE_SHORTAGE);
KASSERT(object == NULL || (cow & MAP_ENTRY_NEEDS_COPY) ||
object->uip == NULL,
("OVERCOMMIT: vm_map_insert o %p", object));
uip = curthread->td_ucred->cr_ruidinfo;
uihold(uip);
if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
charge_prev_obj = TRUE;
}
charged:
if (object != NULL) {
/*
* OBJ_ONEMAPPING must be cleared unless this mapping
@ -1135,11 +1163,13 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
(prev_entry->eflags == protoeflags) &&
(prev_entry->end == start) &&
(prev_entry->wired_count == 0) &&
((prev_entry->object.vm_object == NULL) ||
vm_object_coalesce(prev_entry->object.vm_object,
prev_entry->offset,
(vm_size_t)(prev_entry->end - prev_entry->start),
(vm_size_t)(end - prev_entry->end)))) {
(prev_entry->uip == uip ||
(prev_entry->object.vm_object != NULL &&
(prev_entry->object.vm_object->uip == uip))) &&
vm_object_coalesce(prev_entry->object.vm_object,
prev_entry->offset,
(vm_size_t)(prev_entry->end - prev_entry->start),
(vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
/*
* We were able to extend the object. Determine if we
* can extend the previous map entry to include the
@ -1152,6 +1182,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
prev_entry->end = end;
vm_map_entry_resize_free(map, prev_entry);
vm_map_simplify_entry(map, prev_entry);
if (uip != NULL)
uifree(uip);
return (KERN_SUCCESS);
}
@ -1165,6 +1197,12 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
offset = prev_entry->offset +
(prev_entry->end - prev_entry->start);
vm_object_reference(object);
if (uip != NULL && object != NULL && object->uip != NULL &&
!(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
/* Object already accounts for this uid. */
uifree(uip);
uip = NULL;
}
}
/*
@ -1179,6 +1217,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry = vm_map_entry_create(map);
new_entry->start = start;
new_entry->end = end;
new_entry->uip = NULL;
new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
@ -1190,6 +1229,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry->max_protection = max;
new_entry->wired_count = 0;
KASSERT(uip == NULL || !ENTRY_CHARGED(new_entry),
("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
new_entry->uip = uip;
/*
* Insert the new entry into the list
*/
@ -1398,7 +1441,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
(prev->protection == entry->protection) &&
(prev->max_protection == entry->max_protection) &&
(prev->inheritance == entry->inheritance) &&
(prev->wired_count == entry->wired_count)) {
(prev->wired_count == entry->wired_count) &&
(prev->uip == entry->uip)) {
vm_map_entry_unlink(map, prev);
entry->start = prev->start;
entry->offset = prev->offset;
@ -1416,6 +1460,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
*/
if (prev->object.vm_object)
vm_object_deallocate(prev->object.vm_object);
if (prev->uip != NULL)
uifree(prev->uip);
vm_map_entry_dispose(map, prev);
}
}
@ -1431,7 +1477,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
(next->protection == entry->protection) &&
(next->max_protection == entry->max_protection) &&
(next->inheritance == entry->inheritance) &&
(next->wired_count == entry->wired_count)) {
(next->wired_count == entry->wired_count) &&
(next->uip == entry->uip)) {
vm_map_entry_unlink(map, next);
entry->end = next->end;
vm_map_entry_resize_free(map, entry);
@ -1441,6 +1488,8 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
*/
if (next->object.vm_object)
vm_object_deallocate(next->object.vm_object);
if (next->uip != NULL)
uifree(next->uip);
vm_map_entry_dispose(map, next);
}
}
@ -1489,6 +1538,21 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
if (entry->uip != NULL) {
object->uip = entry->uip;
object->charge = entry->end - entry->start;
entry->uip = NULL;
}
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->uip != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->uip == NULL,
("OVERCOMMIT: vm_entry_clip_start: both uip e %p", entry));
entry->object.vm_object->uip = entry->uip;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
entry->uip = NULL;
}
new_entry = vm_map_entry_create(map);
@ -1497,6 +1561,8 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
new_entry->end = start;
entry->offset += (start - entry->start);
entry->start = start;
if (new_entry->uip != NULL)
uihold(entry->uip);
vm_map_entry_link(map, entry->prev, new_entry);
@ -1542,6 +1608,21 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
if (entry->uip != NULL) {
object->uip = entry->uip;
object->charge = entry->end - entry->start;
entry->uip = NULL;
}
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->uip != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->uip == NULL,
("OVERCOMMIT: vm_entry_clip_end: both uip e %p", entry));
entry->object.vm_object->uip = entry->uip;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
entry->uip = NULL;
}
/*
@ -1552,6 +1633,8 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
new_entry->start = entry->end = end;
new_entry->offset += (end - entry->start);
if (new_entry->uip != NULL)
uihold(entry->uip);
vm_map_entry_link(map, entry, new_entry);
@ -1724,6 +1807,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
{
vm_map_entry_t current;
vm_map_entry_t entry;
vm_object_t obj;
struct uidinfo *uip;
vm_map_lock(map);
@ -1751,6 +1836,61 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
current = current->next;
}
/*
* Do an accounting pass for private read-only mappings that
* now will do cow due to allowed write (e.g. debugger sets
* breakpoint on text segment)
*/
for (current = entry; (current != &map->header) &&
(current->start < end); current = current->next) {
vm_map_clip_end(map, current, end);
if (set_max ||
((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
ENTRY_CHARGED(current)) {
continue;
}
uip = curthread->td_ucred->cr_ruidinfo;
obj = current->object.vm_object;
if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
if (!swap_reserve(current->end - current->start)) {
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
uihold(uip);
current->uip = uip;
continue;
}
VM_OBJECT_LOCK(obj);
if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
VM_OBJECT_UNLOCK(obj);
continue;
}
/*
* Charge for the whole object allocation now, since
* we cannot distinguish between non-charged and
* charged clipped mapping of the same object later.
*/
KASSERT(obj->charge == 0,
("vm_map_protect: object %p overcharged\n", obj));
if (!swap_reserve(ptoa(obj->size))) {
VM_OBJECT_UNLOCK(obj);
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
uihold(uip);
obj->uip = uip;
obj->charge = ptoa(obj->size);
VM_OBJECT_UNLOCK(obj);
}
/*
* Go back and fix up protections. [Note that clipping is not
* necessary the second time.]
@ -1759,8 +1899,6 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
while ((current != &map->header) && (current->start < end)) {
vm_prot_t old_prot;
vm_map_clip_end(map, current, end);
old_prot = current->protection;
if (set_max)
current->protection =
@ -2470,14 +2608,25 @@ static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
vm_object_t object;
vm_pindex_t offidxstart, offidxend, count;
vm_pindex_t offidxstart, offidxend, count, size1;
vm_ooffset_t size;
vm_map_entry_unlink(map, entry);
map->size -= entry->end - entry->start;
object = entry->object.vm_object;
size = entry->end - entry->start;
map->size -= size;
if (entry->uip != NULL) {
swap_release_by_uid(size, entry->uip);
uifree(entry->uip);
}
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
(object = entry->object.vm_object) != NULL) {
count = OFF_TO_IDX(entry->end - entry->start);
(object != NULL)) {
KASSERT(entry->uip == NULL || object->uip == NULL ||
(entry->eflags & MAP_ENTRY_NEEDS_COPY),
("OVERCOMMIT vm_map_entry_delete: both uip %p", entry));
count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
VM_OBJECT_LOCK(object);
@ -2489,8 +2638,17 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
if (object->type == OBJT_SWAP)
swap_pager_freespace(object, offidxstart, count);
if (offidxend >= object->size &&
offidxstart < object->size)
offidxstart < object->size) {
size1 = object->size;
object->size = offidxstart;
if (object->uip != NULL) {
size1 -= object->size;
KASSERT(object->charge >= ptoa(size1),
("vm_map_entry_delete: object->charge < 0"));
swap_release_by_uid(ptoa(size1), object->uip);
object->charge -= ptoa(size1);
}
}
}
VM_OBJECT_UNLOCK(object);
} else
@ -2664,9 +2822,13 @@ vm_map_copy_entry(
vm_map_t src_map,
vm_map_t dst_map,
vm_map_entry_t src_entry,
vm_map_entry_t dst_entry)
vm_map_entry_t dst_entry,
vm_ooffset_t *fork_charge)
{
vm_object_t src_object;
vm_offset_t size;
struct uidinfo *uip;
int charged;
VM_MAP_ASSERT_LOCKED(dst_map);
@ -2689,8 +2851,10 @@ vm_map_copy_entry(
/*
* Make a copy of the object.
*/
size = src_entry->end - src_entry->start;
if ((src_object = src_entry->object.vm_object) != NULL) {
VM_OBJECT_LOCK(src_object);
charged = ENTRY_CHARGED(src_entry);
if ((src_object->handle == NULL) &&
(src_object->type == OBJT_DEFAULT ||
src_object->type == OBJT_SWAP)) {
@ -2702,14 +2866,39 @@ vm_map_copy_entry(
}
vm_object_reference_locked(src_object);
vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
if (src_entry->uip != NULL &&
!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
KASSERT(src_object->uip == NULL,
("OVERCOMMIT: vm_map_copy_entry: uip %p",
src_object));
src_object->uip = src_entry->uip;
src_object->charge = size;
}
VM_OBJECT_UNLOCK(src_object);
dst_entry->object.vm_object = src_object;
if (charged) {
uip = curthread->td_ucred->cr_ruidinfo;
uihold(uip);
dst_entry->uip = uip;
*fork_charge += size;
if (!(src_entry->eflags &
MAP_ENTRY_NEEDS_COPY)) {
uihold(uip);
src_entry->uip = uip;
*fork_charge += size;
}
}
src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->offset = src_entry->offset;
} else {
dst_entry->object.vm_object = NULL;
dst_entry->offset = 0;
if (src_entry->uip != NULL) {
dst_entry->uip = curthread->td_ucred->cr_ruidinfo;
uihold(dst_entry->uip);
*fork_charge += size;
}
}
pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
@ -2766,7 +2955,7 @@ vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
* The source map must not be locked.
*/
struct vmspace *
vmspace_fork(struct vmspace *vm1)
vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
{
struct vmspace *vm2;
vm_map_t old_map = &vm1->vm_map;
@ -2777,7 +2966,6 @@ vmspace_fork(struct vmspace *vm1)
int locked;
vm_map_lock(old_map);
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
if (vm2 == NULL)
goto unlock_and_return;
@ -2809,6 +2997,12 @@ vmspace_fork(struct vmspace *vm1)
atop(old_entry->end - old_entry->start));
old_entry->object.vm_object = object;
old_entry->offset = 0;
if (old_entry->uip != NULL) {
object->uip = old_entry->uip;
object->charge = old_entry->end -
old_entry->start;
old_entry->uip = NULL;
}
}
/*
@ -2835,6 +3029,12 @@ vmspace_fork(struct vmspace *vm1)
}
VM_OBJECT_LOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
if (old_entry->uip != NULL) {
KASSERT(object->uip == NULL, ("vmspace_fork both uip"));
object->uip = old_entry->uip;
object->charge = old_entry->end - old_entry->start;
old_entry->uip = NULL;
}
VM_OBJECT_UNLOCK(object);
/*
@ -2877,7 +3077,7 @@ vmspace_fork(struct vmspace *vm1)
new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
vm_map_copy_entry(old_map, new_map, old_entry,
new_entry);
new_entry, fork_charge);
break;
}
old_entry = old_entry->next;
@ -3005,6 +3205,7 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
size_t grow_amount, max_grow;
rlim_t stacklim, vmemlim;
int is_procstack, rv;
struct uidinfo *uip;
Retry:
PROC_LOCK(p);
@ -3170,13 +3371,17 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
}
grow_amount = addr - stack_entry->end;
uip = stack_entry->uip;
if (uip == NULL && stack_entry->object.vm_object != NULL)
uip = stack_entry->object.vm_object->uip;
if (uip != NULL && !swap_reserve_by_uid(grow_amount, uip))
rv = KERN_NO_SPACE;
/* Grow the underlying object if applicable. */
if (stack_entry->object.vm_object == NULL ||
vm_object_coalesce(stack_entry->object.vm_object,
stack_entry->offset,
(vm_size_t)(stack_entry->end - stack_entry->start),
(vm_size_t)grow_amount)) {
else if (stack_entry->object.vm_object == NULL ||
vm_object_coalesce(stack_entry->object.vm_object,
stack_entry->offset,
(vm_size_t)(stack_entry->end - stack_entry->start),
(vm_size_t)grow_amount, uip != NULL)) {
map->size += (addr - stack_entry->end);
/* Update the current entry. */
stack_entry->end = addr;
@ -3249,12 +3454,18 @@ vmspace_unshare(struct proc *p)
{
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
vm_ooffset_t fork_charge;
if (oldvmspace->vm_refcnt == 1)
return (0);
newvmspace = vmspace_fork(oldvmspace);
fork_charge = 0;
newvmspace = vmspace_fork(oldvmspace, &fork_charge);
if (newvmspace == NULL)
return (ENOMEM);
if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) {
vmspace_free(newvmspace);
return (ENOMEM);
}
PROC_VMSPACE_LOCK(p);
p->p_vmspace = newvmspace;
PROC_VMSPACE_UNLOCK(p);
@ -3300,6 +3511,9 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_map_t map = *var_map;
vm_prot_t prot;
vm_prot_t fault_type = fault_typea;
vm_object_t eobject;
struct uidinfo *uip;
vm_ooffset_t size;
RetryLookup:;
@ -3356,7 +3570,7 @@ RetryLookup:;
*wired = (entry->wired_count != 0);
if (*wired)
prot = fault_type = entry->protection;
size = entry->end - entry->start;
/*
* If the entry was copy-on-write, we either ...
*/
@ -3378,11 +3592,40 @@ RetryLookup:;
if (vm_map_lock_upgrade(map))
goto RetryLookup;
if (entry->uip == NULL) {
/*
* The debugger owner is charged for
* the memory.
*/
uip = curthread->td_ucred->cr_ruidinfo;
uihold(uip);
if (!swap_reserve_by_uid(size, uip)) {
uifree(uip);
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
entry->uip = uip;
}
vm_object_shadow(
&entry->object.vm_object,
&entry->offset,
atop(entry->end - entry->start));
atop(size));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
eobject = entry->object.vm_object;
if (eobject->uip != NULL) {
/*
* The object was not shadowed.
*/
swap_release_by_uid(size, entry->uip);
uifree(entry->uip);
entry->uip = NULL;
} else if (entry->uip != NULL) {
VM_OBJECT_LOCK(eobject);
eobject->uip = entry->uip;
eobject->charge = size;
VM_OBJECT_UNLOCK(eobject);
entry->uip = NULL;
}
vm_map_lock_downgrade(map);
} else {
@ -3402,8 +3645,15 @@ RetryLookup:;
if (vm_map_lock_upgrade(map))
goto RetryLookup;
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
atop(entry->end - entry->start));
atop(size));
entry->offset = 0;
if (entry->uip != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
entry->object.vm_object->uip = entry->uip;
entry->object.vm_object->charge = size;
VM_OBJECT_UNLOCK(entry->object.vm_object);
entry->uip = NULL;
}
vm_map_lock_downgrade(map);
}
@ -3583,9 +3833,15 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_indent -= 2;
}
} else {
if (entry->uip != NULL)
db_printf(", uip %d", entry->uip->ui_uid);
db_printf(", object=%p, offset=0x%jx",
(void *)entry->object.vm_object,
(uintmax_t)entry->offset);
if (entry->object.vm_object && entry->object.vm_object->uip)
db_printf(", obj uip %d charge %jx",
entry->object.vm_object->uip->ui_uid,
(uintmax_t)entry->object.vm_object->charge);
if (entry->eflags & MAP_ENTRY_COW)
db_printf(", copy (%s)",
(entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");

View File

@ -114,6 +114,7 @@ struct vm_map_entry {
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
vm_pindex_t lastr; /* last read */
struct uidinfo *uip; /* tmp storage for creator ref */
};
#define MAP_ENTRY_NOSYNC 0x0001
@ -310,6 +311,8 @@ long vmspace_wired_count(struct vmspace *vmspace);
#define MAP_PREFAULT_MADVISE 0x0200 /* from (user) madvise request */
#define MAP_STACK_GROWS_DOWN 0x1000
#define MAP_STACK_GROWS_UP 0x2000
#define MAP_ACC_CHARGED 0x4000
#define MAP_ACC_NO_CHARGE 0x8000
/*
* vm_fault option flags

View File

@ -633,6 +633,8 @@ mprotect(td, uap)
return (0);
case KERN_PROTECTION_FAILURE:
return (EACCES);
case KERN_RESOURCE_SHORTAGE:
return (ENOMEM);
}
return (EINVAL);
}
@ -1208,7 +1210,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
objsize = round_page(va.va_size);
if (va.va_nlink == 0)
flags |= MAP_NOSYNC;
obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff);
obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
if (obj == NULL) {
error = ENOMEM;
goto done;
@ -1289,7 +1291,8 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize,
dev_relthread(cdev);
if (error != ENODEV)
return (error);
obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff);
obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
td->td_ucred);
if (obj == NULL)
return (EINVAL);
*objp = obj;

View File

@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h> /* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@ -222,6 +223,8 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->generation = 1;
object->ref_count = 1;
object->flags = 0;
object->uip = NULL;
object->charge = 0;
if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
object->flags = OBJ_ONEMAPPING;
object->pg_color = 0;
@ -608,6 +611,20 @@ vm_object_destroy(vm_object_t object)
TAILQ_REMOVE(&vm_object_list, object, object_list);
mtx_unlock(&vm_object_list_mtx);
/*
* Release the allocation charge.
*/
if (object->uip != NULL) {
KASSERT(object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP,
("vm_object_terminate: non-swap obj %p has uip",
object));
swap_release_by_uid(object->charge, object->uip);
object->charge = 0;
uifree(object->uip);
object->uip = NULL;
}
/*
* Free the space for the object.
*/
@ -1347,6 +1364,14 @@ vm_object_split(vm_map_entry_t entry)
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
}
if (orig_object->uip != NULL) {
new_object->uip = orig_object->uip;
uihold(orig_object->uip);
new_object->charge = ptoa(size);
KASSERT(orig_object->charge >= ptoa(size),
("orig_object->charge < 0"));
orig_object->charge -= ptoa(size);
}
retry:
if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
if (m->pindex < offidxstart) {
@ -1757,6 +1782,13 @@ vm_object_collapse(vm_object_t object)
* and no object references within it, all that is
* necessary is to dispose of it.
*/
if (backing_object->uip != NULL) {
swap_release_by_uid(backing_object->charge,
backing_object->uip);
backing_object->charge = 0;
uifree(backing_object->uip);
backing_object->uip = NULL;
}
KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
VM_OBJECT_UNLOCK(backing_object);
@ -1994,13 +2026,15 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
* prev_offset Offset into prev_object
* prev_size Size of reference to prev_object
* next_size Size of reference to the second object
* reserved Indicator that extension region has
* swap accounted for
*
* Conditions:
* The object must *not* be locked.
*/
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
vm_size_t prev_size, vm_size_t next_size)
vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
vm_pindex_t next_pindex;
@ -2038,6 +2072,28 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
return (FALSE);
}
/*
* Account for the charge.
*/
if (prev_object->uip != NULL) {
/*
* If prev_object was charged, then this mapping,
* althought not charged now, may become writable
* later. Non-NULL uip in the object would prevent
* swap reservation during enabling of the write
* access, so reserve swap now. Failed reservation
* cause allocation of the separate object for the map
* entry, and swap reservation for this entry is
* managed in appropriate time.
*/
if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
prev_object->uip)) {
return (FALSE);
}
prev_object->charge += ptoa(next_size);
}
/*
* Remove any pages that may still be in the object from a previous
* deallocation.
@ -2049,6 +2105,16 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (prev_object->type == OBJT_SWAP)
swap_pager_freespace(prev_object,
next_pindex, next_size);
#if 0
if (prev_object->uip != NULL) {
KASSERT(prev_object->charge >=
ptoa(prev_object->size - next_pindex),
("object %p overcharged 1 %jx %jx", prev_object,
(uintmax_t)next_pindex, (uintmax_t)next_size));
prev_object->charge -= ptoa(prev_object->size -
next_pindex);
}
#endif
}
/*
@ -2198,9 +2264,10 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
return;
db_iprintf(
"Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
"Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x uip %d charge %jx\n",
object, (int)object->type, (uintmax_t)object->size,
object->resident_page_count, object->ref_count, object->flags);
object->resident_page_count, object->ref_count, object->flags,
object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
object->shadow_count,
object->backing_object ? object->backing_object->ref_count : 0,

View File

@ -133,6 +133,8 @@ struct vm_object {
int swp_bcount;
} swp;
} un_pager;
struct uidinfo *uip;
vm_ooffset_t charge;
};
/*
@ -198,7 +200,8 @@ void vm_object_pip_wait(vm_object_t object, char *waitid);
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
boolean_t);
void vm_object_collapse (vm_object_t);
void vm_object_deallocate (vm_object_t);
void vm_object_destroy (vm_object_t);

View File

@ -88,7 +88,7 @@ int cluster_pbuf_freecnt = -1; /* unlimited to begin with */
static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t);
vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
@ -105,7 +105,7 @@ dead_pager_getpages(obj, ma, count, req)
static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t off)
vm_ooffset_t off, struct ucred *cred)
{
return NULL;
}
@ -227,14 +227,14 @@ vm_pager_bufferinit()
*/
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
vm_prot_t prot, vm_ooffset_t off)
vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
vm_object_t ret;
struct pagerops *ops;
ops = pagertab[type];
if (ops)
ret = (*ops->pgo_alloc) (handle, size, prot, off);
ret = (*ops->pgo_alloc) (handle, size, prot, off, cred);
else
ret = NULL;
return (ret);

View File

@ -47,7 +47,8 @@
TAILQ_HEAD(pagerlst, vm_object);
typedef void pgo_init_t(void);
typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t,
struct ucred *);
typedef void pgo_dealloc_t(vm_object_t);
typedef int pgo_getpages_t(vm_object_t, vm_page_t *, int, int);
typedef void pgo_putpages_t(vm_object_t, vm_page_t *, int, int, int *);
@ -100,7 +101,8 @@ extern vm_map_t pager_map;
extern struct pagerops *pagertab[];
extern struct mtx pbuf_mtx;
vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t, struct ucred *);
void vm_pager_bufferinit(void);
void vm_pager_deallocate(vm_object_t);
static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);

View File

@ -83,7 +83,8 @@ static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t, struct ucred *cred);
struct pagerops vnodepagerops = {
.pgo_alloc = vnode_pager_alloc,
@ -128,7 +129,7 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
}
}
object = vnode_pager_alloc(vp, size, 0, 0);
object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
/*
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
@ -185,7 +186,7 @@ vnode_destroy_vobject(struct vnode *vp)
*/
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset)
vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
struct vnode *vp;