Call swap_pager_freespace() from vm_object_page_remove().

All vm_object_page_remove() callers, except
linux_invalidate_mapping_pages() in the LinuxKPI, free swap space when
removing a range of pages from an object.  The LinuxKPI case appears to
be an unintentional omission that could result in leaked swap blocks, so
unconditionally free swap space in vm_object_page_remove() to protect
against similar bugs in the future.

Reviewed by:	alc, kib
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D25329
Author:	Mark Johnston
Date:	2020-06-25 15:21:21 +00:00
Commit:	84242cf68a (parent 0a1016f9e8)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=362613
5 changed files with 9 additions and 20 deletions
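
The change relies on an existing convention of vm_object_page_remove(): an end argument of 0 means "remove through the end of the object", which is why the new code substitutes object->size before computing the range handed to swap_pager_freespace().  The userland sketch below only models that consolidated behavior; it is not kernel code, and the struct toy_object type and toy_* helper names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for a swap-backed VM object; not the kernel structures. */
struct toy_object {
	size_t size;		/* object size in pages */
	bool *resident;		/* is the page resident in memory? */
	bool *swapblk;		/* does the page have a swap block assigned? */
};

/* Model of swap_pager_freespace(): drop swap blocks for [start, start + npages). */
static void
toy_swap_freespace(struct toy_object *obj, size_t start, size_t npages)
{
	for (size_t i = start; i < obj->size && i < start + npages; i++)
		obj->swapblk[i] = false;
}

/*
 * Model of vm_object_page_remove() after this change: end == 0 means
 * "through the end of the object", and swap space is freed here rather
 * than by every caller.
 */
static void
toy_page_remove(struct toy_object *obj, size_t start, size_t end)
{
	if (end == 0)
		end = obj->size;
	for (size_t i = start; i < obj->size && i < end; i++)
		obj->resident[i] = false;
	toy_swap_freespace(obj, start, end - start);
}

int
main(void)
{
	struct toy_object obj = {
		.size = 8,
		.resident = calloc(8, sizeof(bool)),
		.swapblk = calloc(8, sizeof(bool)),
	};

	if (obj.resident == NULL || obj.swapblk == NULL)
		return (1);
	for (size_t i = 0; i < obj.size; i++)
		obj.resident[i] = obj.swapblk[i] = true;

	/* Shrink to 4 pages: a single call now frees both pages and swap. */
	toy_page_remove(&obj, 4, 0);

	for (size_t i = 0; i < obj.size; i++)
		printf("page %zu: resident=%d swapblk=%d\n", i,
		    obj.resident[i], obj.swapblk[i]);
	free(obj.resident);
	free(obj.swapblk);
	return (0);
}

With the free folded into the remove routine, the callers changed below (mdresize(), tmpfs_reg_resize(), shm_dotruncate_locked(), vm_map_entry_delete(), and vm_object_coalesce()) simply drop their separate swap_pager_freespace() calls.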

sys/dev/md/md.c

@@ -1566,8 +1566,6 @@ mdresize(struct md_s *sc, struct md_req *mdr)
 		if (newpages < oldpages) {
 			VM_OBJECT_WLOCK(sc->object);
 			vm_object_page_remove(sc->object, newpages, 0, 0);
-			swap_pager_freespace(sc->object, newpages,
-			    oldpages - newpages);
 			swap_release_by_cred(IDX_TO_OFF(oldpages -
 			    newpages), sc->cred);
 			sc->object->charge = IDX_TO_OFF(newpages);

sys/fs/tmpfs/tmpfs_subr.c

@@ -1517,11 +1517,8 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
 		/*
 		 * Release any swap space and free any whole pages.
 		 */
-		if (newpages < oldpages) {
-			swap_pager_freespace(uobj, newpages, oldpages -
-			    newpages);
+		if (newpages < oldpages)
 			vm_object_page_remove(uobj, newpages, 0, 0);
-		}
 	}
 	uobj->size = newpages;
 	VM_OBJECT_WUNLOCK(uobj);

sys/kern/uipc_shm.c

@@ -540,15 +540,10 @@ shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
 		}
 		delta = IDX_TO_OFF(object->size - nobjsize);
 		/* Toss in memory pages. */
 		if (nobjsize < object->size)
 			vm_object_page_remove(object, nobjsize, object->size,
 			    0);
-		/* Toss pages from swap. */
-		if (object->type == OBJT_SWAP)
-			swap_pager_freespace(object, nobjsize, delta);
 		/* Free the swap accounted for shm */
 		swap_release_by_cred(delta, object->cred);
 		object->charge -= delta;

sys/vm/vm_map.c

@@ -3644,7 +3644,7 @@ static void
 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_object_t object;
-	vm_pindex_t offidxstart, offidxend, count, size1;
+	vm_pindex_t offidxstart, offidxend, size1;
 	vm_size_t size;
 	vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
@@ -3673,9 +3673,8 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 		KASSERT(entry->cred == NULL || object->cred == NULL ||
 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
 		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
-		count = atop(size);
 		offidxstart = OFF_TO_IDX(entry->offset);
-		offidxend = offidxstart + count;
+		offidxend = offidxstart + atop(size);
 		VM_OBJECT_WLOCK(object);
 		if (object->ref_count != 1 &&
 		    ((object->flags & OBJ_ONEMAPPING) != 0 ||
@@ -3690,9 +3689,6 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 			 */
			vm_object_page_remove(object, offidxstart, offidxend,
 			    OBJPR_NOTMAPPED);
-			if (object->type == OBJT_SWAP)
-				swap_pager_freespace(object, offidxstart,
-				    count);
 			if (offidxend >= object->size &&
 			    offidxstart < object->size) {
 				size1 = object->size;

sys/vm/vm_object.c

@@ -2121,6 +2121,12 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 		vm_page_free(p);
 	}
 	vm_object_pip_wakeup(object);
+	if (object->type == OBJT_SWAP) {
+		if (end == 0)
+			end = object->size;
+		swap_pager_freespace(object, start, end - start);
+	}
 }
 /*
@@ -2288,9 +2294,6 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
 	if (next_pindex < prev_object->size) {
 		vm_object_page_remove(prev_object, next_pindex, next_pindex +
 		    next_size, 0);
-		if (prev_object->type == OBJT_SWAP)
-			swap_pager_freespace(prev_object,
-			    next_pindex, next_size);
 #if 0
 		if (prev_object->cred != NULL) {
 			KASSERT(prev_object->charge >=