o Move vm_freeze_copyopts() from vm_map.{c,h} to vm_object.{c,h}. It's plainly
an operation on a vm_object and belongs in the latter place.
This commit is contained in:
parent
5eb6f4bc21
commit
e86256c1f4
@ -3154,83 +3154,6 @@ vm_uiomove(
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Performs the copy_on_write operations necessary to allow the virtual copies
|
||||
* into user space to work. This has to be called for write(2) system calls
|
||||
* from other processes, file unlinking, and file size shrinkage.
|
||||
*/
|
||||
void
|
||||
vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
|
||||
{
|
||||
int rv;
|
||||
vm_object_t robject;
|
||||
vm_pindex_t idx;
|
||||
|
||||
GIANT_REQUIRED;
|
||||
if ((object == NULL) ||
|
||||
((object->flags & OBJ_OPT) == 0))
|
||||
return;
|
||||
|
||||
if (object->shadow_count > object->ref_count)
|
||||
panic("vm_freeze_copyopts: sc > rc");
|
||||
|
||||
while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
|
||||
vm_pindex_t bo_pindex;
|
||||
vm_page_t m_in, m_out;
|
||||
|
||||
bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
|
||||
|
||||
vm_object_reference(robject);
|
||||
|
||||
vm_object_pip_wait(robject, "objfrz");
|
||||
|
||||
if (robject->ref_count == 1) {
|
||||
vm_object_deallocate(robject);
|
||||
continue;
|
||||
}
|
||||
|
||||
vm_object_pip_add(robject, 1);
|
||||
|
||||
for (idx = 0; idx < robject->size; idx++) {
|
||||
|
||||
m_out = vm_page_grab(robject, idx,
|
||||
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
|
||||
|
||||
if (m_out->valid == 0) {
|
||||
m_in = vm_page_grab(object, bo_pindex + idx,
|
||||
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
|
||||
if (m_in->valid == 0) {
|
||||
rv = vm_pager_get_pages(object, &m_in, 1, 0);
|
||||
if (rv != VM_PAGER_OK) {
|
||||
printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
|
||||
continue;
|
||||
}
|
||||
vm_page_deactivate(m_in);
|
||||
}
|
||||
|
||||
vm_page_protect(m_in, VM_PROT_NONE);
|
||||
pmap_copy_page(m_in, m_out);
|
||||
m_out->valid = m_in->valid;
|
||||
vm_page_dirty(m_out);
|
||||
vm_page_activate(m_out);
|
||||
vm_page_wakeup(m_in);
|
||||
}
|
||||
vm_page_wakeup(m_out);
|
||||
}
|
||||
|
||||
object->shadow_count--;
|
||||
object->ref_count--;
|
||||
TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
|
||||
robject->backing_object = NULL;
|
||||
robject->backing_object_offset = 0;
|
||||
|
||||
vm_object_pip_wakeup(robject);
|
||||
vm_object_deallocate(robject);
|
||||
}
|
||||
|
||||
vm_object_clear_flag(object, OBJ_OPT);
|
||||
}
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#ifdef DDB
|
||||
#include <sys/kernel.h>
|
||||
|
@ -300,7 +300,6 @@ int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
|
||||
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
|
||||
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
|
||||
void vm_init2 (void);
|
||||
void vm_freeze_copyopts (vm_object_t, vm_pindex_t, vm_pindex_t);
|
||||
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
|
||||
int vm_map_growstack (struct proc *p, vm_offset_t addr);
|
||||
int vmspace_swap_count (struct vmspace *vmspace);
|
||||
|
@ -1775,6 +1775,83 @@ vm_object_set_writeable_dirty(vm_object_t object)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Performs the copy_on_write operations necessary to allow the virtual copies
 * into user space to work.  This has to be called for write(2) system calls
 * from other processes, file unlinking, and file size shrinkage.
 *
 * NOTE(review): the froma/toa pindex range arguments are accepted but never
 * referenced in the body below -- every resident page of each shadow object
 * is materialized regardless of the requested range.  Confirm whether
 * range-limiting was ever intended.
 */
void
vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
{
	int rv;
	vm_object_t robject;
	vm_pindex_t idx;

	GIANT_REQUIRED;
	/* Nothing to do unless the object is flagged for this optimization. */
	if ((object == NULL) ||
	    ((object->flags & OBJ_OPT) == 0))
		return;

	/* Sanity check: each shadow must hold a reference on this object. */
	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	/*
	 * Detach every shadow object in turn by fully populating its pages,
	 * copying from this (backing) object where the shadow has no valid
	 * page of its own.
	 */
	while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
		vm_pindex_t bo_pindex;
		vm_page_t m_in, m_out;

		/* Pindex in this object where the shadow's view begins. */
		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

		/* Hold the shadow so it cannot disappear while we work. */
		vm_object_reference(robject);

		/* Wait for in-flight paging activity on the shadow to drain. */
		vm_object_pip_wait(robject, "objfrz");

		/*
		 * If ours is now the only reference, the shadow is dying;
		 * dropping that reference destroys it (and removes it from
		 * the shadow list), so just move on to the next one.
		 */
		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		vm_object_pip_add(robject, 1);

		/* Materialize every page of the shadow object. */
		for (idx = 0; idx < robject->size; idx++) {

			m_out = vm_page_grab(robject, idx,
						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			if (m_out->valid == 0) {
				/* Shadow lacks this page: copy from backing. */
				m_in = vm_page_grab(object, bo_pindex + idx,
						VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				if (m_in->valid == 0) {
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
						/*
						 * NOTE(review): this continue skips the
						 * vm_page_wakeup() calls on both m_in and
						 * m_out, seemingly leaving both pages busy.
						 * Confirm this is not a busy-page leak.
						 */
						continue;
					}
					vm_page_deactivate(m_in);
				}

				/* Unmap the source so it can't change mid-copy. */
				vm_page_protect(m_in, VM_PROT_NONE);
				pmap_copy_page(m_in, m_out);
				m_out->valid = m_in->valid;
				vm_page_dirty(m_out);
				vm_page_activate(m_out);
				vm_page_wakeup(m_in);
			}
			vm_page_wakeup(m_out);
		}

		/* Unlink the now self-contained shadow from this object. */
		object->shadow_count--;
		object->ref_count--;
		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
		robject->backing_object = NULL;
		robject->backing_object_offset = 0;

		vm_object_pip_wakeup(robject);
		/* Drop the working reference taken at the top of the loop. */
		vm_object_deallocate(robject);
	}

	/* All shadows detached; the optimization flag no longer applies. */
	vm_object_clear_flag(object, OBJ_OPT);
}
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#ifdef DDB
|
||||
#include <sys/kernel.h>
|
||||
|
@ -171,6 +171,7 @@ extern vm_object_t kmem_object;
|
||||
#endif /* _KERNEL */
|
||||
|
||||
#ifdef _KERNEL
|
||||
void vm_freeze_copyopts(vm_object_t, vm_pindex_t, vm_pindex_t);
|
||||
|
||||
void vm_object_set_flag(vm_object_t object, u_short bits);
|
||||
void vm_object_clear_flag(vm_object_t object, u_short bits);
|
||||
|
Loading…
x
Reference in New Issue
Block a user