- define and use VMCNT_{GET,SET,ADD,SUB,PTR} macros for manipulating
  vmcnts. This can be used to abstract away pcpu details, but it also
  changes all counters to use atomics now. This means the sched lock is
  no longer responsible for protecting counts in the switch routines.

Contributed by:	Attilio Rao <attilio@FreeBSD.org>
parent beb495eff1
commit e1996cb960
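Editor's note: a minimal userland sketch of the macro pattern this commit introduces, to make the token pasting and the atomics visible outside the kernel. The struct below is a trimmed stand-in for struct vmmeter, and the GCC/Clang __atomic builtins stand in for the kernel's atomic(9) primitives; names and fields are illustrative, not the full FreeBSD definitions.

#include <stdio.h>

/* Trimmed stand-in for struct vmmeter; the real struct has many more fields. */
struct vmmeter {
	unsigned int v_free_count;	/* pages free */
	unsigned int v_wire_count;	/* pages wired down */
	unsigned int v_swtch;		/* context switches */
};

static volatile struct vmmeter cnt;

/* Token pasting maps VMCNT_ADD(swtch, 1) onto an atomic add of cnt.v_swtch. */
#define	VMCNT_GET(member)	(cnt.v_##member)
#define	VMCNT_ADD(member, val)	\
	__atomic_add_fetch(&cnt.v_##member, (val), __ATOMIC_RELAXED)
#define	VMCNT_DEC(member, val)	\
	__atomic_sub_fetch(&cnt.v_##member, (val), __ATOMIC_RELAXED)
#define	VMCNT_SET(member, val)	\
	__atomic_store_n(&cnt.v_##member, (val), __ATOMIC_RELEASE)

int
main(void)
{
	VMCNT_SET(free_count, 1024);	/* was: cnt.v_free_count = 1024; */
	VMCNT_ADD(swtch, 1);		/* was: cnt.v_swtch++; under sched lock */
	VMCNT_DEC(free_count, 1);	/* was: atomic_subtract_int(&cnt.v_free_count, 1); */
	printf("free %u swtch %u\n", VMCNT_GET(free_count), VMCNT_GET(swtch));
	return (0);
}

Because every update is a real atomic, no single lock has to serialize the counters; that is why the diff below can move updates like cnt.v_trap++ out from under sched_lock.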
@@ -221,8 +221,8 @@ cpu_startup(dummy)
 	vm_ksubmap_init(&kmi);
 
 	printf("avail memory = %ju (%ju MB)\n",
-	    ptoa((uintmax_t)cnt.v_free_count),
-	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);
+	    ptoa((uintmax_t)VMCNT_GET(free_count)),
+	    ptoa((uintmax_t)VMCNT_GET(free_count)) / 1048576);
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
@@ -620,7 +620,7 @@ pmap_init(void)
 	 * numbers of pv entries.
 	 */
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
-	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
+	pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 }
@@ -633,7 +633,7 @@ pmap_pventry_proc(SYSCTL_HANDLER_ARGS)
 
 	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
 	if (error == 0 && req->newptr) {
-		shpgperproc = (pv_entry_max - cnt.v_page_count) / maxproc;
+		shpgperproc = (pv_entry_max - VMCNT_GET(page_count)) / maxproc;
 		pv_entry_high_water = 9 * (pv_entry_max / 10);
 	}
 	return (error);
@@ -648,7 +648,7 @@ pmap_shpgperproc_proc(SYSCTL_HANDLER_ARGS)
 
 	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
 	if (error == 0 && req->newptr) {
-		pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
+		pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
 		pv_entry_high_water = 9 * (pv_entry_max / 10);
 	}
 	return (error);
@@ -1149,8 +1149,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 */
 	m->right = *free;
 	*free = m;
-
-	atomic_subtract_int(&cnt.v_wire_count, 1);
+	VMCNT_DEC(wire_count, 1);
 	return 1;
 }
 
@@ -1460,7 +1459,7 @@ pmap_release(pmap_t pmap)
 	pmap->pm_pml4[PML4PML4I] = 0;	/* Recursive Mapping	*/
 
 	m->wire_count--;
-	atomic_subtract_int(&cnt.v_wire_count, 1);
+	VMCNT_DEC(wire_count, 1);
 	vm_page_free_zero(m);
 	PMAP_LOCK_DESTROY(pmap);
 }
@@ -286,8 +286,8 @@ cpu_startup(void *dummy)
 	vm_ksubmap_init(&kmi);
 
 	printf("avail memory = %ju (%ju MB)\n",
-	    (uintmax_t)ptoa(cnt.v_free_count),
-	    (uintmax_t)ptoa(cnt.v_free_count) / 1048576);
+	    (uintmax_t)ptoa(VMCNT_GET(free_count)),
+	    (uintmax_t)ptoa(VMCNT_GET(free_count)) / 1048576);
 
 	bufinit();
 	vm_pager_bufferinit();
@@ -1970,7 +1970,7 @@ pmap_init(void)
 
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
 
-	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
+	pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
@@ -146,14 +146,14 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
 	/*
 	 * The correct thing here would be:
 	 *
-	memfree = cnt.v_free_count * PAGE_SIZE;
+	memfree = VMCNT_GET(free_count) * PAGE_SIZE;
 	memused = memtotal - memfree;
 	 *
 	 * but it might mislead linux binaries into thinking there
 	 * is very little memory left, so we cheat and tell them that
 	 * all memory that isn't wired down is free.
 	 */
-	memused = cnt.v_wire_count * PAGE_SIZE;
+	memused = VMCNT_GET(wire_count) * PAGE_SIZE;
 	memfree = memtotal - memused;
 	swap_pager_status(&i, &j);
 	swaptotal = (unsigned long long)i * PAGE_SIZE;
@@ -175,7 +175,7 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
 	 * like unstaticizing it just for linprocfs's sake.
 	 */
 	buffers = 0;
-	cached = cnt.v_cache_count * PAGE_SIZE;
+	cached = VMCNT_GET(cache_count) * PAGE_SIZE;
 
 	sbuf_printf(sb,
 	    "        total:    used:    free:  shared: buffers:  cached:\n"
@@ -394,12 +394,12 @@ linprocfs_dostat(PFS_FILL_ARGS)
 	    "intr %u\n"
 	    "ctxt %u\n"
 	    "btime %lld\n",
-	    cnt.v_vnodepgsin,
-	    cnt.v_vnodepgsout,
-	    cnt.v_swappgsin,
-	    cnt.v_swappgsout,
-	    cnt.v_intr,
-	    cnt.v_swtch,
+	    VMCNT_GET(vnodepgsin),
+	    VMCNT_GET(vnodepgsout),
+	    VMCNT_GET(swappgsin),
+	    VMCNT_GET(swappgsout),
+	    VMCNT_GET(intr),
+	    VMCNT_GET(swtch),
 	    (long long)boottime.tv_sec);
 	return (0);
 }
@@ -139,7 +139,7 @@ linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
 	    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
 
 	sysinfo.totalram = physmem * PAGE_SIZE;
-	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;
+	sysinfo.freeram = sysinfo.totalram - VMCNT_GET(wire_count) * PAGE_SIZE;
 
 	sysinfo.sharedram = 0;
 	mtx_lock(&vm_object_list_mtx);
@@ -778,14 +778,14 @@ svr4_sys_sysconfig(td, uap)
 #if defined(UVM)
 		*retval = uvmexp.free;	/* XXX: free instead of total */
 #else
-		*retval = cnt.v_free_count;	/* XXX: free instead of total */
+		*retval = VMCNT_GET(free_count); /* XXX: free instead of total */
 #endif
 		break;
 	case SVR4_CONFIG_AVPHYS_PAGES:
 #if defined(UVM)
 		*retval = uvmexp.active;	/* XXX: active instead of avg */
 #else
-		*retval = cnt.v_active_count;	/* XXX: active instead of avg */
+		*retval = VMCNT_GET(active_count); /* XXX: active instead of avg */
 #endif
 		break;
 #endif /* NOTYET */
@@ -475,8 +475,8 @@ smbfs_getpages(ap)
 
 	kva = (vm_offset_t) bp->b_data;
 	pmap_qenter(kva, pages, npages);
-	cnt.v_vnodein++;
-	cnt.v_vnodepgsin += npages;
+	VMCNT_ADD(vnodein, 1);
+	VMCNT_ADD(vnodepgsin, npages);
 
 	iov.iov_base = (caddr_t) kva;
 	iov.iov_len = count;
@@ -626,8 +626,8 @@ smbfs_putpages(ap)
 
 	kva = (vm_offset_t) bp->b_data;
 	pmap_qenter(kva, pages, npages);
-	cnt.v_vnodeout++;
-	cnt.v_vnodepgsout += count;
+	VMCNT_ADD(vnodeout, 1);
+	VMCNT_ADD(vnodepgsout, count);
 
 	iov.iov_base = (caddr_t) kva;
 	iov.iov_len = count;
@@ -247,8 +247,8 @@ cpu_startup(dummy)
 	vm_ksubmap_init(&kmi);
 
 	printf("avail memory = %ju (%ju MB)\n",
-	    ptoa((uintmax_t)cnt.v_free_count),
-	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);
+	    ptoa((uintmax_t)VMCNT_GET(free_count)),
+	    ptoa((uintmax_t)VMCNT_GET(free_count)) / 1048576);
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
@@ -606,7 +606,7 @@ pmap_init(void)
 	 * numbers of pv entries.
 	 */
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
-	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
+	pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 	pv_entry_max = roundup(pv_entry_max, _NPCPV);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
@@ -1168,7 +1168,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 	pmap->pm_pdir[m->pindex] = 0;
 	--pmap->pm_stats.resident_count;
 
-	atomic_subtract_int(&cnt.v_wire_count, 1);
+	VMCNT_DEC(wire_count, 1);
 
 	/*
 	 * Do an invltlb to make the invalidated mapping
@@ -1536,7 +1536,7 @@ pmap_release(pmap_t pmap)
 		    ("pmap_release: got wrong ptd page"));
 #endif
 		m->wire_count--;
-		atomic_subtract_int(&cnt.v_wire_count, 1);
+		VMCNT_DEC(wire_count, 1);
 		vm_page_free_zero(m);
 	}
 	PMAP_LOCK_DESTROY(pmap);
@@ -283,8 +283,8 @@ cpu_startup(dummy)
 
 	vm_ksubmap_init(&kmi);
 
-	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
-	    ptoa(cnt.v_free_count) / 1048576);
+	printf("avail memory = %ld (%ld MB)\n", ptoa(VMCNT_GET(free_count)),
+	    ptoa(VMCNT_GET(free_count)) / 1048576);
 
 	if (fpswa_iface == NULL)
 		printf("Warning: no FPSWA package supplied\n");
@@ -533,7 +533,7 @@ pmap_init(void)
 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
-	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
+	pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 
@@ -467,7 +467,7 @@ proc0_init(void *dummy __unused)
 	p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
 	p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur =
 	    p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
-	i = ptoa(cnt.v_free_count);
+	i = ptoa(VMCNT_GET(free_count));
 	p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = i;
 	p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
 	p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
@@ -665,20 +665,20 @@ fork1(td, flags, pages, procp)
 		vm_forkproc(td, p2, td2, flags);
 
 		if (flags == (RFFDG | RFPROC)) {
-			atomic_add_int(&cnt.v_forks, 1);
-			atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
+			VMCNT_ADD(forks, 1);
+			VMCNT_ADD(forkpages, p2->p_vmspace->vm_dsize +
 			    p2->p_vmspace->vm_ssize);
 		} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
-			atomic_add_int(&cnt.v_vforks, 1);
-			atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
+			VMCNT_ADD(vforks, 1);
+			VMCNT_ADD(vforkpages, p2->p_vmspace->vm_dsize +
 			    p2->p_vmspace->vm_ssize);
 		} else if (p1 == &proc0) {
-			atomic_add_int(&cnt.v_kthreads, 1);
-			atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
+			VMCNT_ADD(kthreads, 1);
+			VMCNT_ADD(kthreadpages, p2->p_vmspace->vm_dsize +
 			    p2->p_vmspace->vm_ssize);
 		} else {
-			atomic_add_int(&cnt.v_rforks, 1);
-			atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
+			VMCNT_ADD(rforks, 1);
+			VMCNT_ADD(rforkpages, p2->p_vmspace->vm_dsize +
 			    p2->p_vmspace->vm_ssize);
 		}
 
@@ -551,7 +551,7 @@ kmeminit(void *dummy)
 	 * so make sure that there is enough space.
 	 */
 	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
-	mem_size = cnt.v_page_count;
+	mem_size = VMCNT_GET(page_count);
 
 #if defined(VM_KMEM_SIZE_SCALE)
 	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
@@ -589,8 +589,8 @@ kmeminit(void *dummy)
 	 * to something sane. Be careful to not overflow the 32bit
 	 * ints while doing the check.
 	 */
-	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
-		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;
+	if (((vm_kmem_size / 2) / PAGE_SIZE) > VMCNT_GET(page_count))
+		vm_kmem_size = 2 * VMCNT_GET(page_count) * PAGE_SIZE;
 
 	/*
 	 * Tune settings based on the kernel map's size at this time.
@@ -650,7 +650,8 @@ malloc_init(void *data)
 	struct malloc_type_internal *mtip;
 	struct malloc_type *mtp;
 
-	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));
+	KASSERT(VMCNT_GET(page_count) != 0,
+	    ("malloc_register before vm_init"));
 
 	mtp = data;
 	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
@@ -177,7 +177,7 @@ sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
 {
 	u_long val;
 
-	val = ctob(physmem - cnt.v_wire_count);
+	val = ctob(physmem - VMCNT_GET(wire_count));
 	return (sysctl_handle_long(oidp, &val, 0, req));
 }
 
@@ -433,7 +433,7 @@ mi_switch(int flags, struct thread *newtd)
 	/*
 	 * Finish up stats for outgoing thread.
 	 */
-	cnt.v_swtch++;
+	VMCNT_ADD(swtch, 1);
 	PCPU_SET(switchtime, new_switchtime);
 	PCPU_SET(switchticks, ticks);
 	CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
@@ -407,7 +407,7 @@ thread_exit(void)
 	p->p_rux.rux_iticks += td->td_iticks;
 	PCPU_SET(switchtime, new_switchtime);
 	PCPU_SET(switchticks, ticks);
-	cnt.v_swtch++;
+	VMCNT_ADD(swtch, 1);
 
 	/* Add our usage into the usage of all our children. */
 	if (p->p_numthreads == 1)
@@ -191,8 +191,8 @@ ast(struct trapframe *framep)
 #endif
 	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
 	    TDF_NEEDRESCHED | TDF_INTERRUPT);
-	cnt.v_trap++;
 	mtx_unlock_spin(&sched_lock);
+	VMCNT_ADD(trap, 1);
 
 	/*
 	 * XXXKSE While the fact that we owe a user profiling
@@ -2919,8 +2919,10 @@ allocbuf(struct buf *bp, int size)
 			 */
 			if ((curproc != pageproc) &&
 			    (VM_PAGE_INQUEUE1(m, PQ_CACHE)) &&
-			    ((cnt.v_free_count + cnt.v_cache_count) <
-			    (cnt.v_free_min + cnt.v_cache_min))) {
+			    ((VMCNT_GET(free_count) +
+			    VMCNT_GET(cache_count)) <
+			    (VMCNT_GET(free_min) +
+			    VMCNT_GET(cache_min)))) {
 				pagedaemon_wakeup();
 			}
 			vm_page_wire(m);
@@ -297,8 +297,9 @@ vntblinit(void *dummy __unused)
 	 * of the kernel's heap size is consumed by vnodes and vm
 	 * objects.
 	 */
-	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
-	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
+	desiredvnodes = min(maxproc + VMCNT_GET(page_count) / 4, 2 *
+	    vm_kmem_size / (5 * (sizeof(struct vm_object) +
+	    sizeof(struct vnode))));
 	if (desiredvnodes > MAXVNODES_MAX) {
 		if (bootverbose)
 			printf("Reducing kern.maxvnodes %d -> %d\n",
@@ -581,7 +582,7 @@ vlrureclaim(struct mount *mp)
 	usevnodes = desiredvnodes;
 	if (usevnodes <= 0)
 		usevnodes = 1;
-	trigger = cnt.v_page_count * 2 / usevnodes;
+	trigger = VMCNT_GET(page_count) * 2 / usevnodes;
 	done = 0;
 	td = curthread;
 	vn_start_write(NULL, &mp, V_WAIT);
@@ -159,8 +159,8 @@ nfs_getpages(struct vop_getpages_args *ap)
 
 	kva = (vm_offset_t) bp->b_data;
 	pmap_qenter(kva, pages, npages);
-	cnt.v_vnodein++;
-	cnt.v_vnodepgsin += npages;
+	VMCNT_ADD(vnodein, 1);
+	VMCNT_ADD(vnodepgsin, npages);
 
 	iov.iov_base = (caddr_t) kva;
 	iov.iov_len = count;
@@ -323,8 +323,8 @@ nfs_putpages(struct vop_putpages_args *ap)
 
 	kva = (vm_offset_t) bp->b_data;
 	pmap_qenter(kva, pages, npages);
-	cnt.v_vnodeout++;
-	cnt.v_vnodepgsout += count;
+	VMCNT_ADD(vnodeout, 1);
+	VMCNT_ADD(vnodepgsout, count);
 
 	iov.iov_base = (caddr_t) kva;
 	iov.iov_len = count;
@@ -244,8 +244,8 @@ cpu_startup(dummy)
 	vm_ksubmap_init(&kmi);
 
 	printf("avail memory = %ju (%ju MB)\n",
-	    ptoa((uintmax_t)cnt.v_free_count),
-	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);
+	    ptoa((uintmax_t)VMCNT_GET(free_count)),
+	    ptoa((uintmax_t)VMCNT_GET(free_count)) / 1048576);
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
@@ -223,8 +223,8 @@ cpu_startup(void *dummy)
 
 	vm_ksubmap_init(&kmi);
 
-	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
-	    ptoa(cnt.v_free_count) / 1048576);
+	printf("avail memory = %ld (%ld MB)\n", ptoa(VMCNT_GET(free_count)),
+	    ptoa(VMCNT_GET(free_count)) / 1048576);
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
@@ -223,8 +223,8 @@ cpu_startup(void *dummy)
 
 	vm_ksubmap_init(&kmi);
 
-	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
-	    ptoa(cnt.v_free_count) / 1048576);
+	printf("avail memory = %ld (%ld MB)\n", ptoa(VMCNT_GET(free_count)),
+	    ptoa(VMCNT_GET(free_count)) / 1048576);
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
@@ -211,8 +211,8 @@ cpu_startup(void *arg)
 	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
 	    SHUTDOWN_PRI_LAST);
 
-	printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
-	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
+	printf("avail memory = %lu (%lu MB)\n", VMCNT_GET(free_count) *
+	    PAGE_SIZE, VMCNT_GET(free_count) / ((1024 * 1024) / PAGE_SIZE));
 
 	if (bootverbose)
 		printf("machine: %s\n", sparc64_model);
@@ -1088,7 +1088,7 @@ pmap_release(pmap_t pm)
 		    ("pmap_release: freeing held tsb page"));
 		m->md.pmap = NULL;
 		m->wire_count--;
-		atomic_subtract_int(&cnt.v_wire_count, 1);
+		VMCNT_DEC(wire_count, 1);
 		vm_page_free_zero(m);
 		vm_page_unlock_queues();
 	}
@@ -223,8 +223,8 @@ cpu_startup(void *arg)
 	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
 	    SHUTDOWN_PRI_LAST);
 
-	printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
-	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
+	printf("avail memory = %lu (%lu MB)\n", VMCNT_GET(free_count) *
+	    PAGE_SIZE, VMCNT_GET(free_count) / ((1024 * 1024) / PAGE_SIZE));
 
 	if (bootverbose)
 		printf("machine: %s\n", sparc64_model);
@@ -975,7 +975,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 	 * way below the low water mark of free pages or way
 	 * above high water mark of used pv entries.
	 */
-	if (cnt.v_free_count < cnt.v_free_reserved ||
+	if (VMCNT_GET(free_count) < VMCNT_GET(free_reserved) ||
 	    pv_entry_count > pv_entry_high_water)
 		return;
 
@@ -1318,7 +1318,7 @@ pmap_free_contig_pages(void *ptr, int npages)
 	m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)ptr));
 	for (i = 0; i < npages; i++, m++) {
 		m->wire_count--;
-		atomic_subtract_int(&cnt.v_wire_count, 1);
+		VMCNT_DEC(wire_count, 1);
 		vm_page_free(m);
 	}
 }
@@ -1349,7 +1349,7 @@ pmap_init(void)
 	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
-	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
+	pv_entry_max = shpgperproc * maxproc + VMCNT_GET(page_count);
 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
@@ -104,7 +104,7 @@ tsb_deinit(hv_tsb_info_t *hvtsb)
 	m = PHYS_TO_VM_PAGE((vm_paddr_t)hvtsb->hti_ra);
 	for (i = 0, tm = m; i < TSB_SIZE; i++, m++) {
 		tm->wire_count--;
-		atomic_subtract_int(&cnt.v_wire_count, 1);
+		VMCNT_DEC(wire_count, 1);
 		vm_page_free(tm);
 	}
 }
@@ -231,7 +231,7 @@ free_fragment_pages(void *ptr)
 	for (fh = ptr; fh != NULL; fh = fh->thf_head.fh_next) {
 		m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)fh));
 		m->wire_count--;
-		atomic_subtract_int(&cnt.v_wire_count, 1);
+		VMCNT_DEC(wire_count, 1);
 		vm_page_free(m);
 	}
 }
@@ -102,7 +102,18 @@ struct vmmeter {
 };
 #ifdef _KERNEL
 
-extern struct vmmeter cnt;
+extern volatile struct vmmeter cnt;
+
+#define	VMCNT	__DEVOLATILE(struct vmmeter *, &cnt)
+#define	VMCNT_SET(member, val)					\
+	atomic_store_rel_int(__CONCAT(&cnt.v_, member), val)
+#define	VMCNT_ADD(member, val)					\
+	atomic_add_int(__CONCAT(&cnt.v_, member), val)
+#define	VMCNT_DEC(member, val)					\
+	atomic_subtract_int(__CONCAT(&cnt.v_, member), val)
+#define	VMCNT_GET(member)	(__CONCAT(cnt.v_, member))
+#define	VMCNT_PTR(member)					\
+	__DEVOLATILE(u_int *, __CONCAT(&cnt.v_, member))
 
 /*
  * Return TRUE if we are under our reserved low-free-pages threshold
@@ -112,7 +123,8 @@ static __inline
 int
 vm_page_count_reserved(void)
 {
-	return (cnt.v_free_reserved > (cnt.v_free_count + cnt.v_cache_count));
+	return (VMCNT_GET(free_reserved) > (VMCNT_GET(free_count) +
+	    VMCNT_GET(cache_count)));
 }
 
 /*
@@ -126,7 +138,8 @@ static __inline
 int
 vm_page_count_severe(void)
 {
-	return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count));
+	return (VMCNT_GET(free_severe) > (VMCNT_GET(free_count) +
+	    VMCNT_GET(cache_count)));
 }
 
 /*
@@ -143,7 +156,8 @@ static __inline
 int
 vm_page_count_min(void)
 {
-	return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count));
+	return (VMCNT_GET(free_min) > (VMCNT_GET(free_count) +
+	    VMCNT_GET(cache_count)));
 }
 
 /*
@@ -155,7 +169,8 @@ static __inline
 int
 vm_page_count_target(void)
 {
-	return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count));
+	return (VMCNT_GET(free_target) > (VMCNT_GET(free_count) +
+	    VMCNT_GET(cache_count)));
 }
 
 /*
@@ -168,8 +183,8 @@ int
 vm_paging_target(void)
 {
 	return (
-	    (cnt.v_free_target + cnt.v_cache_min) -
-	    (cnt.v_free_count + cnt.v_cache_count)
+	    (VMCNT_GET(free_target) + VMCNT_GET(cache_min)) -
+	    (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
 	);
 }
 
@@ -182,8 +197,8 @@ int
 vm_paging_needed(void)
 {
 	return (
-	    (cnt.v_free_reserved + cnt.v_cache_min) >
-	    (cnt.v_free_count + cnt.v_cache_count)
+	    (VMCNT_GET(free_reserved) + VMCNT_GET(cache_min)) >
+	    (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
 	);
 }
 
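Editor's note: a worked instance of these predicates, with purely illustrative numbers. If free_target = 4096, cache_min = 2048, free_count = 3000, and cache_count = 500, then vm_paging_target() returns (4096 + 2048) - (3000 + 500) = 2644, the number of pages the pagedaemon should try to reclaim. One subtlety of the conversion: each VMCNT_GET() read is atomic on its own, but these compound expressions are not, so another CPU can change a counter between the two reads. That is acceptable here because these are advisory paging heuristics, not invariants.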
@@ -385,7 +385,7 @@ swap_pager_swap_init(void)
 	 * can hold 16 pages, so this is probably overkill. This reservation
 	 * is typically limited to around 32MB by default.
 	 */
-	n = cnt.v_page_count / 2;
+	n = VMCNT_GET(page_count) / 2;
 	if (maxswzone && n > maxswzone / sizeof(struct swblock))
 		n = maxswzone / sizeof(struct swblock);
 	n2 = n;
@@ -1037,8 +1037,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	}
 	bp->b_npages = j - i;
 
-	cnt.v_swapin++;
-	cnt.v_swappgsin += bp->b_npages;
+	VMCNT_ADD(swapin, 1);
+	VMCNT_ADD(swappgsin, bp->b_npages);
 
 	/*
 	 * We still hold the lock on mreq, and our automatic completion routine
@@ -1072,7 +1072,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 		vm_page_lock_queues();
 		vm_page_flag_set(mreq, PG_REFERENCED);
 		vm_page_unlock_queues();
-		cnt.v_intrans++;
+		VMCNT_ADD(intrans, 1);
 		if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
 			printf(
 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
@@ -1263,8 +1263,8 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
 	bp->b_dirtyoff = 0;
 	bp->b_dirtyend = bp->b_bcount;
 
-	cnt.v_swapout++;
-	cnt.v_swappgsout += bp->b_npages;
+	VMCNT_ADD(swapout, 1);
+	VMCNT_ADD(swappgsout, bp->b_npages);
 
 	/*
 	 * asynchronous
@@ -2135,8 +2135,8 @@ swapoff_one(struct swdevt *sp, struct thread *td)
 	 * of data we will have to page back in, plus an epsilon so
 	 * the system doesn't become critically low on swap space.
 	 */
-	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
-	    nblks + nswap_lowat) {
+	if (VMCNT_GET(free_count) + VMCNT_GET(cache_count) +
+	    swap_pager_avail < nblks + nswap_lowat) {
 		return (ENOMEM);
 	}
 
@@ -271,7 +271,7 @@ SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
 static void
 bucket_enable(void)
 {
-	if (cnt.v_free_count < cnt.v_free_min)
+	if (VMCNT_GET(free_count) < VMCNT_GET(free_min))
 		bucketdisable = 1;
 	else
 		bucketdisable = 0;
@@ -204,7 +204,7 @@ contigmalloc1(
 		 * Find first page in array that is free, within range,
 		 * aligned, and such that the boundary won't be crossed.
		 */
-		for (i = start; i < cnt.v_page_count; i++) {
+		for (i = start; i < VMCNT_GET(page_count); i++) {
 			phys = VM_PAGE_TO_PHYS(&pga[i]);
 			pqtype = pga[i].queue - pga[i].pc;
 			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
@@ -217,7 +217,7 @@ contigmalloc1(
 		/*
 		 * If the above failed or we will exceed the upper bound, fail.
 		 */
-		if ((i == cnt.v_page_count) ||
+		if ((i == VMCNT_GET(page_count)) ||
 		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
 			mtx_unlock(&vm_page_queue_free_mtx);
 			/*
@@ -1271,7 +1271,8 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
 	 * try to do any readahead that we might have free pages for.
 	 */
 	if ((rahead + rbehind) >
-	    ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
+	    ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) -
+	    VMCNT_GET(free_reserved))) {
 		pagedaemon_wakeup();
 		marray[0] = m;
 		*reqpage = 0;
@@ -219,7 +219,7 @@ vslock(void *addr, size_t len)
 	 * Also, the sysctl code, which is the only present user
 	 * of vslock(), does a hard loop on EAGAIN.
 	 */
-	if (npages + cnt.v_wire_count > vm_page_max_wired)
+	if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
 		return (EAGAIN);
 #endif
 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
@@ -589,7 +589,7 @@ vm_init_limits(udata)
 	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
 	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
 	/* limit the limit to no less than 2MB */
-	rss_limit = max(cnt.v_free_count, 512);
+	rss_limit = max(VMCNT_GET(free_count), 512);
 	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
 	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
 }
@@ -274,7 +274,7 @@ vmspace_alloc(min, max)
 void
 vm_init2(void)
 {
-	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
+	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(VMCNT_GET(page_count),
 	    (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
 	    maxproc * 2 + maxfiles);
 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
@@ -1488,7 +1488,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
 		 * free pages allocating pv entries.
 		 */
 		if ((flags & MAP_PREFAULT_MADVISE) &&
-		    cnt.v_free_count < cnt.v_free_reserved) {
+		    VMCNT_GET(free_count) < VMCNT_GET(free_reserved)) {
 			psize = tmpidx;
 			break;
 		}
@@ -52,26 +52,26 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <sys/sysctl.h>
 
-struct vmmeter cnt;
+volatile struct vmmeter cnt;
 
 int maxslp = MAXSLP;
 
 SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
-	CTLFLAG_RW, &cnt.v_free_min, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(free_min), 0, "");
 SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
-	CTLFLAG_RW, &cnt.v_free_target, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(free_target), 0, "");
 SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
-	CTLFLAG_RW, &cnt.v_free_reserved, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(free_reserved), 0, "");
 SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
-	CTLFLAG_RW, &cnt.v_inactive_target, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(inactive_target), 0, "");
 SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
-	CTLFLAG_RW, &cnt.v_cache_min, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(cache_min), 0, "");
 SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
-	CTLFLAG_RW, &cnt.v_cache_max, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(cache_max), 0, "");
 SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
-	CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(pageout_free_min), 0, "");
 SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
-	CTLFLAG_RW, &cnt.v_free_severe, 0, "");
+	CTLFLAG_RW, VMCNT_PTR(free_severe), 0, "");
 
 static int
 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
@@ -235,7 +235,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		}
 	}
 	mtx_unlock(&vm_object_list_mtx);
-	total.t_free = cnt.v_free_count + cnt.v_cache_count;
+	total.t_free = VMCNT_GET(free_count) + VMCNT_GET(cache_count);
 	return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
 }
 
@@ -255,7 +255,7 @@ static int
 vcnt(SYSCTL_HANDLER_ARGS)
 {
 	int count = *(int *)arg1;
-	int offset = (char *)arg1 - (char *)&cnt;
+	int offset = (char *)arg1 - (char *)VMCNT;
 #ifdef SMP
 	int i;
 
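Editor's note: vcnt() receives arg1 pointing at one member of cnt; subtracting the VMCNT base yields that member's byte offset within struct vmmeter, which the SMP branch (cut off in this extract) can then apply to other instances of the structure. A minimal standalone sketch of that idiom, with hypothetical names chosen for illustration:

#include <stdio.h>
#include <stddef.h>

struct vmmeter {
	unsigned int v_swtch;
	unsigned int v_trap;
};

/* Hypothetical global and per-CPU copies, for illustration only. */
static struct vmmeter cnt, pcpu_cnt[2] = {{1, 2}, {3, 4}};

int
main(void)
{
	/* Byte offset of v_trap, recovered by pointer subtraction. */
	ptrdiff_t offset = (char *)&cnt.v_trap - (char *)&cnt;
	unsigned int sum = 0;

	for (int i = 0; i < 2; i++)
		sum += *(unsigned int *)((char *)&pcpu_cnt[i] + offset);
	printf("summed v_trap across CPUs: %u\n", sum);	/* prints 6 */
	return (0);
}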
@@ -280,101 +280,103 @@ static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0,
 SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
 
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_swtch, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_swtch, 0, vcnt, "IU", "Context switches");
+	VMCNT_PTR(swtch), 0, vcnt, "IU", "Context switches");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_trap, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_trap, 0, vcnt, "IU", "Traps");
+	VMCNT_PTR(trap), 0, vcnt, "IU", "Traps");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_syscall, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_syscall, 0, vcnt, "IU", "Syscalls");
+	VMCNT_PTR(syscall), 0, vcnt, "IU", "Syscalls");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_intr, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_intr, 0, vcnt, "IU", "Hardware interrupts");
+	VMCNT_PTR(intr), 0, vcnt, "IU", "Hardware interrupts");
 SYSCTL_PROC(_vm_stats_sys, OID_AUTO, v_soft, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_soft, 0, vcnt, "IU", "Software interrupts");
+	VMCNT_PTR(soft), 0, vcnt, "IU", "Software interrupts");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vm_faults, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_vm_faults, 0, vcnt, "IU", "VM faults");
+	VMCNT_PTR(vm_faults), 0, vcnt, "IU", "VM faults");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_faults, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_cow_faults, 0, vcnt, "IU", "COW faults");
+	VMCNT_PTR(cow_faults), 0, vcnt, "IU", "COW faults");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cow_optim, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_cow_optim, 0, vcnt, "IU", "Optimized COW faults");
+	VMCNT_PTR(cow_optim), 0, vcnt, "IU", "Optimized COW faults");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_zfod, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_zfod, 0, vcnt, "IU", "Zero fill");
+	VMCNT_PTR(zfod), 0, vcnt, "IU", "Zero fill");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_ozfod, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_ozfod, 0, vcnt, "IU", "Optimized zero fill");
+	VMCNT_PTR(ozfod), 0, vcnt, "IU", "Optimized zero fill");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapin, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_swapin, 0, vcnt, "IU", "Swapin operations");
+	VMCNT_PTR(swapin), 0, vcnt, "IU", "Swapin operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swapout, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_swapout, 0, vcnt, "IU", "Swapout operations");
+	VMCNT_PTR(swapout), 0, vcnt, "IU", "Swapout operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsin, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_swappgsin, 0, vcnt, "IU", "Swapin pages");
+	VMCNT_PTR(swappgsin), 0, vcnt, "IU", "Swapin pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_swappgsout, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_swappgsout, 0, vcnt, "IU", "Swapout pages");
+	VMCNT_PTR(swappgsout), 0, vcnt, "IU", "Swapout pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodein, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_vnodein, 0, vcnt, "IU", "Vnodein operations");
+	VMCNT_PTR(vnodein), 0, vcnt, "IU", "Vnodein operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodeout, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_vnodeout, 0, vcnt, "IU", "Vnodeout operations");
+	VMCNT_PTR(vnodeout), 0, vcnt, "IU", "Vnodeout operations");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsin, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_vnodepgsin, 0, vcnt, "IU", "Vnodein pages");
+	VMCNT_PTR(vnodepgsin), 0, vcnt, "IU", "Vnodein pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vnodepgsout, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_vnodepgsout, 0, vcnt, "IU", "Vnodeout pages");
+	VMCNT_PTR(vnodepgsout), 0, vcnt, "IU", "Vnodeout pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_intrans, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_intrans, 0, vcnt, "IU", "In transit page blocking");
+	VMCNT_PTR(intrans), 0, vcnt, "IU", "In transit page blocking");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_reactivated, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_reactivated, 0, vcnt, "IU", "Reactivated pages");
+	VMCNT_PTR(reactivated), 0, vcnt, "IU", "Reactivated pages");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdwakeups, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_pdwakeups, 0, vcnt, "IU", "Pagedaemon wakeups");
+	VMCNT_PTR(pdwakeups), 0, vcnt, "IU", "Pagedaemon wakeups");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_pdpages, 0, vcnt, "IU", "Pagedaemon page scans");
+	VMCNT_PTR(pdpages), 0, vcnt, "IU", "Pagedaemon page scans");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_dfree, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_dfree, 0, vcnt, "IU", "");
+	VMCNT_PTR(dfree), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pfree, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_pfree, 0, vcnt, "IU", "");
+	VMCNT_PTR(pfree), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_tfree, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_tfree, 0, vcnt, "IU", "");
+	VMCNT_PTR(tfree), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_size, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_page_size, 0, vcnt, "IU", "");
+	VMCNT_PTR(page_size), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_page_count, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_page_count, 0, vcnt, "IU", "");
+	VMCNT_PTR(page_count), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_reserved, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_free_reserved, 0, vcnt, "IU", "");
+	VMCNT_PTR(free_reserved), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_target, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_free_target, 0, vcnt, "IU", "");
+	VMCNT_PTR(free_target), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_min, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_free_min, 0, vcnt, "IU", "");
+	VMCNT_PTR(free_min), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_free_count, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_free_count, 0, vcnt, "IU", "");
+	VMCNT_PTR(free_count), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_wire_count, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_wire_count, 0, vcnt, "IU", "");
+	VMCNT_PTR(wire_count), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_active_count, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_active_count, 0, vcnt, "IU", "");
+	VMCNT_PTR(active_count), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_target, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_inactive_target, 0, vcnt, "IU", "");
+	VMCNT_PTR(inactive_target), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_inactive_count, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_inactive_count, 0, vcnt, "IU", "");
+	VMCNT_PTR(inactive_count), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_count, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_cache_count, 0, vcnt, "IU", "");
+	VMCNT_PTR(cache_count), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_min, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_cache_min, 0, vcnt, "IU", "");
+	VMCNT_PTR(cache_min), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_cache_max, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_cache_max, 0, vcnt, "IU", "");
+	VMCNT_PTR(cache_max), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pageout_free_min, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_pageout_free_min, 0, vcnt, "IU", "");
-SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_interrupt_free_min, 0, vcnt, "IU", "");
+	VMCNT_PTR(pageout_free_min), 0, vcnt, "IU", "");
+SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_interrupt_free_min, CTLTYPE_UINT |
+	CTLFLAG_RD, VMCNT_PTR(interrupt_free_min), 0, vcnt, "IU", "");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forks, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_forks, 0, vcnt, "IU", "Number of fork() calls");
+	VMCNT_PTR(forks), 0, vcnt, "IU", "Number of fork() calls");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforks, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_vforks, 0, vcnt, "IU", "Number of vfork() calls");
+	VMCNT_PTR(vforks), 0, vcnt, "IU", "Number of vfork() calls");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforks, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_rforks, 0, vcnt, "IU", "Number of rfork() calls");
+	VMCNT_PTR(rforks), 0, vcnt, "IU", "Number of rfork() calls");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreads, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_kthreads, 0, vcnt, "IU", "Number of fork() calls by kernel");
+	VMCNT_PTR(kthreads), 0, vcnt, "IU",
+	"Number of fork() calls by kernel");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_forkpages, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_forkpages, 0, vcnt, "IU", "VM pages affected by fork()");
+	VMCNT_PTR(forkpages), 0, vcnt, "IU", "VM pages affected by fork()");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_vforkpages, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_vforkpages, 0, vcnt, "IU", "VM pages affected by vfork()");
+	VMCNT_PTR(vforkpages), 0, vcnt, "IU", "VM pages affected by vfork()");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_rforkpages, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_rforkpages, 0, vcnt, "IU", "VM pages affected by rfork()");
+	VMCNT_PTR(rforkpages), 0, vcnt, "IU", "VM pages affected by rfork()");
 SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_kthreadpages, CTLTYPE_UINT|CTLFLAG_RD,
-	&cnt.v_kthreadpages, 0, vcnt, "IU", "VM pages affected by fork() by kernel");
+	VMCNT_PTR(kthreadpages), 0, vcnt, "IU",
+	"VM pages affected by fork() by kernel");
 
 SYSCTL_INT(_vm_stats_misc, OID_AUTO,
 	zero_page_count, CTLFLAG_RD, &vm_page_zero_count, 0, "");
@@ -974,7 +974,7 @@ mlock(td, uap)
 		return (ENOMEM);
 	}
 	PROC_UNLOCK(proc);
-	if (npages + cnt.v_wire_count > vm_page_max_wired)
+	if (npages + VMCNT_GET(wire_count) > vm_page_max_wired)
 		return (EAGAIN);
 	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
@@ -655,7 +655,7 @@ vm_object_terminate(vm_object_t object)
 		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
 		if (p->wire_count == 0) {
 			vm_page_free(p);
-			cnt.v_pfree++;
+			VMCNT_ADD(pfree, 1);
 		} else {
 			vm_page_remove(p);
 		}
@@ -151,9 +151,9 @@ SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
 void
 vm_set_page_size(void)
 {
-	if (cnt.v_page_size == 0)
-		cnt.v_page_size = PAGE_SIZE;
-	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
+	if (VMCNT_GET(page_size) == 0)
+		VMCNT_SET(page_size, PAGE_SIZE);
+	if (((VMCNT_GET(page_size) - 1) & VMCNT_GET(page_size)) != 0)
 		panic("vm_set_page_size: page size not a power of two");
 }
 
@@ -357,8 +357,8 @@ vm_page_startup(vm_offset_t vaddr)
 	 * last rather than first.  On large-memory machines, this avoids
 	 * the exhaustion of low physical memory before isa_dma_init has run.
	 */
-	cnt.v_page_count = 0;
-	cnt.v_free_count = 0;
+	VMCNT_SET(page_count, 0);
+	VMCNT_SET(free_count, 0);
 	list = getenv("vm.blacklist");
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		pa = phys_avail[i];
@@ -874,11 +874,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 
 loop:
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (cnt.v_free_count > cnt.v_free_reserved ||
+	if (VMCNT_GET(free_count) > VMCNT_GET(free_reserved) ||
 	    (page_req == VM_ALLOC_SYSTEM &&
-	    cnt.v_cache_count == 0 &&
-	    cnt.v_free_count > cnt.v_interrupt_free_min) ||
-	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
+	    VMCNT_GET(cache_count) == 0 &&
+	    VMCNT_GET(free_count) > VMCNT_GET(interrupt_free_min)) ||
+	    (page_req == VM_ALLOC_INTERRUPT && VMCNT_GET(free_count) > 0)) {
 		/*
 		 * Allocate from the free queue if the number of free pages
 		 * exceeds the minimum for the request class.
@@ -893,9 +893,9 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		 */
 		vm_page_lock_queues();
 		if ((m = vm_page_select_cache(color)) == NULL) {
-			KASSERT(cnt.v_cache_count == 0,
+			KASSERT(VMCNT_GET(cache_count) == 0,
 			    ("vm_page_alloc: cache queue is missing %d pages",
-			    cnt.v_cache_count));
+			    VMCNT_GET(cache_count)));
 			vm_page_unlock_queues();
 			atomic_add_int(&vm_pageout_deficit, 1);
 			pagedaemon_wakeup();
@@ -904,7 +904,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 				return (NULL);
 
 			mtx_lock(&vm_page_queue_free_mtx);
-			if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
+			if (VMCNT_GET(free_count) <=
+			    VMCNT_GET(interrupt_free_min)) {
 				mtx_unlock(&vm_page_queue_free_mtx);
 				return (NULL);
 			}
@@ -954,7 +955,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	else
 		m->oflags = VPO_BUSY;
 	if (req & VM_ALLOC_WIRED) {
-		atomic_add_int(&cnt.v_wire_count, 1);
+		VMCNT_ADD(wire_count, 1);
 		m->wire_count = 1;
 	} else
 		m->wire_count = 0;
@@ -1000,8 +1001,8 @@ vm_wait(void)
 			vm_pages_needed = 1;
 			wakeup(&vm_pages_needed);
 		}
-		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
-		    "vmwait", 0);
+		msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP |
+		    PVM, "vmwait", 0);
 	}
 }
 
@@ -1024,7 +1025,7 @@ vm_waitpfault(void)
 		vm_pages_needed = 1;
 		wakeup(&vm_pages_needed);
 	}
-	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
+	msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP | PUSER,
 	    "pfault", 0);
 }
 
@@ -1045,7 +1046,7 @@ vm_page_activate(vm_page_t m)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
 		if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-			cnt.v_reactivated++;
+			VMCNT_ADD(reactivated, 1);
 		vm_pageq_remove(m);
 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 			if (m->act_count < ACT_INIT)
@@ -1078,7 +1079,8 @@ vm_page_free_wakeup(void)
 	 * some free.
	 */
 	if (vm_pageout_pages_needed &&
-	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
+	    VMCNT_GET(cache_count) + VMCNT_GET(free_count) >=
+	    VMCNT_GET(pageout_free_min)) {
 		wakeup(&vm_pageout_pages_needed);
 		vm_pageout_pages_needed = 0;
 	}
@@ -1089,7 +1091,7 @@ vm_page_free_wakeup(void)
	 */
 	if (vm_pages_needed && !vm_page_count_min()) {
 		vm_pages_needed = 0;
-		wakeup(&cnt.v_free_count);
+		wakeup(VMCNT_PTR(free_count));
 	}
 }
 
@@ -1112,7 +1114,7 @@ vm_page_free_toq(vm_page_t m)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	KASSERT(!pmap_page_is_mapped(m),
 	    ("vm_page_free_toq: freeing mapped page %p", m));
-	cnt.v_tfree++;
+	VMCNT_ADD(tfree, 1);
 
 	if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
 		printf(
@@ -1203,7 +1205,7 @@ vm_page_wire(vm_page_t m)
 	if (m->wire_count == 0) {
 		if ((m->flags & PG_UNMANAGED) == 0)
 			vm_pageq_remove(m);
-		atomic_add_int(&cnt.v_wire_count, 1);
+		VMCNT_ADD(wire_count, 1);
 	}
 	m->wire_count++;
 	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
@@ -1247,7 +1249,7 @@ vm_page_unwire(vm_page_t m, int activate)
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
-			atomic_subtract_int(&cnt.v_wire_count, 1);
+			VMCNT_DEC(wire_count, 1);
 			if (m->flags & PG_UNMANAGED) {
 				;
 			} else if (activate)
@@ -1286,7 +1288,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
 		return;
 	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 		if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-			cnt.v_reactivated++;
+			VMCNT_ADD(reactivated, 1);
 		vm_page_flag_clear(m, PG_WINATCFLS);
 		vm_pageq_remove(m);
 		if (athead)
@@ -1295,7 +1297,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 		VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
 		vm_page_queues[PQ_INACTIVE].lcnt++;
-		cnt.v_inactive_count++;
+		VMCNT_ADD(inactive_count, 1);
 	}
 }
 
@@ -1780,16 +1782,16 @@ vm_page_cowsetup(vm_page_t m)
 
 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 {
-	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
-	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
-	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
-	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
-	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
-	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
-	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
-	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
-	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
-	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
+	db_printf("cnt.v_free_count: %d\n", VMCNT_GET(free_count));
+	db_printf("cnt.v_cache_count: %d\n", VMCNT_GET(cache_count));
+	db_printf("cnt.v_inactive_count: %d\n", VMCNT_GET(inactive_count));
+	db_printf("cnt.v_active_count: %d\n", VMCNT_GET(active_count));
+	db_printf("cnt.v_wire_count: %d\n", VMCNT_GET(wire_count));
+	db_printf("cnt.v_free_reserved: %d\n", VMCNT_GET(free_reserved));
+	db_printf("cnt.v_free_min: %d\n", VMCNT_GET(free_min));
+	db_printf("cnt.v_free_target: %d\n", VMCNT_GET(free_target));
+	db_printf("cnt.v_cache_min: %d\n", VMCNT_GET(cache_min));
+	db_printf("cnt.v_inactive_target: %d\n", VMCNT_GET(inactive_target));
 }
 
 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
 			goto unlock_return;
 		}
 		next = TAILQ_NEXT(p, listq);
-		cnt.v_pdpages++;
+		VMCNT_ADD(pdpages, 1);
 		if (p->wire_count != 0 ||
 		    p->hold_count != 0 ||
 		    p->busy != 0 ||
@@ -739,13 +739,13 @@ vm_pageout_scan(int pass)
 	vm_page_lock_queues();
 rescan0:
 	addl_page_shortage = addl_page_shortage_init;
-	maxscan = cnt.v_inactive_count;
+	maxscan = VMCNT_GET(inactive_count);
 
 	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
 	     m = next) {
 
-		cnt.v_pdpages++;
+		VMCNT_ADD(pdpages, 1);
 
 		if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
 			goto rescan0;
@@ -856,7 +856,7 @@ vm_pageout_scan(int pass)
 			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
-			cnt.v_dfree++;
+			VMCNT_ADD(dfree, 1);
			--page_shortage;
 		} else if (m->dirty == 0) {
 			/*
@@ -1043,8 +1043,8 @@ vm_pageout_scan(int pass)
 	 * Compute the number of pages we want to try to move from the
 	 * active queue to the inactive queue.
	 */
-	page_shortage = vm_paging_target() +
-	    cnt.v_inactive_target - cnt.v_inactive_count;
+	page_shortage = vm_paging_target() + VMCNT_GET(inactive_target) -
+	    VMCNT_GET(inactive_count);
 	page_shortage += addl_page_shortage;
 
 	/*
@@ -1052,7 +1052,7 @@ vm_pageout_scan(int pass)
 	 * track the per-page activity counter and use it to locate
 	 * deactivation candidates.
	 */
-	pcount = cnt.v_active_count;
+	pcount = VMCNT_GET(active_count);
 	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
 
 	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
@@ -1089,7 +1089,7 @@ vm_pageout_scan(int pass)
 		 * The count for pagedaemon pages is done after checking the
 		 * page for eligibility...
		 */
-		cnt.v_pdpages++;
+		VMCNT_ADD(pdpages, 1);
 
 		/*
 		 * Check to see "how much" the page has been used.
@@ -1149,8 +1149,9 @@ vm_pageout_scan(int pass)
	 */
 	cache_cur = cache_last_free;
 	cache_first_failure = -1;
-	while (cnt.v_free_count < cnt.v_free_reserved && (cache_cur =
-	    (cache_cur + PQ_PRIME2) & PQ_COLORMASK) != cache_first_failure) {
+	while (VMCNT_GET(free_count) < VMCNT_GET(free_reserved) &&
+	    (cache_cur = (cache_cur + PQ_PRIME2) & PQ_COLORMASK) !=
+	    cache_first_failure) {
 		TAILQ_FOREACH(m, &vm_page_queues[PQ_CACHE + cache_cur].pl,
 		    pageq) {
 			KASSERT(m->dirty == 0,
@@ -1168,7 +1169,7 @@ vm_pageout_scan(int pass)
 			    m));
 			vm_page_free(m);
 			VM_OBJECT_UNLOCK(object);
-			cnt.v_dfree++;
+			VMCNT_ADD(dfree, 1);
 			cache_last_free = cache_cur;
 			cache_first_failure = -1;
 			break;
@@ -1291,7 +1292,7 @@ vm_pageout_scan(int pass)
 			sched_nice(bigproc, PRIO_MIN);
 			mtx_unlock_spin(&sched_lock);
 			PROC_UNLOCK(bigproc);
-			wakeup(&cnt.v_free_count);
+			wakeup(VMCNT_PTR(free_count));
 		}
 	}
 	mtx_unlock(&Giant);
@@ -1314,16 +1315,18 @@ vm_pageout_page_stats()
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	page_shortage =
-	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
-	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
+	    (VMCNT_GET(inactive_target) + VMCNT_GET(cache_max) +
+	    VMCNT_GET(free_min)) - (VMCNT_GET(free_count) +
+	    VMCNT_GET(inactive_count) + VMCNT_GET(cache_count));
 
 	if (page_shortage <= 0)
 		return;
 
-	pcount = cnt.v_active_count;
+	pcount = VMCNT_GET(active_count);
 	fullintervalcount += vm_pageout_stats_interval;
 	if (fullintervalcount < vm_pageout_full_stats_interval) {
-		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
+		tpcount = (vm_pageout_stats_max * VMCNT_GET(active_count)) /
+		    VMCNT_GET(page_count);
 		if (pcount > tpcount)
 			pcount = tpcount;
 	} else {
@@ -1409,8 +1412,8 @@ vm_pageout()
 	/*
 	 * Initialize some paging parameters.
	 */
-	cnt.v_interrupt_free_min = 2;
-	if (cnt.v_page_count < 2000)
+	VMCNT_SET(interrupt_free_min, 2);
+	if (VMCNT_GET(page_count) < 2000)
 		vm_pageout_page_count = 8;
 
 	/*
@@ -1418,17 +1421,16 @@ vm_pageout()
 	 * swap pager structures plus enough for any pv_entry structs
 	 * when paging.
	 */
-	if (cnt.v_page_count > 1024)
-		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
-	else
-		cnt.v_free_min = 4;
-	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
-	    cnt.v_interrupt_free_min;
-	cnt.v_free_reserved = vm_pageout_page_count +
-	    cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
-	cnt.v_free_severe = cnt.v_free_min / 2;
-	cnt.v_free_min += cnt.v_free_reserved;
-	cnt.v_free_severe += cnt.v_free_reserved;
+	VMCNT_SET(free_min, (VMCNT_GET(page_count) > 1024) ? (4 +
+	    (VMCNT_GET(page_count) - 1024) / 200) : 4);
+	VMCNT_SET(pageout_free_min, (2 * MAXBSIZE) / PAGE_SIZE +
+	    VMCNT_GET(interrupt_free_min));
+	VMCNT_SET(free_reserved, vm_pageout_page_count +
+	    VMCNT_GET(pageout_free_min) + (VMCNT_GET(page_count) / 768) +
+	    PQ_NUMCOLORS);
+	VMCNT_SET(free_severe, VMCNT_GET(free_min) / 2);
+	VMCNT_ADD(free_min, VMCNT_GET(free_reserved));
+	VMCNT_ADD(free_severe, VMCNT_GET(free_reserved));
 
 	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
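Editor's note: a worked instance of this tuning arithmetic, with assumed illustrative values (page_count = 262144, i.e. 1 GB of 4 KB pages; MAXBSIZE = 65536; PAGE_SIZE = 4096; vm_pageout_page_count = 32; PQ_NUMCOLORS = 1). free_min starts at 4 + (262144 - 1024) / 200 = 1309; pageout_free_min = (2 * 65536) / 4096 + 2 = 34; free_reserved = 32 + 34 + 262144 / 768 + 1 = 408; free_severe = 1309 / 2 = 654; then free_min becomes 1309 + 408 = 1717 and free_severe 654 + 408 = 1062. The VMCNT_SET/VMCNT_ADD forms compute the same values the removed cnt.* assignments did; the differences are the folded ternary and that every store is now an atomic release store.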
@@ -1441,29 +1443,27 @@ vm_pageout()
 	 * be big enough to handle memory needs while the pageout daemon
 	 * is signalled and run to free more pages.
	 */
-	if (cnt.v_free_count > 6144)
-		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
-	else
-		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
+	VMCNT_SET(free_target, ((VMCNT_GET(free_count) > 6144) ? 4 : 2) *
+	    VMCNT_GET(free_min) + VMCNT_GET(free_reserved));
 
-	if (cnt.v_free_count > 2048) {
-		cnt.v_cache_min = cnt.v_free_target;
-		cnt.v_cache_max = 2 * cnt.v_cache_min;
-		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
+	if (VMCNT_GET(free_count) > 2048) {
+		VMCNT_SET(cache_min, VMCNT_GET(free_target));
+		VMCNT_SET(cache_max, 2 * VMCNT_GET(cache_min));
+		VMCNT_SET(inactive_target, (3 * VMCNT_GET(free_target) / 2));
 	} else {
-		cnt.v_cache_min = 0;
-		cnt.v_cache_max = 0;
-		cnt.v_inactive_target = cnt.v_free_count / 4;
+		VMCNT_SET(cache_min, 0);
+		VMCNT_SET(cache_max, 0);
+		VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 4);
 	}
-	if (cnt.v_inactive_target > cnt.v_free_count / 3)
-		cnt.v_inactive_target = cnt.v_free_count / 3;
+	if (VMCNT_GET(inactive_target) > VMCNT_GET(free_count) / 3)
+		VMCNT_SET(inactive_target, VMCNT_GET(free_count) / 3);
 
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
-		vm_page_max_wired = cnt.v_free_count / 3;
+		vm_page_max_wired = VMCNT_GET(free_count) / 3;
 
 	if (vm_pageout_stats_max == 0)
-		vm_pageout_stats_max = cnt.v_free_target;
+		vm_pageout_stats_max = VMCNT_GET(free_target);
 
 	/*
 	 * Set interval in seconds for stats scan.
@@ -1489,7 +1489,7 @@ vm_pageout()
 		if (vm_pages_needed && !vm_page_count_min()) {
 			if (!vm_paging_needed())
 				vm_pages_needed = 0;
-			wakeup(&cnt.v_free_count);
+			wakeup(VMCNT_PTR(free_count));
 		}
 		if (vm_pages_needed) {
 			/*
@@ -1524,7 +1524,7 @@ vm_pageout()
 			}
 		}
 		if (vm_pages_needed)
-			cnt.v_pdwakeups++;
+			VMCNT_ADD(pdwakeups, 1);
 		mtx_unlock(&vm_page_queue_free_mtx);
 		vm_pageout_scan(pass);
 	}
@@ -140,14 +140,14 @@ vm_pageq_init(void)
 	vm_coloring_init();
 
 	for (i = 0; i < PQ_NUMCOLORS; ++i) {
-		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
+		vm_page_queues[PQ_FREE+i].cnt = VMCNT_PTR(free_count);
 	}
 	for (i = 0; i < PQ_NUMCOLORS; ++i) {
-		vm_page_queues[PQ_CACHE + i].cnt = &cnt.v_cache_count;
+		vm_page_queues[PQ_CACHE + i].cnt = VMCNT_PTR(cache_count);
 	}
-	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
-	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
-	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;
+	vm_page_queues[PQ_INACTIVE].cnt = VMCNT_PTR(inactive_count);
+	vm_page_queues[PQ_ACTIVE].cnt = VMCNT_PTR(active_count);
+	vm_page_queues[PQ_HOLD].cnt = VMCNT_PTR(active_count);
 
 	for (i = 0; i < PQ_COUNT; i++) {
 		TAILQ_INIT(&vm_page_queues[i].pl);
@@ -192,7 +192,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
 {
 	vm_page_t m;
 
-	atomic_add_int(&cnt.v_page_count, 1);
+	VMCNT_ADD(page_count, 1);
 	m = PHYS_TO_VM_PAGE(pa);
 	m->phys_addr = pa;
 	m->flags = 0;
@@ -90,9 +90,10 @@ vm_page_zero_check(void)
 	 * fast sleeps. We also do not want to be continuously zeroing
 	 * pages because doing so may flush our L1 and L2 caches too much.
	 */
-	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+	if (zero_state && vm_page_zero_count >=
+	    ZIDLE_LO(VMCNT_GET(free_count)))
 		return (0);
-	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+	if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
 		return (0);
 	return (1);
 }
@@ -115,7 +116,7 @@ vm_page_zero_idle(void)
 		vm_pageq_enqueue(PQ_FREE + m->pc, m);
 		++vm_page_zero_count;
 		++cnt_prezero;
-		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+		if (vm_page_zero_count >= ZIDLE_HI(VMCNT_GET(free_count)))
 			zero_state = 1;
 	}
 	free_rover = (free_rover + PQ_PRIME2) & PQ_COLORMASK;
@@ -728,8 +728,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 		if (i != reqpage)
 			vm_page_free(m[i]);
 		vm_page_unlock_queues();
-		cnt.v_vnodein++;
-		cnt.v_vnodepgsin++;
+		VMCNT_ADD(vnodein, 1);
+		VMCNT_ADD(vnodepgsin, 1);
 		error = vnode_pager_input_old(object, m[reqpage]);
 		VM_OBJECT_UNLOCK(object);
 		return (error);
@@ -757,8 +757,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 			vm_page_free(m[i]);
 		vm_page_unlock_queues();
 		VM_OBJECT_UNLOCK(object);
-		cnt.v_vnodein++;
-		cnt.v_vnodepgsin++;
+		VMCNT_ADD(vnodein, 1);
+		VMCNT_ADD(vnodepgsin, 1);
 		return vnode_pager_input_smlfs(object, m[reqpage]);
 	}
 
@@ -909,8 +909,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 	bp->b_runningbufspace = bp->b_bufsize;
 	atomic_add_int(&runningbufspace, bp->b_runningbufspace);
 
-	cnt.v_vnodein++;
-	cnt.v_vnodepgsin += count;
+	VMCNT_ADD(vnodein, 1);
+	VMCNT_ADD(vnodepgsin, count);
 
 	/* do the input */
 	bp->b_iooffset = dbtob(bp->b_blkno);
@@ -1031,7 +1031,8 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
 	 * daemon up.  This should be probably be addressed XXX.
	 */
 
-	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
+	if ((VMCNT_GET(free_count) + VMCNT_GET(cache_count)) <
+	    VMCNT_GET(pageout_free_min))
 		sync |= OBJPC_SYNC;
 
 	/*
@@ -1157,8 +1158,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
 	auio.uio_resid = maxsize;
 	auio.uio_td = (struct thread *) 0;
 	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
-	cnt.v_vnodeout++;
-	cnt.v_vnodepgsout += ncount;
+	VMCNT_ADD(vnodeout, 1);
+	VMCNT_ADD(vnodepgsout, ncount);
 
 	if (error) {
 		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))