Rename global cnt to vm_cnt to avoid shadowing.

To reduce the diff, the struct pcpu.cnt field was not renamed, so
PCPU_OP(cnt.field) is still used. pc_cnt and pcpu are also used in kvm(3)
and vmstat(8). The goal was to not affect the externally used KPI.

Bump __FreeBSD_version in case some out-of-tree module or code relies on
the global cnt variable. An exp-run revealed no ports using it directly.

No objection from:	arch@
Sponsored by:	EMC / Isilon Storage Division
commit 44f1c91610
parent 856c73664b
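For third-party code the change is mechanical: the struct vmmeter layout is unchanged, only the name of the global instance differs. Below is a minimal sketch of how a hypothetical out-of-tree kernel module could keep building across the rename, keyed on the __FreeBSD_version bump in this commit; the VM_METER_GLOBAL macro and the helper function are illustrative only and are not part of this change.

#include <sys/param.h>
#include <sys/vmmeter.h>

/* Pick the global vmmeter symbol name that matches the source tree. */
#if __FreeBSD_version >= 1100015
#define	VM_METER_GLOBAL	vm_cnt
#else
#define	VM_METER_GLOBAL	cnt
#endif

/* Example consumer: pages that are immediately reusable (free + cache). */
static u_int
module_reusable_pages(void)
{

	return (VM_METER_GLOBAL.v_free_count + VM_METER_GLOBAL.v_cache_count);
}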
@@ -24,7 +24,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd October 21, 2011
+.Dd March 21, 2014
 .Dt MEMGUARD 9
 .Os
 .Sh NAME
@@ -129,7 +129,7 @@ memory
 .Nm
 is allowed to consume.
 The default is 10, so up to
-.Va cnt.v_page_count Ns /10
+.Va vm_cnt.v_page_count Ns /10
 pages can be used.
 .Nm
 will reserve
@@ -26,7 +26,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd July 17, 2001
+.Dd March 21, 2014
 .Dt VM_SET_PAGE_SIZE 9
 .Os
 .Sh NAME
@@ -43,16 +43,16 @@ The
 .Fn vm_set_page_size
 function initializes the system page size.
 If
-.Va cnt.v_page_size
+.Va vm_cnt.v_page_size
 (see
 .In sys/vmmeter.h )
 equals 0,
 .Dv PAGE_SIZE
 is used; otherwise, the value stored in
-.Va cnt.v_page_size
+.Va vm_cnt.v_page_size
 is used.
 If
-.Va cnt.v_page_size
+.Va vm_cnt.v_page_size
 is not a power of two, the system will panic.
 .Pp
 .Fn vm_set_page_size
@@ -284,7 +284,7 @@ cpu_startup(dummy)
 		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
 		freeenv(sysenv);
 	}
-	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
+	if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
 		memsize = ptoa((uintmax_t)Maxmem);
 	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
 	realmem = atop(memsize);
@@ -311,8 +311,8 @@ cpu_startup(dummy)
 	vm_ksubmap_init(&kmi);
 
 	printf("avail memory = %ju (%ju MB)\n",
-	    ptoa((uintmax_t)cnt.v_free_count),
-	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);
+	    ptoa((uintmax_t)vm_cnt.v_free_count),
+	    ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
 
 	/*
 	 * Set up buffers, so they can be used to read disk labels.
@@ -2135,7 +2135,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
 	 * the page table page is globally performed before TLB shoot-
 	 * down is begun.
 	 */
-	atomic_subtract_rel_int(&cnt.v_wire_count, 1);
+	atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
 
 	/*
 	 * Put page on a list so that it is released after
@ -2328,7 +2328,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
||||
if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
|
||||
lockp) == NULL) {
|
||||
--m->wire_count;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free_zero(m);
|
||||
return (NULL);
|
||||
}
|
||||
@ -2361,7 +2361,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
||||
if (_pmap_allocpte(pmap, NUPDE + pdpindex,
|
||||
lockp) == NULL) {
|
||||
--m->wire_count;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free_zero(m);
|
||||
return (NULL);
|
||||
}
|
||||
@ -2375,7 +2375,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
||||
if (_pmap_allocpte(pmap, NUPDE + pdpindex,
|
||||
lockp) == NULL) {
|
||||
--m->wire_count;
|
||||
atomic_subtract_int(&cnt.v_wire_count,
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count,
|
||||
1);
|
||||
vm_page_free_zero(m);
|
||||
return (NULL);
|
||||
@ -2515,7 +2515,7 @@ pmap_release(pmap_t pmap)
|
||||
pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
|
||||
|
||||
m->wire_count--;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free_zero(m);
|
||||
if (pmap->pm_pcid != -1)
|
||||
free_unr(&pcid_unr, pmap->pm_pcid);
|
||||
@ -2814,7 +2814,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
|
||||
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
|
||||
/* Recycle a freed page table page. */
|
||||
m_pc->wire_count = 1;
|
||||
atomic_add_int(&cnt.v_wire_count, 1);
|
||||
atomic_add_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
pmap_free_zero_pages(&free);
|
||||
return (m_pc);
|
||||
@ -3484,7 +3484,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
|
||||
("pmap_remove_pde: pte page wire count error"));
|
||||
mpte->wire_count = 0;
|
||||
pmap_add_delayed_free_list(mpte, free, FALSE);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
}
|
||||
return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
|
||||
@ -5288,7 +5288,7 @@ pmap_remove_pages(pmap_t pmap)
|
||||
("pmap_remove_pages: pte page wire count error"));
|
||||
mpte->wire_count = 0;
|
||||
pmap_add_delayed_free_list(mpte, &free, FALSE);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
} else {
|
||||
pmap_resident_count_dec(pmap, 1);
|
||||
|
@ -80,5 +80,5 @@ uma_small_free(void *mem, int size, u_int8_t flags)
|
||||
m = PHYS_TO_VM_PAGE(pa);
|
||||
m->wire_count--;
|
||||
vm_page_free(m);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
|
@ -366,8 +366,8 @@ cpu_startup(void *dummy)
|
||||
(uintmax_t)arm32_ptob(realmem),
|
||||
(uintmax_t)arm32_ptob(realmem) / mbyte);
|
||||
printf("avail memory = %ju (%ju MB)\n",
|
||||
(uintmax_t)arm32_ptob(cnt.v_free_count),
|
||||
(uintmax_t)arm32_ptob(cnt.v_free_count) / mbyte);
|
||||
(uintmax_t)arm32_ptob(vm_cnt.v_free_count),
|
||||
(uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
|
||||
if (bootverbose) {
|
||||
arm_physmem_print_tables();
|
||||
arm_devmap_print_table();
|
||||
|
@ -1348,7 +1348,7 @@ pmap_init(void)
|
||||
*/
|
||||
|
||||
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
|
||||
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
|
||||
pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
|
||||
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
|
||||
pv_entry_max = roundup(pv_entry_max, _NPCPV);
|
||||
pv_entry_high_water = 9 * (pv_entry_max / 10);
|
||||
|
@ -1790,7 +1790,7 @@ pmap_init(void)
|
||||
pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
|
||||
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
|
||||
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
|
||||
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
|
||||
pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
|
||||
uma_zone_reserve_kva(pvzone, pv_entry_max);
|
||||
pv_entry_high_water = 9 * (pv_entry_max / 10);
|
||||
|
||||
|
@ -120,7 +120,7 @@ static void
|
||||
kmem_size_init(void *unused __unused)
|
||||
{
|
||||
|
||||
kmem_size_val = (uint64_t)cnt.v_page_count * PAGE_SIZE;
|
||||
kmem_size_val = (uint64_t)vm_cnt.v_page_count * PAGE_SIZE;
|
||||
if (kmem_size_val > vm_kmem_size)
|
||||
kmem_size_val = vm_kmem_size;
|
||||
}
|
||||
|
@ -3877,7 +3877,7 @@ arc_memory_throttle(uint64_t reserve, uint64_t txg)
|
||||
{
|
||||
#ifdef _KERNEL
|
||||
uint64_t available_memory =
|
||||
ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count);
|
||||
ptoa((uintmax_t)vm_cnt.v_free_count + vm_cnt.v_cache_count);
|
||||
static uint64_t page_load = 0;
|
||||
static uint64_t last_txg = 0;
|
||||
|
||||
@ -3888,7 +3888,7 @@ arc_memory_throttle(uint64_t reserve, uint64_t txg)
|
||||
#endif
|
||||
#endif /* sun */
|
||||
|
||||
if (cnt.v_free_count + cnt.v_cache_count >
|
||||
if (vm_cnt.v_free_count + vm_cnt.v_cache_count >
|
||||
(uint64_t)physmem * arc_lotsfree_percent / 100)
|
||||
return (0);
|
||||
|
||||
|
@ -2347,7 +2347,7 @@ zfs_vnodes_adjust(void)
|
||||
* vntblinit(). If it is equal to desiredvnodes, it means that
|
||||
* it wasn't tuned by the administrator and we can tune it down.
|
||||
*/
|
||||
newdesiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 *
|
||||
newdesiredvnodes = min(maxproc + vm_cnt.v_page_count / 4, 2 *
|
||||
vm_kmem_size / (5 * (sizeof(struct vm_object) +
|
||||
sizeof(struct vnode))));
|
||||
if (newdesiredvnodes == desiredvnodes)
|
||||
|
@ -159,14 +159,14 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
|
||||
/*
|
||||
* The correct thing here would be:
|
||||
*
|
||||
memfree = cnt.v_free_count * PAGE_SIZE;
|
||||
memfree = vm_cnt.v_free_count * PAGE_SIZE;
|
||||
memused = memtotal - memfree;
|
||||
*
|
||||
* but it might mislead linux binaries into thinking there
|
||||
* is very little memory left, so we cheat and tell them that
|
||||
* all memory that isn't wired down is free.
|
||||
*/
|
||||
memused = cnt.v_wire_count * PAGE_SIZE;
|
||||
memused = vm_cnt.v_wire_count * PAGE_SIZE;
|
||||
memfree = memtotal - memused;
|
||||
swap_pager_status(&i, &j);
|
||||
swaptotal = (unsigned long long)i * PAGE_SIZE;
|
||||
@ -188,7 +188,7 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
|
||||
* like unstaticizing it just for linprocfs's sake.
|
||||
*/
|
||||
buffers = 0;
|
||||
cached = cnt.v_cache_count * PAGE_SIZE;
|
||||
cached = vm_cnt.v_cache_count * PAGE_SIZE;
|
||||
|
||||
sbuf_printf(sb,
|
||||
" total: used: free: shared: buffers: cached:\n"
|
||||
@ -486,12 +486,12 @@ linprocfs_dostat(PFS_FILL_ARGS)
|
||||
"intr %u\n"
|
||||
"ctxt %u\n"
|
||||
"btime %lld\n",
|
||||
cnt.v_vnodepgsin,
|
||||
cnt.v_vnodepgsout,
|
||||
cnt.v_swappgsin,
|
||||
cnt.v_swappgsout,
|
||||
cnt.v_intr,
|
||||
cnt.v_swtch,
|
||||
vm_cnt.v_vnodepgsin,
|
||||
vm_cnt.v_vnodepgsout,
|
||||
vm_cnt.v_swappgsin,
|
||||
vm_cnt.v_swappgsout,
|
||||
vm_cnt.v_intr,
|
||||
vm_cnt.v_swtch,
|
||||
(long long)boottime.tv_sec);
|
||||
return (0);
|
||||
}
|
||||
|
@ -148,7 +148,7 @@ linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
|
||||
LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
|
||||
|
||||
sysinfo.totalram = physmem * PAGE_SIZE;
|
||||
sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;
|
||||
sysinfo.freeram = sysinfo.totalram - vm_cnt.v_wire_count * PAGE_SIZE;
|
||||
|
||||
sysinfo.sharedram = 0;
|
||||
mtx_lock(&vm_object_list_mtx);
|
||||
|
@ -773,14 +773,14 @@ svr4_sys_sysconfig(td, uap)
|
||||
#if defined(UVM)
|
||||
*retval = uvmexp.free; /* XXX: free instead of total */
|
||||
#else
|
||||
*retval = cnt.v_free_count; /* XXX: free instead of total */
|
||||
*retval = vm_cnt.v_free_count; /* XXX: free instead of total */
|
||||
#endif
|
||||
break;
|
||||
case SVR4_CONFIG_AVPHYS_PAGES:
|
||||
#if defined(UVM)
|
||||
*retval = uvmexp.active; /* XXX: active instead of avg */
|
||||
#else
|
||||
*retval = cnt.v_active_count; /* XXX: active instead of avg */
|
||||
*retval = vm_cnt.v_active_count;/* XXX: active instead of avg */
|
||||
#endif
|
||||
break;
|
||||
#endif /* NOTYET */
|
||||
|
@ -104,7 +104,7 @@ tmpfs_mem_avail(void)
|
||||
{
|
||||
vm_ooffset_t avail;
|
||||
|
||||
avail = swap_pager_avail + cnt.v_free_count + cnt.v_cache_count -
|
||||
avail = swap_pager_avail + vm_cnt.v_free_count + vm_cnt.v_cache_count -
|
||||
tmpfs_pages_reserved;
|
||||
if (__predict_false(avail < 0))
|
||||
avail = 0;
|
||||
|
@ -308,7 +308,7 @@ cpu_startup(dummy)
|
||||
memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
|
||||
freeenv(sysenv);
|
||||
}
|
||||
if (memsize < ptoa((uintmax_t)cnt.v_free_count))
|
||||
if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
|
||||
memsize = ptoa((uintmax_t)Maxmem);
|
||||
printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
|
||||
realmem = atop(memsize);
|
||||
@ -335,8 +335,8 @@ cpu_startup(dummy)
|
||||
vm_ksubmap_init(&kmi);
|
||||
|
||||
printf("avail memory = %ju (%ju MB)\n",
|
||||
ptoa((uintmax_t)cnt.v_free_count),
|
||||
ptoa((uintmax_t)cnt.v_free_count) / 1048576);
|
||||
ptoa((uintmax_t)vm_cnt.v_free_count),
|
||||
ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
|
||||
|
||||
/*
|
||||
* Set up buffers, so they can be used to read disk labels.
|
||||
|
@ -744,7 +744,7 @@ pmap_init(void)
|
||||
* numbers of pv entries.
|
||||
*/
|
||||
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
|
||||
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
|
||||
pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
|
||||
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
|
||||
pv_entry_max = roundup(pv_entry_max, _NPCPV);
|
||||
pv_entry_high_water = 9 * (pv_entry_max / 10);
|
||||
@ -1674,7 +1674,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
|
||||
* the page table page is globally performed before TLB shoot-
|
||||
* down is begun.
|
||||
*/
|
||||
atomic_subtract_rel_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
|
||||
|
||||
/*
|
||||
* Do an invltlb to make the invalidated mapping
|
||||
@ -2050,7 +2050,7 @@ pmap_release(pmap_t pmap)
|
||||
("pmap_release: got wrong ptd page"));
|
||||
#endif
|
||||
m->wire_count--;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free_zero(m);
|
||||
}
|
||||
}
|
||||
@ -2313,7 +2313,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
|
||||
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
|
||||
/* Recycle a freed page table page. */
|
||||
m_pc->wire_count = 1;
|
||||
atomic_add_int(&cnt.v_wire_count, 1);
|
||||
atomic_add_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
pmap_free_zero_pages(&free);
|
||||
return (m_pc);
|
||||
@ -2862,7 +2862,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
|
||||
("pmap_remove_pde: pte page wire count error"));
|
||||
mpte->wire_count = 0;
|
||||
pmap_add_delayed_free_list(mpte, free, FALSE);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4523,7 +4523,7 @@ pmap_remove_pages(pmap_t pmap)
|
||||
("pmap_remove_pages: pte page wire count error"));
|
||||
mpte->wire_count = 0;
|
||||
pmap_add_delayed_free_list(mpte, &free, FALSE);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
} else {
|
||||
pmap->pm_stats.resident_count--;
|
||||
|
@ -614,7 +614,7 @@ pmap_init(void)
|
||||
* numbers of pv entries.
|
||||
*/
|
||||
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
|
||||
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
|
||||
pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
|
||||
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
|
||||
pv_entry_max = roundup(pv_entry_max, _NPCPV);
|
||||
pv_entry_high_water = 9 * (pv_entry_max / 10);
|
||||
@ -1380,7 +1380,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
|
||||
* the page table page is globally performed before TLB shoot-
|
||||
* down is begun.
|
||||
*/
|
||||
atomic_subtract_rel_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
|
||||
|
||||
/*
|
||||
* Do an invltlb to make the invalidated mapping
|
||||
@ -1813,7 +1813,7 @@ pmap_release(pmap_t pmap)
|
||||
("pmap_release: got wrong ptd page"));
|
||||
#endif
|
||||
m->wire_count--;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free(m);
|
||||
}
|
||||
#ifdef PAE
|
||||
@ -2089,7 +2089,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
|
||||
free = (void *)m_pc->object;
|
||||
/* Recycle a freed page table page. */
|
||||
m_pc->wire_count = 1;
|
||||
atomic_add_int(&cnt.v_wire_count, 1);
|
||||
atomic_add_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
pmap_free_zero_pages(free);
|
||||
return (m_pc);
|
||||
|
@ -281,8 +281,8 @@ cpu_startup(void *dummy)
|
||||
|
||||
vm_ksubmap_init(&kmi);
|
||||
|
||||
printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
|
||||
ptoa(cnt.v_free_count) / 1048576);
|
||||
printf("avail memory = %ld (%ld MB)\n", ptoa(vm_cnt.v_free_count),
|
||||
ptoa(vm_cnt.v_free_count) / 1048576);
|
||||
|
||||
if (fpswa_iface == NULL)
|
||||
printf("Warning: no FPSWA package supplied\n");
|
||||
|
@ -73,5 +73,5 @@ uma_small_free(void *mem, int size, u_int8_t flags)
|
||||
m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem));
|
||||
m->wire_count--;
|
||||
vm_page_free(m);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
|
@ -544,7 +544,7 @@ proc0_init(void *dummy __unused)
|
||||
p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
|
||||
p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
|
||||
/* Cast to avoid overflow on i386/PAE. */
|
||||
pageablemem = ptoa((vm_paddr_t)cnt.v_free_count);
|
||||
pageablemem = ptoa((vm_paddr_t)vm_cnt.v_free_count);
|
||||
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_cur =
|
||||
p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = pageablemem;
|
||||
p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = pageablemem / 3;
|
||||
|
@ -711,7 +711,7 @@ kmeminit(void)
|
||||
* VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
|
||||
* a given architecture.
|
||||
*/
|
||||
mem_size = cnt.v_page_count;
|
||||
mem_size = vm_cnt.v_page_count;
|
||||
|
||||
vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
|
||||
TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
|
||||
@ -819,7 +819,7 @@ malloc_init(void *data)
|
||||
struct malloc_type_internal *mtip;
|
||||
struct malloc_type *mtp;
|
||||
|
||||
KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));
|
||||
KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
|
||||
|
||||
mtp = data;
|
||||
if (mtp->ks_magic != M_MAGIC)
|
||||
|
@ -199,7 +199,7 @@ sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
u_long val;
|
||||
|
||||
val = ctob(physmem - cnt.v_wire_count);
|
||||
val = ctob(physmem - vm_cnt.v_wire_count);
|
||||
return (sysctl_handle_long(oidp, &val, 0, req));
|
||||
}
|
||||
|
||||
|
@ -4290,7 +4290,7 @@ vm_hold_free_pages(struct buf *bp, int newbsize)
|
||||
(intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
|
||||
p->wire_count--;
|
||||
vm_page_free(p);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
bp->b_npages = newnpages;
|
||||
}
|
||||
|
@ -330,8 +330,8 @@ vntblinit(void *dummy __unused)
|
||||
* size. The memory required by desiredvnodes vnodes and vm objects
|
||||
* may not exceed one seventh of the kernel's heap size.
|
||||
*/
|
||||
physvnodes = maxproc + cnt.v_page_count / 16 + 3 * min(98304 * 4,
|
||||
cnt.v_page_count) / 16;
|
||||
physvnodes = maxproc + vm_cnt.v_page_count / 16 + 3 * min(98304 * 4,
|
||||
vm_cnt.v_page_count) / 16;
|
||||
virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) +
|
||||
sizeof(struct vnode)));
|
||||
desiredvnodes = min(physvnodes, virtvnodes);
|
||||
@ -708,7 +708,7 @@ vlrureclaim(struct mount *mp)
|
||||
usevnodes = desiredvnodes;
|
||||
if (usevnodes <= 0)
|
||||
usevnodes = 1;
|
||||
trigger = cnt.v_page_count * 2 / usevnodes;
|
||||
trigger = vm_cnt.v_page_count * 2 / usevnodes;
|
||||
done = 0;
|
||||
vn_start_write(NULL, &mp, V_WAIT);
|
||||
MNT_ILOCK(mp);
|
||||
|
@ -211,8 +211,8 @@ cpu_startup(void *dummy)
|
||||
vm_ksubmap_init(&kmi);
|
||||
|
||||
printf("avail memory = %ju (%juMB)\n",
|
||||
ptoa((uintmax_t)cnt.v_free_count),
|
||||
ptoa((uintmax_t)cnt.v_free_count) / 1048576);
|
||||
ptoa((uintmax_t)vm_cnt.v_free_count),
|
||||
ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
|
||||
cpu_init_interrupts();
|
||||
|
||||
/*
|
||||
|
@ -995,7 +995,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
|
||||
* If the page is finally unwired, simply free it.
|
||||
*/
|
||||
vm_page_free_zero(m);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1144,7 +1144,7 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
|
||||
flags) == NULL) {
|
||||
/* alloc failed, release current */
|
||||
--m->wire_count;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free_zero(m);
|
||||
return (NULL);
|
||||
}
|
||||
@ -1227,7 +1227,7 @@ pmap_release(pmap_t pmap)
|
||||
ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
|
||||
|
||||
ptdpg->wire_count--;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free_zero(ptdpg);
|
||||
}
|
||||
|
||||
|
@ -79,5 +79,5 @@ uma_small_free(void *mem, int size, u_int8_t flags)
|
||||
m = PHYS_TO_VM_PAGE(pa);
|
||||
m->wire_count--;
|
||||
vm_page_free(m);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
|
@ -347,7 +347,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
PROC_UNLOCK(proc);
|
||||
if (npages + cnt.v_wire_count > vm_page_max_wired) {
|
||||
if (npages + vm_cnt.v_wire_count > vm_page_max_wired) {
|
||||
kfree(umem);
|
||||
return ERR_PTR(-EAGAIN);
|
||||
}
|
||||
|
@ -559,7 +559,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
|
||||
goto out;
|
||||
}
|
||||
PROC_UNLOCK(proc);
|
||||
if (cnt.v_wire_count + 1 > vm_page_max_wired) {
|
||||
if (vm_cnt.v_wire_count + 1 > vm_page_max_wired) {
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
@ -270,8 +270,8 @@ cpu_startup(dummy)
|
||||
vm_ksubmap_init(&kmi);
|
||||
|
||||
printf("avail memory = %ju (%ju MB)\n",
|
||||
ptoa((uintmax_t)cnt.v_free_count),
|
||||
ptoa((uintmax_t)cnt.v_free_count) / 1048576);
|
||||
ptoa((uintmax_t)vm_cnt.v_free_count),
|
||||
ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
|
||||
|
||||
/*
|
||||
* Set up buffers, so they can be used to read disk labels.
|
||||
|
@ -217,8 +217,8 @@ cpu_startup(void *dummy)
|
||||
|
||||
vm_ksubmap_init(&kmi);
|
||||
|
||||
printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
|
||||
ptoa(cnt.v_free_count) / 1048576);
|
||||
printf("avail memory = %ld (%ld MB)\n", ptoa(vm_cnt.v_free_count),
|
||||
ptoa(vm_cnt.v_free_count) / 1048576);
|
||||
|
||||
/*
|
||||
* Set up buffers, so they can be used to read disk labels.
|
||||
|
@ -93,6 +93,6 @@ uma_small_free(void *mem, int size, u_int8_t flags)
|
||||
m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
|
||||
m->wire_count--;
|
||||
vm_page_free(m);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&hw_uma_mdpages, 1);
|
||||
}
|
||||
|
@ -220,8 +220,8 @@ cpu_booke_startup(void *dummy)
|
||||
|
||||
vm_ksubmap_init(&kmi);
|
||||
|
||||
printf("avail memory = %lu (%ld MB)\n", ptoa(cnt.v_free_count),
|
||||
ptoa(cnt.v_free_count) / 1048576);
|
||||
printf("avail memory = %lu (%ld MB)\n", ptoa(vm_cnt.v_free_count),
|
||||
ptoa(vm_cnt.v_free_count) / 1048576);
|
||||
|
||||
/* Set up buffers, so they can be used to read disk labels. */
|
||||
bufinit();
|
||||
|
@ -649,7 +649,7 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
|
||||
pa = pte_vatopa(mmu, kernel_pmap, va);
|
||||
m = PHYS_TO_VM_PAGE(pa);
|
||||
vm_page_free_zero(m);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
mmu_booke_kremove(mmu, va);
|
||||
}
|
||||
|
||||
@ -1385,7 +1385,7 @@ mmu_booke_init(mmu_t mmu)
|
||||
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
|
||||
|
||||
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
|
||||
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
|
||||
pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
|
||||
|
||||
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
|
||||
pv_entry_high_water = 9 * (pv_entry_max / 10);
|
||||
|
@ -190,8 +190,8 @@ cpu_startup(void *arg)
|
||||
EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
|
||||
SHUTDOWN_PRI_LAST);
|
||||
|
||||
printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
|
||||
cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
|
||||
printf("avail memory = %lu (%lu MB)\n", vm_cnt.v_free_count * PAGE_SIZE,
|
||||
vm_cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
|
||||
|
||||
if (bootverbose)
|
||||
printf("machine: %s\n", sparc64_model);
|
||||
|
@ -1293,7 +1293,7 @@ pmap_release(pmap_t pm)
|
||||
m = TAILQ_FIRST(&obj->memq);
|
||||
m->md.pmap = NULL;
|
||||
m->wire_count--;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
vm_page_free_zero(m);
|
||||
}
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
|
@ -548,5 +548,5 @@ uma_small_free(void *mem, int size, u_int8_t flags)
|
||||
m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
|
||||
m->wire_count--;
|
||||
vm_page_free(m);
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
|
@@ -58,7 +58,7 @@
  * in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1100014	/* Master, propagated to newvers */
+#define __FreeBSD_version 1100015	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
@@ -116,7 +116,7 @@ struct vmmeter {
 };
 #ifdef _KERNEL
 
-extern struct vmmeter cnt;
+extern struct vmmeter vm_cnt;
 
 extern int vm_pageout_wakeup_thresh;
 
@@ -131,7 +131,8 @@ static __inline
 int
 vm_page_count_severe(void)
 {
-	return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count));
+	return (vm_cnt.v_free_severe > (vm_cnt.v_free_count +
+	    vm_cnt.v_cache_count));
 }
 
 /*
@@ -148,7 +149,7 @@ static __inline
 int
 vm_page_count_min(void)
 {
-	return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count));
+	return (vm_cnt.v_free_min > (vm_cnt.v_free_count + vm_cnt.v_cache_count));
 }
 
 /*
@ -160,7 +161,8 @@ static __inline
|
||||
int
|
||||
vm_page_count_target(void)
|
||||
{
|
||||
return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count));
|
||||
return (vm_cnt.v_free_target > (vm_cnt.v_free_count +
|
||||
vm_cnt.v_cache_count));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -172,7 +174,8 @@ static __inline
|
||||
int
|
||||
vm_paging_target(void)
|
||||
{
|
||||
return (cnt.v_free_target - (cnt.v_free_count + cnt.v_cache_count));
|
||||
return (vm_cnt.v_free_target - (vm_cnt.v_free_count +
|
||||
vm_cnt.v_cache_count));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -183,7 +186,8 @@ static __inline
|
||||
int
|
||||
vm_paging_needed(void)
|
||||
{
|
||||
return (cnt.v_free_count + cnt.v_cache_count < vm_pageout_wakeup_thresh);
|
||||
return (vm_cnt.v_free_count + vm_cnt.v_cache_count <
|
||||
vm_pageout_wakeup_thresh);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -181,7 +181,7 @@ memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
|
||||
* This prevents memguard's page promotions from completely
|
||||
* using up memory, since most malloc(9) calls are sub-page.
|
||||
*/
|
||||
mem_pgs = cnt.v_page_count;
|
||||
mem_pgs = vm_cnt.v_page_count;
|
||||
memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
|
||||
/*
|
||||
* We want as much KVA as we can take safely. Use at most our
|
||||
|
@ -203,7 +203,7 @@ swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
|
||||
mtx_lock(&sw_dev_mtx);
|
||||
r = swap_reserved + incr;
|
||||
if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
|
||||
s = cnt.v_page_count - cnt.v_free_reserved - cnt.v_wire_count;
|
||||
s = vm_cnt.v_page_count - vm_cnt.v_free_reserved - vm_cnt.v_wire_count;
|
||||
s *= PAGE_SIZE;
|
||||
} else
|
||||
s = 0;
|
||||
@ -545,7 +545,7 @@ swap_pager_swap_init(void)
|
||||
* can hold 16 pages, so this is probably overkill. This reservation
|
||||
* is typically limited to around 32MB by default.
|
||||
*/
|
||||
n = cnt.v_page_count / 2;
|
||||
n = vm_cnt.v_page_count / 2;
|
||||
if (maxswzone && n > maxswzone / sizeof(struct swblock))
|
||||
n = maxswzone / sizeof(struct swblock);
|
||||
n2 = n;
|
||||
@ -2316,7 +2316,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
|
||||
* of data we will have to page back in, plus an epsilon so
|
||||
* the system doesn't become critically low on swap space.
|
||||
*/
|
||||
if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
|
||||
if (vm_cnt.v_free_count + vm_cnt.v_cache_count + swap_pager_avail <
|
||||
nblks + nswap_lowat) {
|
||||
return (ENOMEM);
|
||||
}
|
||||
|
@ -197,7 +197,7 @@ vslock(void *addr, size_t len)
|
||||
* Also, the sysctl code, which is the only present user
|
||||
* of vslock(), does a hard loop on EAGAIN.
|
||||
*/
|
||||
if (npages + cnt.v_wire_count > vm_page_max_wired)
|
||||
if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
|
||||
return (EAGAIN);
|
||||
#endif
|
||||
error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
|
||||
|
@ -1840,7 +1840,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
|
||||
* free pages allocating pv entries.
|
||||
*/
|
||||
if ((flags & MAP_PREFAULT_MADVISE) &&
|
||||
cnt.v_free_count < cnt.v_free_reserved) {
|
||||
vm_cnt.v_free_count < vm_cnt.v_free_reserved) {
|
||||
psize = tmpidx;
|
||||
break;
|
||||
}
|
||||
|
@@ -53,24 +53,24 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <sys/sysctl.h>
 
-struct vmmeter cnt;
+struct vmmeter vm_cnt;
 
 SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
-	CTLFLAG_RW, &cnt.v_free_min, 0, "Minimum low-free-pages threshold");
+	CTLFLAG_RW, &vm_cnt.v_free_min, 0, "Minimum low-free-pages threshold");
 SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
-	CTLFLAG_RW, &cnt.v_free_target, 0, "Desired free pages");
+	CTLFLAG_RW, &vm_cnt.v_free_target, 0, "Desired free pages");
 SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
-	CTLFLAG_RW, &cnt.v_free_reserved, 0, "Pages reserved for deadlock");
+	CTLFLAG_RW, &vm_cnt.v_free_reserved, 0, "Pages reserved for deadlock");
 SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
-	CTLFLAG_RW, &cnt.v_inactive_target, 0, "Pages desired inactive");
+	CTLFLAG_RW, &vm_cnt.v_inactive_target, 0, "Pages desired inactive");
 SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
-	CTLFLAG_RW, &cnt.v_cache_min, 0, "Min pages on cache queue");
+	CTLFLAG_RW, &vm_cnt.v_cache_min, 0, "Min pages on cache queue");
 SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
-	CTLFLAG_RW, &cnt.v_cache_max, 0, "Max pages on cache queue");
+	CTLFLAG_RW, &vm_cnt.v_cache_max, 0, "Max pages on cache queue");
 SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
-	CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "Min pages reserved for kernel");
+	CTLFLAG_RW, &vm_cnt.v_pageout_free_min, 0, "Min pages reserved for kernel");
 SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
-	CTLFLAG_RW, &cnt.v_free_severe, 0, "Severe page depletion point");
+	CTLFLAG_RW, &vm_cnt.v_free_severe, 0, "Severe page depletion point");
 
 static int
 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
@ -231,7 +231,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
|
||||
}
|
||||
}
|
||||
mtx_unlock(&vm_object_list_mtx);
|
||||
total.t_free = cnt.v_free_count + cnt.v_cache_count;
|
||||
total.t_free = vm_cnt.v_free_count + vm_cnt.v_cache_count;
|
||||
return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
|
||||
}
|
||||
|
||||
@ -251,7 +251,7 @@ static int
|
||||
vcnt(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
int count = *(int *)arg1;
|
||||
int offset = (char *)arg1 - (char *)&cnt;
|
||||
int offset = (char *)arg1 - (char *)&vm_cnt;
|
||||
int i;
|
||||
|
||||
CPU_FOREACH(i) {
|
||||
@ -273,7 +273,7 @@ SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
|
||||
|
||||
#define VM_STATS(parent, var, descr) \
|
||||
SYSCTL_PROC(parent, OID_AUTO, var, \
|
||||
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &cnt.var, 0, vcnt, \
|
||||
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &vm_cnt.var, 0, vcnt, \
|
||||
"IU", descr)
|
||||
#define VM_STATS_VM(var, descr) VM_STATS(_vm_stats_vm, var, descr)
|
||||
#define VM_STATS_SYS(var, descr) VM_STATS(_vm_stats_sys, var, descr)
|
||||
|
@ -1090,7 +1090,7 @@ vm_mlock(struct proc *proc, struct ucred *cred, const void *addr0, size_t len)
|
||||
return (ENOMEM);
|
||||
}
|
||||
PROC_UNLOCK(proc);
|
||||
if (npages + cnt.v_wire_count > vm_page_max_wired)
|
||||
if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
|
||||
return (EAGAIN);
|
||||
#ifdef RACCT
|
||||
PROC_LOCK(proc);
|
||||
|
@ -1957,7 +1957,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
|
||||
("inconsistent wire count %d %d %p",
|
||||
p->wire_count, wirings, p));
|
||||
p->wire_count = 0;
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
}
|
||||
vm_page_free(p);
|
||||
|
@ -209,9 +209,9 @@ vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
|
||||
void
|
||||
vm_set_page_size(void)
|
||||
{
|
||||
if (cnt.v_page_size == 0)
|
||||
cnt.v_page_size = PAGE_SIZE;
|
||||
if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
|
||||
if (vm_cnt.v_page_size == 0)
|
||||
vm_cnt.v_page_size = PAGE_SIZE;
|
||||
if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
|
||||
panic("vm_set_page_size: page size not a power of two");
|
||||
}
|
||||
|
||||
@ -254,11 +254,11 @@ vm_page_domain_init(struct vm_domain *vmd)
|
||||
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
|
||||
"vm inactive pagequeue";
|
||||
*__DECONST(int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
|
||||
&cnt.v_inactive_count;
|
||||
&vm_cnt.v_inactive_count;
|
||||
*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
|
||||
"vm active pagequeue";
|
||||
*__DECONST(int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
|
||||
&cnt.v_active_count;
|
||||
&vm_cnt.v_active_count;
|
||||
vmd->vmd_page_count = 0;
|
||||
vmd->vmd_free_count = 0;
|
||||
vmd->vmd_segs = 0;
|
||||
@ -452,8 +452,8 @@ vm_page_startup(vm_offset_t vaddr)
|
||||
* Add every available physical page that is not blacklisted to
|
||||
* the free lists.
|
||||
*/
|
||||
cnt.v_page_count = 0;
|
||||
cnt.v_free_count = 0;
|
||||
vm_cnt.v_page_count = 0;
|
||||
vm_cnt.v_free_count = 0;
|
||||
list = getenv("vm.blacklist");
|
||||
for (i = 0; phys_avail[i + 1] != 0; i += 2) {
|
||||
pa = phys_avail[i];
|
||||
@ -1339,7 +1339,7 @@ vm_page_cache_remove(vm_page_t m)
|
||||
("vm_page_cache_remove: page %p is not cached", m));
|
||||
vm_radix_remove(&m->object->cache, m->pindex);
|
||||
m->object = NULL;
|
||||
cnt.v_cache_count--;
|
||||
vm_cnt.v_cache_count--;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1482,11 +1482,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
|
||||
* vm_page_cache().
|
||||
*/
|
||||
mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
|
||||
if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
|
||||
if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
|
||||
(req_class == VM_ALLOC_SYSTEM &&
|
||||
cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
|
||||
vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
|
||||
(req_class == VM_ALLOC_INTERRUPT &&
|
||||
cnt.v_free_count + cnt.v_cache_count > 0)) {
|
||||
vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
|
||||
/*
|
||||
* Allocate from the free queue if the number of free pages
|
||||
* exceeds the minimum for the request class.
|
||||
@ -1557,7 +1557,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
|
||||
KASSERT(m->valid != 0,
|
||||
("vm_page_alloc: cached page %p is invalid", m));
|
||||
if (m->object == object && m->pindex == pindex)
|
||||
cnt.v_reactivated++;
|
||||
vm_cnt.v_reactivated++;
|
||||
else
|
||||
m->valid = 0;
|
||||
m_object = m->object;
|
||||
@ -1597,7 +1597,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
|
||||
* The page lock is not required for wiring a page until that
|
||||
* page is inserted into the object.
|
||||
*/
|
||||
atomic_add_int(&cnt.v_wire_count, 1);
|
||||
atomic_add_int(&vm_cnt.v_wire_count, 1);
|
||||
m->wire_count = 1;
|
||||
}
|
||||
m->act_count = 0;
|
||||
@ -1609,7 +1609,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
|
||||
vdrop(vp);
|
||||
pagedaemon_wakeup();
|
||||
if (req & VM_ALLOC_WIRED) {
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
m->wire_count = 0;
|
||||
}
|
||||
m->object = NULL;
|
||||
@ -1725,11 +1725,11 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
|
||||
|
||||
SLIST_INIT(&deferred_vdrop_list);
|
||||
mtx_lock(&vm_page_queue_free_mtx);
|
||||
if (cnt.v_free_count + cnt.v_cache_count >= npages +
|
||||
cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
|
||||
cnt.v_free_count + cnt.v_cache_count >= npages +
|
||||
cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
|
||||
cnt.v_free_count + cnt.v_cache_count >= npages)) {
|
||||
if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
|
||||
vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
|
||||
vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
|
||||
vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
|
||||
vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages)) {
|
||||
#if VM_NRESERVLEVEL > 0
|
||||
retry:
|
||||
if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
|
||||
@ -1776,7 +1776,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
|
||||
if ((req & VM_ALLOC_NODUMP) != 0)
|
||||
flags |= PG_NODUMP;
|
||||
if ((req & VM_ALLOC_WIRED) != 0)
|
||||
atomic_add_int(&cnt.v_wire_count, npages);
|
||||
atomic_add_int(&vm_cnt.v_wire_count, npages);
|
||||
if (object != NULL) {
|
||||
if (object->memattr != VM_MEMATTR_DEFAULT &&
|
||||
memattr == VM_MEMATTR_DEFAULT)
|
||||
@ -1803,7 +1803,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
|
||||
if (vm_paging_needed())
|
||||
pagedaemon_wakeup();
|
||||
if ((req & VM_ALLOC_WIRED) != 0)
|
||||
atomic_subtract_int(&cnt.v_wire_count,
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count,
|
||||
npages);
|
||||
for (m_tmp = m, m = m_ret;
|
||||
m < &m_ret[npages]; m++) {
|
||||
@ -1916,11 +1916,11 @@ vm_page_alloc_freelist(int flind, int req)
|
||||
* Do not allocate reserved pages unless the req has asked for it.
|
||||
*/
|
||||
mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
|
||||
if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
|
||||
if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
|
||||
(req_class == VM_ALLOC_SYSTEM &&
|
||||
cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
|
||||
vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
|
||||
(req_class == VM_ALLOC_INTERRUPT &&
|
||||
cnt.v_free_count + cnt.v_cache_count > 0))
|
||||
vm_cnt.v_free_count + vm_cnt.v_cache_count > 0))
|
||||
m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
|
||||
else {
|
||||
mtx_unlock(&vm_page_queue_free_mtx);
|
||||
@ -1949,7 +1949,7 @@ vm_page_alloc_freelist(int flind, int req)
|
||||
* The page lock is not required for wiring a page that does
|
||||
* not belong to an object.
|
||||
*/
|
||||
atomic_add_int(&cnt.v_wire_count, 1);
|
||||
atomic_add_int(&vm_cnt.v_wire_count, 1);
|
||||
m->wire_count = 1;
|
||||
}
|
||||
/* Unmanaged pages don't use "act_count". */
|
||||
@ -1981,7 +1981,7 @@ vm_wait(void)
|
||||
vm_pages_needed = 1;
|
||||
wakeup(&vm_pages_needed);
|
||||
}
|
||||
msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
|
||||
msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
|
||||
"vmwait", 0);
|
||||
}
|
||||
}
|
||||
@ -2005,7 +2005,7 @@ vm_waitpfault(void)
|
||||
vm_pages_needed = 1;
|
||||
wakeup(&vm_pages_needed);
|
||||
}
|
||||
msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
|
||||
msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
|
||||
"pfault", 0);
|
||||
}
|
||||
|
||||
@ -2172,7 +2172,7 @@ vm_page_free_wakeup(void)
|
||||
* some free.
|
||||
*/
|
||||
if (vm_pageout_pages_needed &&
|
||||
cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
|
||||
vm_cnt.v_cache_count + vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
|
||||
wakeup(&vm_pageout_pages_needed);
|
||||
vm_pageout_pages_needed = 0;
|
||||
}
|
||||
@ -2183,7 +2183,7 @@ vm_page_free_wakeup(void)
|
||||
*/
|
||||
if (vm_pages_needed && !vm_page_count_min()) {
|
||||
vm_pages_needed = 0;
|
||||
wakeup(&cnt.v_free_count);
|
||||
wakeup(&vm_cnt.v_free_count);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2204,7 +2204,7 @@ vm_page_cache_turn_free(vm_page_t m)
|
||||
KASSERT((m->flags & PG_CACHED) != 0,
|
||||
("vm_page_cache_turn_free: page %p is not cached", m));
|
||||
m->flags &= ~PG_CACHED;
|
||||
cnt.v_cache_count--;
|
||||
vm_cnt.v_cache_count--;
|
||||
vm_phys_freecnt_adj(m, 1);
|
||||
}
|
||||
|
||||
@ -2319,7 +2319,7 @@ vm_page_wire(vm_page_t m)
|
||||
m->queue == PQ_NONE,
|
||||
("vm_page_wire: unmanaged page %p is queued", m));
|
||||
vm_page_remque(m);
|
||||
atomic_add_int(&cnt.v_wire_count, 1);
|
||||
atomic_add_int(&vm_cnt.v_wire_count, 1);
|
||||
}
|
||||
m->wire_count++;
|
||||
KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
|
||||
@ -2355,7 +2355,7 @@ vm_page_unwire(vm_page_t m, int activate)
|
||||
if (m->wire_count > 0) {
|
||||
m->wire_count--;
|
||||
if (m->wire_count == 0) {
|
||||
atomic_subtract_int(&cnt.v_wire_count, 1);
|
||||
atomic_subtract_int(&vm_cnt.v_wire_count, 1);
|
||||
if ((m->oflags & VPO_UNMANAGED) != 0 ||
|
||||
m->object == NULL)
|
||||
return;
|
||||
@ -2552,7 +2552,7 @@ vm_page_cache(vm_page_t m)
|
||||
cache_was_empty = vm_radix_is_singleton(&object->cache);
|
||||
|
||||
m->flags |= PG_CACHED;
|
||||
cnt.v_cache_count++;
|
||||
vm_cnt.v_cache_count++;
|
||||
PCPU_INC(cnt.v_tcached);
|
||||
#if VM_NRESERVLEVEL > 0
|
||||
if (!vm_reserv_free_page(m)) {
|
||||
@ -3116,16 +3116,16 @@ vm_page_object_lock_assert(vm_page_t m)
|
||||
|
||||
DB_SHOW_COMMAND(page, vm_page_print_page_info)
|
||||
{
|
||||
db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
|
||||
db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
|
||||
db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
|
||||
db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
|
||||
db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
|
||||
db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
|
||||
db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
|
||||
db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
|
||||
db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
|
||||
db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
|
||||
db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count);
|
||||
db_printf("vm_cnt.v_cache_count: %d\n", vm_cnt.v_cache_count);
|
||||
db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count);
|
||||
db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count);
|
||||
db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count);
|
||||
db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
|
||||
db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
|
||||
db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
|
||||
db_printf("vm_cnt.v_cache_min: %d\n", vm_cnt.v_cache_min);
|
||||
db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
|
||||
}
|
||||
|
||||
DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
|
||||
@ -3133,7 +3133,7 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
|
||||
int dom;
|
||||
|
||||
db_printf("pq_free %d pq_cache %d\n",
|
||||
cnt.v_free_count, cnt.v_cache_count);
|
||||
vm_cnt.v_free_count, vm_cnt.v_cache_count);
|
||||
for (dom = 0; dom < vm_ndomains; dom++) {
|
||||
db_printf(
|
||||
"dom %d page_cnt %d free %d pq_act %d pq_inact %d pass %d\n",
|
||||
|
@ -678,9 +678,9 @@ vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
|
||||
initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;
|
||||
|
||||
inactl = 0;
|
||||
inactmax = cnt.v_inactive_count;
|
||||
inactmax = vm_cnt.v_inactive_count;
|
||||
actl = 0;
|
||||
actmax = tries < 2 ? 0 : cnt.v_active_count;
|
||||
actmax = tries < 2 ? 0 : vm_cnt.v_active_count;
|
||||
dom = initial_dom;
|
||||
|
||||
/*
|
||||
@ -1310,7 +1310,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
|
||||
* Compute the number of pages we want to try to move from the
|
||||
* active queue to the inactive queue.
|
||||
*/
|
||||
page_shortage = cnt.v_inactive_target - cnt.v_inactive_count +
|
||||
page_shortage = vm_cnt.v_inactive_target - vm_cnt.v_inactive_count +
|
||||
vm_paging_target() + deficit + addl_page_shortage;
|
||||
|
||||
pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
|
||||
@ -1576,7 +1576,7 @@ vm_pageout_oom(int shortage)
|
||||
killproc(bigproc, "out of swap space");
|
||||
sched_nice(bigproc, PRIO_MIN);
|
||||
PROC_UNLOCK(bigproc);
|
||||
wakeup(&cnt.v_free_count);
|
||||
wakeup(&vm_cnt.v_free_count);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1612,7 +1612,7 @@ vm_pageout_worker(void *arg)
|
||||
if (vm_pages_needed && !vm_page_count_min()) {
|
||||
if (!vm_paging_needed())
|
||||
vm_pages_needed = 0;
|
||||
wakeup(&cnt.v_free_count);
|
||||
wakeup(&vm_cnt.v_free_count);
|
||||
}
|
||||
if (vm_pages_needed) {
|
||||
/*
|
||||
@ -1635,7 +1635,7 @@ vm_pageout_worker(void *arg)
|
||||
|
||||
}
|
||||
if (vm_pages_needed) {
|
||||
cnt.v_pdwakeups++;
|
||||
vm_cnt.v_pdwakeups++;
|
||||
domain->vmd_pass++;
|
||||
}
|
||||
mtx_unlock(&vm_page_queue_free_mtx);
|
||||
@ -1656,8 +1656,8 @@ vm_pageout(void)
|
||||
/*
|
||||
* Initialize some paging parameters.
|
||||
*/
|
||||
cnt.v_interrupt_free_min = 2;
|
||||
if (cnt.v_page_count < 2000)
|
||||
vm_cnt.v_interrupt_free_min = 2;
|
||||
if (vm_cnt.v_page_count < 2000)
|
||||
vm_pageout_page_count = 8;
|
||||
|
||||
/*
|
||||
@ -1665,27 +1665,27 @@ vm_pageout(void)
|
||||
* swap pager structures plus enough for any pv_entry structs
|
||||
* when paging.
|
||||
*/
|
||||
if (cnt.v_page_count > 1024)
|
||||
cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
|
||||
if (vm_cnt.v_page_count > 1024)
|
||||
vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
|
||||
else
|
||||
cnt.v_free_min = 4;
|
||||
cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
|
||||
cnt.v_interrupt_free_min;
|
||||
cnt.v_free_reserved = vm_pageout_page_count +
|
||||
cnt.v_pageout_free_min + (cnt.v_page_count / 768);
|
||||
cnt.v_free_severe = cnt.v_free_min / 2;
|
||||
cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
|
||||
cnt.v_free_min += cnt.v_free_reserved;
|
||||
cnt.v_free_severe += cnt.v_free_reserved;
|
||||
cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
|
||||
if (cnt.v_inactive_target > cnt.v_free_count / 3)
|
||||
cnt.v_inactive_target = cnt.v_free_count / 3;
|
||||
vm_cnt.v_free_min = 4;
|
||||
vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
|
||||
vm_cnt.v_interrupt_free_min;
|
||||
vm_cnt.v_free_reserved = vm_pageout_page_count +
|
||||
vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
|
||||
vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
|
||||
vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
|
||||
vm_cnt.v_free_min += vm_cnt.v_free_reserved;
|
||||
vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
|
||||
vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
|
||||
if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
|
||||
vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;
|
||||
|
||||
/*
|
||||
* Set the default wakeup threshold to be 10% above the minimum
|
||||
* page limit. This keeps the steady state out of shortfall.
|
||||
*/
|
||||
vm_pageout_wakeup_thresh = (cnt.v_free_min / 10) * 11;
|
||||
vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;
|
||||
|
||||
/*
|
||||
* Set interval in seconds for active scan. We want to visit each
|
||||
@ -1697,7 +1697,7 @@ vm_pageout(void)
|
||||
|
||||
/* XXX does not really belong here */
|
||||
if (vm_page_max_wired == 0)
|
||||
vm_page_max_wired = cnt.v_free_count / 3;
|
||||
vm_page_max_wired = vm_cnt.v_free_count / 3;
|
||||
|
||||
swap_pager_swap_init();
|
||||
#if MAXMEMDOM > 1
|
||||
@ -1716,7 +1716,7 @@ vm_pageout(void)
|
||||
/*
|
||||
* Unless the free page queue lock is held by the caller, this function
|
||||
* should be regarded as advisory. Specifically, the caller should
|
||||
* not msleep() on &cnt.v_free_count following this function unless
|
||||
* not msleep() on &vm_cnt.v_free_count following this function unless
|
||||
* the free page queue lock is held until the msleep() is performed.
|
||||
*/
|
||||
void
|
||||
|
@ -75,13 +75,13 @@
|
||||
#define VM_TOTAL 1 /* struct vmtotal */
|
||||
#define VM_METER VM_TOTAL/* deprecated, use VM_TOTAL */
|
||||
#define VM_LOADAVG 2 /* struct loadavg */
|
||||
#define VM_V_FREE_MIN 3 /* cnt.v_free_min */
|
||||
#define VM_V_FREE_TARGET 4 /* cnt.v_free_target */
|
||||
#define VM_V_FREE_RESERVED 5 /* cnt.v_free_reserved */
|
||||
#define VM_V_INACTIVE_TARGET 6 /* cnt.v_inactive_target */
|
||||
#define VM_V_CACHE_MIN 7 /* cnt.v_cache_min */
|
||||
#define VM_V_CACHE_MAX 8 /* cnt.v_cache_max */
|
||||
#define VM_V_PAGEOUT_FREE_MIN 9 /* cnt.v_pageout_free_min */
|
||||
#define VM_V_FREE_MIN 3 /* vm_cnt.v_free_min */
|
||||
#define VM_V_FREE_TARGET 4 /* vm_cnt.v_free_target */
|
||||
#define VM_V_FREE_RESERVED 5 /* vm_cnt.v_free_reserved */
|
||||
#define VM_V_INACTIVE_TARGET 6 /* vm_cnt.v_inactive_target */
|
||||
#define VM_V_CACHE_MIN 7 /* vm_cnt.v_cache_min */
|
||||
#define VM_V_CACHE_MAX 8 /* vm_cnt.v_cache_max */
|
||||
#define VM_V_PAGEOUT_FREE_MIN 9 /* vm_cnt.v_pageout_free_min */
|
||||
#define VM_OBSOLETE_10 10 /* pageout algorithm */
|
||||
#define VM_SWAPPING_ENABLED 11 /* swapping enabled */
|
||||
#define VM_MAXID 12 /* number of valid vm ids */
|
||||
|
@ -383,7 +383,7 @@ vm_phys_add_page(vm_paddr_t pa)
|
||||
vm_page_t m;
|
||||
struct vm_domain *vmd;
|
||||
|
||||
cnt.v_page_count++;
|
||||
vm_cnt.v_page_count++;
|
||||
m = vm_phys_paddr_to_vm_page(pa);
|
||||
m->phys_addr = pa;
|
||||
m->queue = PQ_NONE;
|
||||
|
@ -113,7 +113,7 @@ vm_phys_freecnt_adj(vm_page_t m, int adj)
|
||||
{
|
||||
|
||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
|
||||
cnt.v_free_count += adj;
|
||||
vm_cnt.v_free_count += adj;
|
||||
vm_phys_domain(m)->vmd_free_count += adj;
|
||||
}
|
||||
|
||||
|
@ -298,7 +298,7 @@ vm_radix_reserve_kva(void *arg __unused)
|
||||
* are needed to store them.
|
||||
*/
|
||||
if (!uma_zone_reserve_kva(vm_radix_node_zone,
|
||||
((vm_paddr_t)cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
|
||||
((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
|
||||
sizeof(struct vm_radix_node))))
|
||||
panic("%s: unable to reserve KVA", __func__);
|
||||
}
|
||||
|
@ -84,9 +84,9 @@ vm_page_zero_check(void)
|
||||
* fast sleeps. We also do not want to be continuously zeroing
|
||||
* pages because doing so may flush our L1 and L2 caches too much.
|
||||
*/
|
||||
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
|
||||
if (zero_state && vm_page_zero_count >= ZIDLE_LO(vm_cnt.v_free_count))
|
||||
return (0);
|
||||
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
|
||||
if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
|
||||
return (0);
|
||||
return (1);
|
||||
}
|
||||
@ -98,7 +98,7 @@ vm_page_zero_idle(void)
|
||||
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
|
||||
zero_state = 0;
|
||||
if (vm_phys_zero_pages_idle()) {
|
||||
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
|
||||
if (vm_page_zero_count >= ZIDLE_HI(vm_cnt.v_free_count))
|
||||
zero_state = 1;
|
||||
}
|
||||
}
|
||||
|
@ -1026,7 +1026,8 @@ vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
|
||||
* daemon up. This should be probably be addressed XXX.
|
||||
*/
|
||||
|
||||
if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
|
||||
if ((vm_cnt.v_free_count + vm_cnt.v_cache_count) <
|
||||
vm_cnt.v_pageout_free_min)
|
||||
sync |= OBJPC_SYNC;
|
||||
|
||||
/*
|
||||
|