Optimize vmmeter locking.

In particular:
- Add an explanatory table for the locking of struct vmmeter members
- Apply the new rules to some of those members
- Remove some comments made redundant by the table

Heavily reviewed by: alc, bde, jeff
Approved by: jeff (mentor)
Attilio Rao 2007-06-10 21:59:14 +00:00
parent f194524fb1
commit 393a081d42
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170517
9 changed files with 73 additions and 90 deletions
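
The hunks below follow two patterns. Counters that only need statistical accuracy move from atomic_add_int() to PCPU_INC()/PCPU_ADD(), which bump a per-CPU copy of the counter without a locked read-modify-write; counters that are already serialized by an existing mutex drop to plain increments. As a rough illustration of the first pattern, here is a minimal userspace analogy (C11 with pthreads, not kernel code; every name in it is invented for the sketch): each thread stands in for a CPU, and a reader rebuilds the total on demand, much as the kernel folds the per-CPU copies into the global statistics.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4
#define NITERS   1000000

/* "(a)" style: one shared counter, atomic read-modify-write. */
static atomic_uint shared_total;

/* "(p)" style: one padded slot per thread, plain increments. */
static struct {
        unsigned int v;
        char pad[64 - sizeof(unsigned int)];    /* avoid false sharing */
} percpu[NTHREADS];

static void *
worker(void *arg)
{
        int self = *(int *)arg;
        int i;

        for (i = 0; i < NITERS; i++) {
                atomic_fetch_add(&shared_total, 1);     /* locked bus cycle */
                percpu[self].v++;                       /* private cache line */
        }
        return (NULL);
}

int
main(void)
{
        pthread_t tid[NTHREADS];
        int id[NTHREADS], i;
        unsigned int sum = 0;

        for (i = 0; i < NTHREADS; i++) {
                id[i] = i;
                pthread_create(&tid[i], NULL, worker, &id[i]);
        }
        for (i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);
        /* Readers rebuild the total on demand; a mid-run reader may see
         * a slightly stale value, which is the accepted trade-off. */
        for (i = 0; i < NTHREADS; i++)
                sum += percpu[i].v;
        printf("atomic: %u  summed per-thread: %u\n",
            atomic_load(&shared_total), sum);
        return (0);
}

The summed total can be momentarily stale while writers are running; that is exactly the "occasional inaccuracy" the comments deleted in the hunks below referred to.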

View File

@@ -736,10 +736,6 @@ syscall(struct trapframe *frame)
int reg, regcnt;
ksiginfo_t ksi;
/*
* note: PCPU_INC() can only be used if we can afford
* occassional inaccuracy in the count.
*/
PCPU_INC(cnt.v_syscall);
#ifdef DIAGNOSTIC

View File

@@ -104,12 +104,7 @@ ia32_syscall(struct trapframe *frame)
u_int code;
ksiginfo_t ksi;
/*
* note: PCPU_INC() can only be used if we can afford
* occassional inaccuracy in the count.
*/
PCPU_INC(cnt.v_syscall);
td->td_pticks = 0;
td->td_frame = frame;
if (td->td_ucred != p->p_ucred)

View File

@@ -921,10 +921,6 @@ syscall(struct trapframe *frame)
u_int code;
ksiginfo_t ksi;
/*
* note: PCPU_INC() can only be used if we can afford
* occassional inaccuracy in the count.
*/
PCPU_INC(cnt.v_syscall);
#ifdef DIAGNOSTIC

View File

@@ -658,20 +658,20 @@ fork1(td, flags, pages, procp)
vm_forkproc(td, p2, td2, flags);
if (flags == (RFFDG | RFPROC)) {
atomic_add_int(&cnt.v_forks, 1);
atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
PCPU_INC(cnt.v_forks);
PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
atomic_add_int(&cnt.v_vforks, 1);
atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
PCPU_INC(cnt.v_vforks);
PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else if (p1 == &proc0) {
atomic_add_int(&cnt.v_kthreads, 1);
atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
PCPU_INC(cnt.v_kthreads);
PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
} else {
atomic_add_int(&cnt.v_rforks, 1);
atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
PCPU_INC(cnt.v_rforks);
PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
p2->p_vmspace->vm_ssize);
}
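
The comments deleted in the trap and syscall hunks above spelled out the trade-off at work in this hunk as well: PCPU_INC() can only be used when occasional inaccuracy in the count is affordable. These fork counters are consumed by statistics reporting (e.g. vmstat(8)), so per rule (p) in the table below, an increment that is atomic merely with respect to the local CPU's interrupts is sufficient, and the locked read-modify-write of atomic_add_int() becomes pure overhead.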

View File

@@ -35,70 +35,76 @@
/*
* System wide statistics counters.
* Locking:
* a - locked by atomic operations
* c - constant after initialization
* f - locked by vm_page_queue_free_mtx
* p - locked by being in the PCPU and atomicity respect to interrupts
* q - locked by vm_page_queue_mtx
*/
struct vmmeter {
/*
* General system activity.
*/
u_int v_swtch; /* context switches */
u_int v_trap; /* calls to trap */
u_int v_syscall; /* calls to syscall() */
u_int v_intr; /* device interrupts */
u_int v_soft; /* software interrupts */
u_int v_swtch; /* (p) context switches */
u_int v_trap; /* (p) calls to trap */
u_int v_syscall; /* (p) calls to syscall() */
u_int v_intr; /* (p) device interrupts */
u_int v_soft; /* (p) software interrupts */
/*
* Virtual memory activity.
*/
u_int v_vm_faults; /* number of address memory faults */
u_int v_cow_faults; /* number of copy-on-writes */
u_int v_cow_optim; /* number of optimized copy-on-writes */
u_int v_zfod; /* pages zero filled on demand */
u_int v_ozfod; /* optimized zero fill pages */
u_int v_swapin; /* swap pager pageins */
u_int v_swapout; /* swap pager pageouts */
u_int v_swappgsin; /* swap pager pages paged in */
u_int v_swappgsout; /* swap pager pages paged out */
u_int v_vnodein; /* vnode pager pageins */
u_int v_vnodeout; /* vnode pager pageouts */
u_int v_vnodepgsin; /* vnode_pager pages paged in */
u_int v_vnodepgsout; /* vnode pager pages paged out */
u_int v_intrans; /* intransit blocking page faults */
u_int v_reactivated; /* number of pages reactivated from free list */
u_int v_pdwakeups; /* number of times daemon has awaken from sleep */
u_int v_pdpages; /* number of pages analyzed by daemon */
u_int v_vm_faults; /* (p) address memory faults */
u_int v_cow_faults; /* (p) copy-on-writes faults */
u_int v_cow_optim; /* (p) optimized copy-on-writes faults */
u_int v_zfod; /* (p) pages zero filled on demand */
u_int v_ozfod; /* (p) optimized zero fill pages */
u_int v_swapin; /* (p) swap pager pageins */
u_int v_swapout; /* (p) swap pager pageouts */
u_int v_swappgsin; /* (p) swap pager pages paged in */
u_int v_swappgsout; /* (p) swap pager pages paged out */
u_int v_vnodein; /* (p) vnode pager pageins */
u_int v_vnodeout; /* (p) vnode pager pageouts */
u_int v_vnodepgsin; /* (p) vnode_pager pages paged in */
u_int v_vnodepgsout; /* (p) vnode pager pages paged out */
u_int v_intrans; /* (p) intransit blocking page faults */
u_int v_reactivated; /* (q) pages reactivated from free list */
u_int v_pdwakeups; /* (f) times daemon has awaken from sleep */
u_int v_pdpages; /* (q) pages analyzed by daemon */
u_int v_dfree; /* pages freed by daemon */
u_int v_pfree; /* pages freed by exiting processes */
u_int v_tfree; /* total pages freed */
u_int v_dfree; /* (q) pages freed by daemon */
u_int v_pfree; /* (q) pages freed by exiting processes */
u_int v_tfree; /* (p) total pages freed */
/*
* Distribution of page usages.
*/
u_int v_page_size; /* page size in bytes */
u_int v_page_count; /* total number of pages in system */
u_int v_free_reserved; /* number of pages reserved for deadlock */
u_int v_free_target; /* number of pages desired free */
u_int v_free_min; /* minimum number of pages desired free */
u_int v_free_count; /* number of pages free */
u_int v_wire_count; /* number of pages wired down */
u_int v_active_count; /* number of pages active */
u_int v_inactive_target; /* number of pages desired inactive */
u_int v_inactive_count; /* number of pages inactive */
u_int v_cache_count; /* number of pages on buffer cache queue */
u_int v_cache_min; /* min number of pages desired on cache queue */
u_int v_cache_max; /* max number of pages in cached obj */
u_int v_pageout_free_min; /* min number pages reserved for kernel */
u_int v_interrupt_free_min; /* reserved number of pages for int code */
u_int v_free_severe; /* severe depletion of pages below this pt */
u_int v_page_size; /* (c) page size in bytes */
u_int v_page_count; /* (c) total number of pages in system */
u_int v_free_reserved; /* (c) pages reserved for deadlock */
u_int v_free_target; /* (c) pages desired free */
u_int v_free_min; /* (c) pages desired free */
u_int v_free_count; /* (f) pages free */
u_int v_wire_count; /* (a) pages wired down */
u_int v_active_count; /* (q) pages active */
u_int v_inactive_target; /* (c) pages desired inactive */
u_int v_inactive_count; /* (q) pages inactive */
u_int v_cache_count; /* (q) pages on buffer cache queue */
u_int v_cache_min; /* (c) min pages desired on cache queue */
u_int v_cache_max; /* (c) max pages in cached obj */
u_int v_pageout_free_min; /* (c) min pages reserved for kernel */
u_int v_interrupt_free_min; /* (c) reserved pages for int code */
u_int v_free_severe; /* (c) severe page depletion point */
/*
* Fork/vfork/rfork activity.
*/
u_int v_forks; /* number of fork() calls */
u_int v_vforks; /* number of vfork() calls */
u_int v_rforks; /* number of rfork() calls */
u_int v_kthreads; /* number of fork() calls by kernel */
u_int v_forkpages; /* number of VM pages affected by fork() */
u_int v_vforkpages; /* number of VM pages affected by vfork() */
u_int v_rforkpages; /* number of VM pages affected by rfork() */
u_int v_kthreadpages; /* number of VM pages affected by fork() by kernel */
u_int v_forks; /* (p) fork() calls */
u_int v_vforks; /* (p) vfork() calls */
u_int v_rforks; /* (p) rfork() calls */
u_int v_kthreads; /* (p) fork() calls by kernel */
u_int v_forkpages; /* (p) VM pages affected by fork() */
u_int v_vforkpages; /* (p) VM pages affected by vfork() */
u_int v_rforkpages; /* (p) VM pages affected by rfork() */
u_int v_kthreadpages; /* (p) VM pages affected by fork() by kernel */
};
#ifdef _KERNEL
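
A hypothetical userspace model of the mutable locking classes in the table above (a pthread mutex stands in for vm_page_queue_mtx, a thread-local variable for the per-CPU copy; all names are invented for the sketch):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER;

static unsigned int inactive_count;             /* (q) plain ++, lock held */
static atomic_uint wire_count;                  /* (a) atomic ops, no lock */
static _Thread_local unsigned int syscalls;     /* (p) private per-CPU copy */
static unsigned int page_size;                  /* (c) written once at boot */

static void
deactivate_page(void)
{
        pthread_mutex_lock(&queue_mtx);
        inactive_count++;       /* the queue lock already serializes writers */
        pthread_mutex_unlock(&queue_mtx);
}

static void
wire_page(void)
{
        atomic_fetch_add(&wire_count, 1);       /* callers hold no common lock */
}

static void
count_syscall(void)
{
        syscalls++;             /* never contended; readers sum all copies */
}

int
main(void)
{
        page_size = 4096;       /* (c): initialized before any concurrency */
        deactivate_page();
        wire_page();
        count_syscall();
        return (0);
}

The (f) class follows the same pattern as (q), with vm_page_queue_free_mtx as the protecting lock.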

View File

@@ -655,7 +655,7 @@ vm_object_terminate(vm_object_t object)
"p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
if (p->wire_count == 0) {
vm_page_free(p);
PCPU_INC(cnt.v_pfree);
cnt.v_pfree++;
} else {
vm_page_remove(p);
}

View File

@@ -1045,7 +1045,7 @@ vm_page_activate(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
PCPU_INC(cnt.v_reactivated);
cnt.v_reactivated++;
vm_pageq_remove(m);
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -1286,7 +1286,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
return;
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
PCPU_INC(cnt.v_reactivated);
cnt.v_reactivated++;
vm_page_flag_clear(m, PG_WINATCFLS);
vm_pageq_remove(m);
if (athead)
@@ -1295,11 +1295,6 @@ _vm_page_deactivate(vm_page_t m, int athead)
TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
vm_page_queues[PQ_INACTIVE].lcnt++;
/*
* Just not use an atomic here since vm_page_queues_lock
* alredy protects this field.
*/
cnt.v_inactive_count++;
}
}
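
Here the serialization is explicit: vm_page_activate() asserts that vm_page_queue_mtx is owned, so the (q) counters v_reactivated and v_inactive_count can be plain increments. The deleted comment above said as much for v_inactive_count; that information now lives in the table in vmmeter.h instead of being repeated at each use.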

View File

@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
goto unlock_return;
}
next = TAILQ_NEXT(p, listq);
PCPU_INC(cnt.v_pdpages);
cnt.v_pdpages++;
if (p->wire_count != 0 ||
p->hold_count != 0 ||
p->busy != 0 ||
@@ -745,7 +745,7 @@ vm_pageout_scan(int pass)
m != NULL && maxscan-- > 0 && page_shortage > 0;
m = next) {
PCPU_INC(cnt.v_pdpages);
cnt.v_pdpages++;
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
goto rescan0;
@@ -856,7 +856,7 @@ vm_pageout_scan(int pass)
* Invalid pages can be easily freed
*/
vm_page_free(m);
PCPU_INC(cnt.v_dfree);
cnt.v_dfree++;
--page_shortage;
} else if (m->dirty == 0) {
/*
@@ -1089,7 +1089,7 @@ vm_pageout_scan(int pass)
* The count for pagedaemon pages is done after checking the
* page for eligibility...
*/
PCPU_INC(cnt.v_pdpages);
cnt.v_pdpages++;
/*
* Check to see "how much" the page has been used.
@@ -1168,7 +1168,7 @@ vm_pageout_scan(int pass)
m));
vm_page_free(m);
VM_OBJECT_UNLOCK(object);
PCPU_INC(cnt.v_dfree);
cnt.v_dfree++;
cache_last_free = cache_cur;
cache_first_failure = -1;
break;
@@ -1429,11 +1429,6 @@ vm_pageout()
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
cnt.v_free_severe = cnt.v_free_min / 2;
/*
* Here adds don't need to be atomic since we are only initializing
* v_free_min and v_free_severe.
*/
cnt.v_free_min += cnt.v_free_reserved;
cnt.v_free_severe += cnt.v_free_reserved;
@@ -1531,7 +1526,7 @@ vm_pageout()
}
}
if (vm_pages_needed)
PCPU_INC(cnt.v_pdwakeups);
cnt.v_pdwakeups++;
mtx_unlock(&vm_page_queue_free_mtx);
vm_pageout_scan(pass);
}
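
All of the plain increments in this file sit under the lock named in the table: v_pdpages and v_dfree ((q)) are bumped while the page queues are locked during the scan, and v_pdwakeups ((f)) is bumped immediately before mtx_unlock(&vm_page_queue_free_mtx), i.e. while the free-queue mutex is still owned. That existing serialization is what lets the PCPU_INC() calls decay to ordinary ++ without losing updates.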

View File

@@ -192,7 +192,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
{
vm_page_t m;
atomic_add_int(&cnt.v_page_count, 1);
cnt.v_page_count++;
m = PHYS_TO_VM_PAGE(pa);
m->phys_addr = pa;
m->flags = 0;
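
v_page_count is tagged (c), constant after initialization, in the new table: vm_pageq_add_new_page() runs during startup page initialization, before the counter is updated concurrently, so the atomic_add_int() here bought nothing and a plain increment suffices.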