Do proper "locking" for the remaining vmmeter counters.

Some of these counters no longer rely on sched_lock protection; instead,
vmmeter updates are distributed across the CPUs via per-CPU counters,
using PCPU_INC() and PCPU_ADD().

Reviewed by: alc, bde
Approved by: jeff (mentor)
Attilio Rao 2007-06-04 21:45:18 +00:00
parent 6759608248
commit b4b7081961
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170292
10 changed files with 44 additions and 34 deletions
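
The point of the PCPU_INC()/PCPU_ADD() conversion below is that each CPU bumps its own private copy of a counter, so the update path needs neither sched_lock nor an atomic operation; a consumer that wants the system-wide value sums the per-CPU copies. The following is a minimal userland sketch of that pattern, with hypothetical names (pcpu_counter_add(), pcpu_counter_fetch(), MAXCPU = 4), not the kernel's actual pcpu(9)/vmmeter machinery:

#include <stdio.h>

#define MAXCPU  4

/*
 * Hypothetical illustration: one slot per CPU.  Each slot is only ever
 * written by the CPU that owns it, so a plain store suffices on the
 * update path.
 */
struct pcpu_counter {
        unsigned long slot[MAXCPU];
};

/* Fast path: add to the current CPU's slot; no lock, no atomic. */
static void
pcpu_counter_add(struct pcpu_counter *c, int curcpu, unsigned long v)
{
        c->slot[curcpu] += v;
}

/* Slow path: a reader sums all slots to obtain the global value. */
static unsigned long
pcpu_counter_fetch(const struct pcpu_counter *c)
{
        unsigned long sum;
        int i;

        sum = 0;
        for (i = 0; i < MAXCPU; i++)
                sum += c->slot[i];
        return (sum);
}

int
main(void)
{
        struct pcpu_counter v_swtch = { .slot = { 0 } };

        pcpu_counter_add(&v_swtch, 0, 1);       /* PCPU_INC() analogue, CPU 0 */
        pcpu_counter_add(&v_swtch, 2, 8);       /* PCPU_ADD() analogue, CPU 2 */
        printf("v_swtch = %lu\n", pcpu_counter_fetch(&v_swtch));
        return (0);
}

The trade-off is that a reader may observe a value that is slightly stale or inconsistent across CPUs, which is acceptable for statistics such as the vmmeter counters.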

View File

@@ -475,8 +475,8 @@ smbfs_getpages(ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
-cnt.v_vnodein++;
-cnt.v_vnodepgsin += npages;
+PCPU_INC(cnt.v_vnodein);
+PCPU_ADD(cnt.v_vnodepgsin, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
@@ -626,8 +626,8 @@ smbfs_putpages(ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
-cnt.v_vnodeout++;
-cnt.v_vnodepgsout += count;
+PCPU_INC(cnt.v_vnodeout);
+PCPU_ADD(cnt.v_vnodepgsout, count);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;

View File

@@ -412,7 +412,7 @@ mi_switch(int flags, struct thread *newtd)
td->td_runtime += new_switchtime - PCPU_GET(switchtime);
PCPU_SET(switchtime, new_switchtime);
td->td_generation++; /* bump preempt-detect counter */
-cnt.v_swtch++;
+PCPU_INC(cnt.v_swtch);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
td->td_tid, td->td_sched, p->p_pid, p->p_comm);

View File

@@ -405,7 +405,7 @@ thread_exit(void)
p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
-cnt.v_swtch++;
+PCPU_INC(cnt.v_swtch);
/*
* Aggregate this thread's tick stats in the parent so they are not
* lost. Also add the child usage to our own when the final thread

View File

@@ -190,8 +190,8 @@ ast(struct trapframe *framep)
#endif
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
TDF_NEEDRESCHED | TDF_INTERRUPT);
-cnt.v_trap++;
mtx_unlock_spin(&sched_lock);
+PCPU_INC(cnt.v_trap);
/*
* XXXKSE While the fact that we owe a user profiling

View File

@@ -159,8 +159,8 @@ nfs_getpages(struct vop_getpages_args *ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
-cnt.v_vnodein++;
-cnt.v_vnodepgsin += npages;
+PCPU_INC(cnt.v_vnodein);
+PCPU_ADD(cnt.v_vnodepgsin, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;
@@ -323,8 +323,8 @@ nfs_putpages(struct vop_putpages_args *ap)
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
-cnt.v_vnodeout++;
-cnt.v_vnodepgsout += count;
+PCPU_INC(cnt.v_vnodeout);
+PCPU_ADD(cnt.v_vnodepgsout, count);
iov.iov_base = (caddr_t) kva;
iov.iov_len = count;

View File

@@ -1037,8 +1037,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
}
bp->b_npages = j - i;
-cnt.v_swapin++;
-cnt.v_swappgsin += bp->b_npages;
+PCPU_INC(cnt.v_swapin);
+PCPU_ADD(cnt.v_swappgsin, bp->b_npages);
/*
* We still hold the lock on mreq, and our automatic completion routine
@@ -1072,7 +1072,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
vm_page_lock_queues();
vm_page_flag_set(mreq, PG_REFERENCED);
vm_page_unlock_queues();
-cnt.v_intrans++;
+PCPU_INC(cnt.v_intrans);
if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
printf(
"swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
@@ -1263,8 +1263,8 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_dirtyoff = 0;
bp->b_dirtyend = bp->b_bcount;
-cnt.v_swapout++;
-cnt.v_swappgsout += bp->b_npages;
+PCPU_INC(cnt.v_swapout);
+PCPU_ADD(cnt.v_swappgsout, bp->b_npages);
/*
* asynchronous

View File

@@ -655,7 +655,7 @@ vm_object_terminate(vm_object_t object)
"p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
if (p->wire_count == 0) {
vm_page_free(p);
-cnt.v_pfree++;
+PCPU_INC(cnt.v_pfree);
} else {
vm_page_remove(p);
}

View File

@@ -1045,7 +1045,7 @@ vm_page_activate(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-cnt.v_reactivated++;
+PCPU_INC(cnt.v_reactivated);
vm_pageq_remove(m);
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -1112,7 +1112,7 @@ vm_page_free_toq(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
KASSERT(!pmap_page_is_mapped(m),
("vm_page_free_toq: freeing mapped page %p", m));
-cnt.v_tfree++;
+PCPU_INC(cnt.v_tfree);
if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
printf(
@@ -1286,7 +1286,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
return;
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-cnt.v_reactivated++;
+PCPU_INC(cnt.v_reactivated);
vm_page_flag_clear(m, PG_WINATCFLS);
vm_pageq_remove(m);
if (athead)
@@ -1295,6 +1295,11 @@ _vm_page_deactivate(vm_page_t m, int athead)
TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
vm_page_queues[PQ_INACTIVE].lcnt++;
+/*
+* No atomic is needed here since the page queues lock
+* already protects this field.
+*/
+cnt.v_inactive_count++;
}
}
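
The v_inactive_count hunk above shows the other side of the rule: when an existing lock already serializes every update to a field (here the page queues lock), a plain increment under that lock is enough, and neither an atomic nor a per-CPU counter is needed. A small pthread-based sketch of that case, with hypothetical names (queue_lock, inactive_count, page_deactivate()), not the kernel code itself:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical illustration of a counter covered by an existing lock. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long inactive_count;    /* protected by queue_lock */

static void
page_deactivate(void)
{
        pthread_mutex_lock(&queue_lock);
        /* ... the queue manipulation the lock exists for would go here ... */
        inactive_count++;       /* plain increment: the lock serializes it */
        pthread_mutex_unlock(&queue_lock);
}

int
main(void)
{
        page_deactivate();
        printf("inactive_count = %lu\n", inactive_count);
        return (0);
}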

View File

@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
goto unlock_return;
}
next = TAILQ_NEXT(p, listq);
-cnt.v_pdpages++;
+PCPU_INC(cnt.v_pdpages);
if (p->wire_count != 0 ||
p->hold_count != 0 ||
p->busy != 0 ||
@@ -745,7 +745,7 @@ vm_pageout_scan(int pass)
m != NULL && maxscan-- > 0 && page_shortage > 0;
m = next) {
-cnt.v_pdpages++;
+PCPU_INC(cnt.v_pdpages);
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
goto rescan0;
@@ -856,7 +856,7 @@ vm_pageout_scan(int pass)
* Invalid pages can be easily freed
*/
vm_page_free(m);
-cnt.v_dfree++;
+PCPU_INC(cnt.v_dfree);
--page_shortage;
} else if (m->dirty == 0) {
/*
@@ -1089,7 +1089,7 @@ vm_pageout_scan(int pass)
* The count for pagedaemon pages is done after checking the
* page for eligibility...
*/
-cnt.v_pdpages++;
+PCPU_INC(cnt.v_pdpages);
/*
* Check to see "how much" the page has been used.
@@ -1168,7 +1168,7 @@ vm_pageout_scan(int pass)
m));
vm_page_free(m);
VM_OBJECT_UNLOCK(object);
-cnt.v_dfree++;
+PCPU_INC(cnt.v_dfree);
cache_last_free = cache_cur;
cache_first_failure = -1;
break;
@@ -1427,6 +1427,11 @@ vm_pageout()
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
cnt.v_free_severe = cnt.v_free_min / 2;
+/*
+* These adds don't need to be atomic since we are only
+* initializing v_free_min and v_free_severe here.
+*/
cnt.v_free_min += cnt.v_free_reserved;
cnt.v_free_severe += cnt.v_free_reserved;
@@ -1524,7 +1529,7 @@ vm_pageout()
}
}
if (vm_pages_needed)
-cnt.v_pdwakeups++;
+PCPU_INC(cnt.v_pdwakeups);
mtx_unlock(&vm_page_queue_free_mtx);
vm_pageout_scan(pass);
}

View File

@@ -728,8 +728,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
-cnt.v_vnodein++;
-cnt.v_vnodepgsin++;
+PCPU_INC(cnt.v_vnodein);
+PCPU_INC(cnt.v_vnodepgsin);
error = vnode_pager_input_old(object, m[reqpage]);
VM_OBJECT_UNLOCK(object);
return (error);
@@ -757,8 +757,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
-cnt.v_vnodein++;
-cnt.v_vnodepgsin++;
+PCPU_INC(cnt.v_vnodein);
+PCPU_INC(cnt.v_vnodepgsin);
return vnode_pager_input_smlfs(object, m[reqpage]);
}
@@ -909,8 +909,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
bp->b_runningbufspace = bp->b_bufsize;
atomic_add_int(&runningbufspace, bp->b_runningbufspace);
-cnt.v_vnodein++;
-cnt.v_vnodepgsin += count;
+PCPU_INC(cnt.v_vnodein);
+PCPU_ADD(cnt.v_vnodepgsin, count);
/* do the input */
bp->b_iooffset = dbtob(bp->b_blkno);
@@ -1157,8 +1157,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
auio.uio_resid = maxsize;
auio.uio_td = (struct thread *) 0;
error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
-cnt.v_vnodeout++;
-cnt.v_vnodepgsout += ncount;
+PCPU_INC(cnt.v_vnodeout);
+PCPU_ADD(cnt.v_vnodepgsout, ncount);
if (error) {
if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))