Track three new sendfile-related statistics:

- The number of times sendfile had to do disk I/O
- The number of times sfbuf allocation failed
- The number of times sfbuf allocation had to wait
Mike Silbersack 2003-12-28 08:57:09 +00:00
parent eb2cdcb127
commit ddeb5b242e
9 changed files with 13 additions and 0 deletions

View File

@@ -415,6 +415,7 @@ sf_buf_alloc(struct vm_page *m)
        mtx_lock(&sf_freelist.sf_lock);
        while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
                sf_buf_alloc_want++;
+                mbstat.sf_allocwait++;
                error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
                    "sfbufa", 0);
                sf_buf_alloc_want--;
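
The per-architecture sf_buf_alloc() hunks in this commit all follow the same shape: the waiter count and the new statistic are bumped together, immediately before the thread sleeps on the buffer freelist, so sf_allocwait ends up counting sleeps rather than callers. As a rough userland analogy of that accounting (this is not the kernel code; the freelist, counter, and function names below are made up for illustration), the following sketch bumps a "had to wait" counter each time an allocation blocks on an empty freelist:

/*
 * Userland analogy of the wait accounting above: bump a statistic each
 * time the allocating thread must sleep because the freelist is empty,
 * mirroring mbstat.sf_allocwait++ before msleep() in sf_buf_alloc().
 * All names are hypothetical; build with: cc -o waitstat waitstat.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct buf {
        struct buf *next;
};

static struct buf *freelist;
static unsigned long alloc_wait_count;          /* analogue of sf_allocwait */
static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t freelist_cv = PTHREAD_COND_INITIALIZER;

static struct buf *
buf_alloc(void)
{
        struct buf *b;

        pthread_mutex_lock(&freelist_lock);
        while ((b = freelist) == NULL) {
                alloc_wait_count++;             /* count every sleep */
                pthread_cond_wait(&freelist_cv, &freelist_lock);
        }
        freelist = b->next;
        pthread_mutex_unlock(&freelist_lock);
        return (b);
}

static void
buf_free(struct buf *b)
{
        pthread_mutex_lock(&freelist_lock);
        b->next = freelist;
        freelist = b;
        pthread_cond_signal(&freelist_cv);
        pthread_mutex_unlock(&freelist_lock);
}

static void *
producer(void *arg)
{
        sleep(1);                               /* leave the freelist empty for a while */
        buf_free(malloc(sizeof(struct buf)));   /* then hand over one buffer */
        return (arg);
}

int
main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);
        buf_free(buf_alloc());                  /* this allocation has to wait */
        pthread_join(t, NULL);
        printf("allocations that had to wait (sleeps): %lu\n", alloc_wait_count);
        return (0);
}

Like the kernel counter, this counts each retry of the wait loop, so a single allocation that sleeps twice adds two to the statistic.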

View File

@@ -460,6 +460,7 @@ sf_buf_alloc(struct vm_page *m)
        mtx_lock(&sf_freelist.sf_lock);
        while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
                sf_buf_alloc_want++;
+                mbstat.sf_allocwait++;
                error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
                    "sfbufa", 0);
                sf_buf_alloc_want--;

View File

@@ -621,6 +621,7 @@ sf_buf_alloc(struct vm_page *m)
        }
        while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
                sf_buf_alloc_want++;
+                mbstat.sf_allocwait++;
                error = msleep(&sf_buf_freelist, &sf_buf_lock, PVM|PCATCH,
                    "sfbufa", 0);
                sf_buf_alloc_want--;

View File

@@ -354,6 +354,7 @@ sf_buf_alloc(struct vm_page *m)
        mtx_lock(&sf_freelist.sf_lock);
        while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
                sf_buf_alloc_want++;
+                mbstat.sf_allocwait++;
                error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
                    "sfbufa", 0);
                sf_buf_alloc_want--;

View File

@@ -1866,6 +1866,7 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
                                sbunlock(&so->so_snd);
                                goto done;
                        }
+                        mbstat.sf_iocnt++;
                } else
                        VM_OBJECT_UNLOCK(obj);
                vm_page_unlock_queues();
@@ -1875,6 +1876,7 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
                 * but this wait can be interrupted.
                 */
                if ((sf = sf_buf_alloc(pg)) == NULL) {
+                        mbstat.sf_allocfail++;
                        vm_page_lock_queues();
                        vm_page_unwire(pg, 0);
                        if (pg->wire_count == 0 && pg->object == NULL)

View File

@@ -269,6 +269,7 @@ sf_buf_alloc(struct vm_page *m)
        mtx_lock(&sf_freelist.sf_lock);
        while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
                sf_buf_alloc_want++;
+                mbstat.sf_allocwait++;
                error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
                    "sfbufa", 0);
                sf_buf_alloc_want--;

View File

@@ -269,6 +269,7 @@ sf_buf_alloc(struct vm_page *m)
        mtx_lock(&sf_freelist.sf_lock);
        while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
                sf_buf_alloc_want++;
+                mbstat.sf_allocwait++;
                error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
                    "sfbufa", 0);
                sf_buf_alloc_want--;

View File

@@ -396,6 +396,7 @@ sf_buf_alloc(struct vm_page *m)
        mtx_lock(&sf_freelist.sf_lock);
        while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
                sf_buf_alloc_want++;
+                mbstat.sf_allocwait++;
                error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
                    "sfbufa", 0);
                sf_buf_alloc_want--;

View File

@@ -261,6 +261,10 @@ struct mbstat {
        u_int   m_clperbuck;    /* number of clusters per "bucket" */
        /* Number of mbtypes (gives # elems in mbpstat's mb_mbtypes[] array: */
        short   m_numtypes;
+        /* XXX: Sendfile stats should eventually move to their own struct */
+        u_long  sf_iocnt;       /* times sendfile had to do disk I/O */
+        u_long  sf_allocfail;   /* times sfbuf allocation failed */
+        u_long  sf_allocwait;   /* times sfbuf allocation had to wait */
};
/*
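
With the fields added to struct mbstat, the new counters can be read from userland. A minimal sketch, assuming struct mbstat is exported through the kern.ipc.mbstat sysctl (the sysctl FreeBSD uses to publish mbuf statistics), with error handling kept to a minimum:

/*
 * Print the sendfile counters added to struct mbstat in this commit.
 * Assumes FreeBSD and the kern.ipc.mbstat sysctl; build with: cc -o sfstat sfstat.c
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
        struct mbstat mb;
        size_t len = sizeof(mb);

        if (sysctlbyname("kern.ipc.mbstat", &mb, &len, NULL, 0) == -1)
                err(1, "sysctlbyname(kern.ipc.mbstat)");

        printf("times sendfile had to do disk I/O:   %lu\n", mb.sf_iocnt);
        printf("times sfbuf allocation failed:       %lu\n", mb.sf_allocfail);
        printf("times sfbuf allocation had to wait:  %lu\n", mb.sf_allocwait);
        return (0);
}

Running the program before and after a sendfile-heavy workload and comparing the numbers shows whether transfers were served from the page cache (sf_iocnt unchanged) or had to go to disk, and whether sendfile buffers were a bottleneck (sf_allocwait climbing).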