Giant pushdown for read/write/pread/pwrite syscalls.

kern/kern_descrip.c:
Acquire Giant in fdrop_locked when the file refcount hits zero; this
largely removes the requirement for the caller to own Giant.
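Not part of the commit itself, but the pattern can be shown with a small
userland analogue: one pthread mutex stands in for Giant, another for the
per-file lock, and only the thread that drops the last reference enters the
globally locked teardown.  All names below are illustrative, not the kernel's.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER; /* stand-in for Giant */

    struct ref_file {
        pthread_mutex_t f_lock;     /* protects f_count */
        int             f_count;    /* reference count */
    };

    /*
     * Drop one reference.  Only the caller that takes the count to zero
     * enters the globally locked teardown, so the common drop path no
     * longer requires the caller to hold the global lock.
     */
    static int
    ref_drop(struct ref_file *fp)
    {
        pthread_mutex_lock(&fp->f_lock);
        if (--fp->f_count > 0) {
            pthread_mutex_unlock(&fp->f_lock);
            return (0);
        }
        pthread_mutex_unlock(&fp->f_lock);

        pthread_mutex_lock(&giant);     /* final close still runs under the big lock */
        printf("freeing %p\n", (void *)fp);
        pthread_mutex_destroy(&fp->f_lock);
        free(fp);
        pthread_mutex_unlock(&giant);
        return (0);
    }

    int
    main(void)
    {
        struct ref_file *fp = malloc(sizeof(*fp));

        if (fp == NULL)
            return (1);
        pthread_mutex_init(&fp->f_lock, NULL);
        fp->f_count = 2;
        ref_drop(fp);   /* 2 -> 1: only the per-file lock is taken */
        ref_drop(fp);   /* 1 -> 0: teardown runs under the big lock */
        return (0);
    }

The point of the pattern is that the common drop path touches only the
per-file lock, so callers of fdrop do not have to be under Giant themselves.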

kern/kern_ktrace.c:
Acquire Giant in ktrgenio; this simplifies the locking in the upper
read/write syscalls.

kern/vfs_bio.c:
Acquire Giant in bwillwrite, but only when the dirty buffer count is high
enough that the caller must wait.

kern/sys_generic.c:
Giant pushdown; remove Giant from:
   read, pread, write and pwrite.
readv and writev aren't done yet because of the possible malloc calls
needed for iov to uio processing (a sketch of that copy follows below).
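A rough userland sketch of why the vector forms are harder: the caller's
iovec array has to be copied into private storage before the I/O loop runs,
and once it exceeds a small fixed-size array that copy needs an allocation
whose failure and cleanup paths complicate dropping the lock.  The function
name and the 8-entry limit here are assumptions for illustration only.

    #include <sys/uio.h>
    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #define SMALL_IOV   8   /* illustrative on-stack limit */

    /*
     * Copy a caller-supplied iovec array into private storage before the
     * I/O loop runs.  Small arrays reuse the caller's stack buffer; larger
     * ones are malloc'd, and it is that allocation (and its error/cleanup
     * paths) that makes readv/writev harder to strip of the global lock.
     */
    static struct iovec *
    iov_copyin(const struct iovec *uiov, int iovcnt, struct iovec *stackiov,
        int *needfree)
    {
        struct iovec *iov;

        *needfree = 0;
        if (iovcnt <= 0 || iovcnt > 1024) {
            errno = EINVAL;
            return (NULL);
        }
        if (iovcnt <= SMALL_IOV) {
            iov = stackiov;
        } else {
            if ((iov = malloc(iovcnt * sizeof(*iov))) == NULL)
                return (NULL);
            *needfree = 1;
        }
        memcpy(iov, uiov, iovcnt * sizeof(*iov));
        return (iov);
    }

A caller would pass a struct iovec stackiov[SMALL_IOV] and free() the
returned pointer only when *needfree was set.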

kern/sys_socket.c:
Grab Giant in the socket fo_read/fo_write functions.

kern/vfs_vnops.c:
Grab Giant in the vnode fo_read/fo_write functions (see the dispatch
sketch below).
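For context, a self-contained sketch of the layering the pushdown relies on:
the syscall side calls through the file's fileops table, so each backend read
routine can serialize itself (here a global pthread mutex stands in for Giant)
and the dispatcher stays lock-free.  The struct and function names are
simplified stand-ins; dofileread below only mimics the role of the kernel
routine of the same name.

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;

    struct xfile;
    struct fileops {
        int (*fo_read)(struct xfile *fp, char *buf, size_t len);
    };
    struct xfile {
        const struct fileops *f_ops;
        const char           *f_data;
    };

    /*
     * Backend read routine: takes the global lock itself, the way vn_read
     * and soo_read now do, instead of relying on the syscall layer.
     */
    static int
    demo_read(struct xfile *fp, char *buf, size_t len)
    {
        pthread_mutex_lock(&giant);
        strncpy(buf, fp->f_data, len - 1);
        buf[len - 1] = '\0';
        pthread_mutex_unlock(&giant);
        return (0);
    }

    static const struct fileops demo_ops = { demo_read };

    /*
     * Syscall-layer dispatcher: after the pushdown it no longer touches
     * the global lock and simply calls through the fileops table.
     */
    static int
    dofileread(struct xfile *fp, char *buf, size_t len)
    {
        return (fp->f_ops->fo_read(fp, buf, len));
    }

    int
    main(void)
    {
        struct xfile fp = { &demo_ops, "hello" };
        char buf[16];

        dofileread(&fp, buf, sizeof(buf));
        printf("%s\n", buf);
        return (0);
    }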
Author:	Alfred Perlstein
Date:	2002-03-15 08:03:46 +00:00
Commit:	628abf6c69 (parent 67253d7329)

6 changed files with 26 additions and 11 deletions

kern/kern_descrip.c:

@@ -1716,6 +1716,7 @@ fdrop_locked(fp, td)
 		FILE_UNLOCK(fp);
 		return (0);
 	}
+	mtx_lock(&Giant);
 	if (fp->f_count < 0)
 		panic("fdrop: count < 0");
 	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
@@ -1733,6 +1734,7 @@ fdrop_locked(fp, td)
 	else
 		error = 0;
 	ffree(fp);
+	mtx_unlock(&Giant);
 	return (error);
 }

kern/kern_ktrace.c:

@@ -181,6 +181,8 @@ ktrgenio(vp, fd, rw, uio, error)
 	if (error)
 		return;
+	mtx_lock(&Giant);
 	/*
 	 * don't let p_tracep get ripped out from under us
 	 */
@@ -200,6 +202,7 @@ ktrgenio(vp, fd, rw, uio, error)
 	vrele(vp);
 	FREE(kth, M_KTRACE);
 	p->p_traceflag &= ~KTRFAC_ACTIVE;
+	mtx_unlock(&Giant);
 }
 void

kern/sys_generic.c:

@@ -102,13 +102,11 @@ read(td, uap)
 	struct file *fp;
 	int error;
-	mtx_lock(&Giant);
 	if ((error = fget_read(td, uap->fd, &fp)) == 0) {
 		error = dofileread(td, fp, uap->fd, uap->buf,
 		    uap->nbyte, (off_t)-1, 0);
 		fdrop(fp, td);
 	}
-	mtx_unlock(&Giant);
 	return(error);
 }
@@ -137,7 +135,6 @@ pread(td, uap)
 	if ((error = fget_read(td, uap->fd, &fp)) != 0)
 		return (error);
-	mtx_lock(&Giant);
 	if (fp->f_type != DTYPE_VNODE) {
 		error = ESPIPE;
 	} else {
@@ -145,7 +142,6 @@ pread(td, uap)
 		    uap->offset, FOF_OFFSET);
 	}
 	fdrop(fp, td);
-	mtx_unlock(&Giant);
 	return(error);
 }
@@ -332,7 +328,6 @@ write(td, uap)
 	struct file *fp;
 	int error;
-	mtx_lock(&Giant);
 	if ((error = fget_write(td, uap->fd, &fp)) == 0) {
 		error = dofilewrite(td, fp, uap->fd, uap->buf, uap->nbyte,
 		    (off_t)-1, 0);
@@ -340,7 +335,6 @@ write(td, uap)
 	} else {
 		error = EBADF; /* XXX this can't be right */
 	}
-	mtx_unlock(&Giant);
 	return(error);
 }
@@ -368,7 +362,6 @@ pwrite(td, uap)
 	int error;
 	if ((error = fget_write(td, uap->fd, &fp)) == 0) {
-		mtx_lock(&Giant);
 		if (fp->f_type == DTYPE_VNODE) {
 			error = dofilewrite(td, fp, uap->fd, uap->buf,
 			    uap->nbyte, uap->offset, FOF_OFFSET);
@@ -376,7 +369,6 @@ pwrite(td, uap)
 			error = ESPIPE;
 		}
 		fdrop(fp, td);
-		mtx_unlock(&Giant);
 	} else {
 		error = EBADF; /* this can't be right */
 	}

kern/sys_socket.c:

@@ -65,7 +65,12 @@ soo_read(fp, uio, cred, flags, td)
 	int flags;
 {
 	struct socket *so = (struct socket *)fp->f_data;
-	return so->so_proto->pr_usrreqs->pru_soreceive(so, 0, uio, 0, 0, 0);
+	int error;
+	mtx_lock(&Giant);
+	error = so->so_proto->pr_usrreqs->pru_soreceive(so, 0, uio, 0, 0, 0);
+	mtx_unlock(&Giant);
+	return (error);
 }
 /* ARGSUSED */
@@ -78,8 +83,13 @@ soo_write(fp, uio, cred, flags, td)
 	int flags;
 {
 	struct socket *so = (struct socket *)fp->f_data;
-	return so->so_proto->pr_usrreqs->pru_sosend(so, 0, uio, 0, 0, 0,
+	int error;
+	mtx_lock(&Giant);
+	error = so->so_proto->pr_usrreqs->pru_sosend(so, 0, uio, 0, 0, 0,
 	    uio->uio_td);
+	mtx_unlock(&Giant);
+	return (error);
 }
 int

kern/vfs_bio.c:

@@ -1079,6 +1079,7 @@ bwillwrite(void)
 	if (numdirtybuffers >= hidirtybuffers) {
 		int s;
+		mtx_lock(&Giant);
 		s = splbio();
 		while (numdirtybuffers >= hidirtybuffers) {
 			bd_wakeup(1);
@@ -1086,6 +1087,7 @@ bwillwrite(void)
 			tsleep(&needsbuffer, (PRIBIO + 4), "flswai", 0);
 		}
 		splx(s);
+		mtx_unlock(&Giant);
 	}
 }

kern/vfs_vnops.c:

@@ -443,6 +443,7 @@ vn_read(fp, uio, cred, flags, td)
 	struct vnode *vp;
 	int error, ioflag;
+	mtx_lock(&Giant);
 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
 	    uio->uio_td, td));
 	vp = (struct vnode *)fp->f_data;
@@ -463,6 +464,7 @@ vn_read(fp, uio, cred, flags, td)
 		fp->f_offset = uio->uio_offset;
 	fp->f_nextoff = uio->uio_offset;
 	VOP_UNLOCK(vp, 0, td);
+	mtx_unlock(&Giant);
 	return (error);
 }
@@ -481,6 +483,7 @@ vn_write(fp, uio, cred, flags, td)
 	struct mount *mp;
 	int error, ioflag;
+	mtx_lock(&Giant);
 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
 	    uio->uio_td, td));
 	vp = (struct vnode *)fp->f_data;
@@ -498,8 +501,10 @@ vn_write(fp, uio, cred, flags, td)
 		ioflag |= IO_SYNC;
 	mp = NULL;
 	if (vp->v_type != VCHR &&
-	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
+	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
+		mtx_unlock(&Giant);
 		return (error);
+	}
 	VOP_LEASE(vp, td, cred, LEASE_WRITE);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 	if ((flags & FOF_OFFSET) == 0)
@@ -511,6 +516,7 @@ vn_write(fp, uio, cred, flags, td)
 	fp->f_nextoff = uio->uio_offset;
 	VOP_UNLOCK(vp, 0, td);
 	vn_finished_write(mp);
+	mtx_unlock(&Giant);
 	return (error);
 }