/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
*/

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern uma_zone_t smbfs_pbuf_zone;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");
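
/*
 * smbfs_fastlookup makes readdir enter each name it returns into the
 * name cache as a side effect, which can save a server round trip on
 * the lookup that usually follows.  It is a run-time tunable, e.g.:
 *
 *	sysctl vfs.smbfs.fastlookup=0
 */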

#define DE_SIZE	(sizeof(struct dirent))
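
/*
 * Translate an SMB directory listing into a stream of fixed-size
 * struct dirent records.  Logical offsets 0 and 1 are the locally
 * synthesized "." and ".." entries; later offsets map one-to-one onto
 * entries returned by a server-side search (findopen/findnext).
 */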
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
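	/*
	 * Synthesize "." and ".." (offsets 0 and 1) without touching the
	 * server; d_namlen selects one or both of the dot bytes written
	 * into the reused dirent below.
	 */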
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_off = offset + 1;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_type = DT_DIR;
		dirent_terminate(&de);
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
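	/*
	 * The server-side search only advances sequentially.  If the
	 * caller's offset does not match the open search, or no search is
	 * open, restart it and wind forward entry by entry below.
	 */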
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			/* Running off the end of the directory is not an error. */
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_off = offset + 1;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		dirent_terminate(&de);
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
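	/*
	 * Directory reads require the vnode exclusively locked.  lks is
	 * currently pinned to LK_EXCLUSIVE (the lockstatus() query is
	 * commented out), so the upgrade/downgrade branches below are
	 * effectively dormant.
	 */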
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
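	/*
	 * Keep cached data coherent before reading: if we modified the
	 * file locally, refresh the attribute cache; otherwise compare the
	 * server's mtime and invalidate our buffers if another client
	 * changed the file behind our back.
	 */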
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
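	/*
	 * For append or synchronous writes, push out and invalidate any
	 * locally modified data first, so the write is positioned against
	 * the file size the server actually has.
	 */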
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
			if (error)
				break;
			if (uiop->uio_resid) {
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
		default:
			printf("smbfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_error = error;
			bp->b_ioflags |= BIO_ERROR;
		}
	} else { /* write */
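		/*
		 * Clip the dirty range to the current file size; the file
		 * may have been truncated since the buffer was dirtied.
		 */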
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid =
			    bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE +
			    bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR ||
			    (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				bp->b_flags &= ~(B_INVAL | B_NOCACHE);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			free(uiop, M_SMBFSDATA);
			smbfs_free_scred(scred);
			return 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
/* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int *a_rbehind;
	int *a_rahead;
} */
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
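
	/*
	 * Borrow kernel virtual address space from a pbuf and map the
	 * target pages into it, so the SMB read can fill them through an
	 * ordinary UIO_SYSSPACE uio.
	 */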
	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			vm_page_valid(m);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			vm_page_invalid(m);
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * possible bug: all I/O done in sync mode
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(struct vop_putpages_args *ap)
/* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int a_sync;
	int *a_rtvals;
} */
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}
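
	/*
	 * As in getpages, borrow KVA from a pbuf and map the pages so the
	 * data can go out through the regular smb_write() path.
	 */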
	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error == 0) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    npages * PAGE_SIZE, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (VN_IS_DOOMED(vp))
		return 0;
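
	/*
	 * Only one flush may run at a time; later callers sleep here until
	 * it completes.  The tsleep() result is deliberately overwritten:
	 * the sleep is just a bounded retry, and interruption is detected
	 * via smb_td_intr() instead.
	 */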
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}