Some style(9) fixes in order to fabricate a commit to denote that the
commit message for r201896 actually should have read:

As nfsm_srvmtofh_xx() assumes the 4-byte alignment required by XDR,
ensure the mbuf data is aligned accordingly by calling nfs_realign()
in fha_extract_info(). This fix is orthogonal to the problem solved
by r199274/r199284.

PR:		142102 (second part)
MFC after:	1 week
Marius Strobl 2010-01-09 15:59:15 +00:00
parent 77680d964f
commit 869652f6f1
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=201899
3 changed files with 75 additions and 79 deletions
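
A note on the issue for context: XDR encodes everything in 4-byte units, so parsing a file handle word-at-a-time out of an mbuf is only safe when the data pointer itself is 4-byte aligned. That is the assumption nfsm_srvmtofh_xx() makes, and the reason fha_extract_info() must call nfs_realign() first. A minimal userspace sketch of the alignment test involved (xdr_aligned() is an invented name, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/*
 * XDR operates on 4-byte units, so a buffer may only be parsed
 * word-at-a-time when its start address is 4-byte aligned.  This
 * mirrors the kind of test nfs_realign() performs before deciding
 * whether the mbuf data must be copied to an aligned buffer.
 */
static int
xdr_aligned(const void *p)
{

	return (((uintptr_t)p & 3) == 0);
}

int
main(void)
{
	uint32_t words[2];
	char *p = (char *)words;

	printf("p   aligned: %d\n", xdr_aligned(p));	/* 1: 4-byte aligned */
	printf("p+1 aligned: %d\n", xdr_aligned(p + 1));	/* 0: misaligned */
	return (0);
}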


@@ -82,14 +82,13 @@
 #define IO_METASYNC 0
 #endif
 
 /* NFS state flags XXX -Wunused */
 #define NFSRV_SNDLOCK 0x01000000 /* Send socket lock */
 #define NFSRV_WANTSND 0x02000000 /* Want above */
 
-
 /*
- * Structures for the nfssvc(2) syscall. Not that anyone but nfsd and mount_nfs
- * should ever try and use it.
+ * Structures for the nfssvc(2) syscall.  Not that anyone but nfsd and
+ * mount_nfs should ever try and use it.
  */
 
 /*


@@ -72,14 +72,15 @@ static struct fha_global {
 } g_fha;
 
 /*
- * These are the entries in the filehandle hash. They talk about a specific
- * file, requests against which are being handled by one or more nfsds. We keep
- * a chain of nfsds against the file. We only have more than one if reads are
- * ongoing, and then only if the reads affect disparate regions of the file.
+ * These are the entries in the filehandle hash.  They talk about a specific
+ * file, requests against which are being handled by one or more nfsds.  We
+ * keep a chain of nfsds against the file.  We only have more than one if reads
+ * are ongoing, and then only if the reads affect disparate regions of the
+ * file.
  *
  * In general, we want to assign a new request to an existing nfsd if it is
  * going to contend with work happening already on that nfsd, or if the
- * operation is a read and the nfsd is already handling a proximate read. We
+ * operation is a read and the nfsd is already handling a proximate read.  We
  * do this to avoid jumping around in the read stream unnecessarily, and to
  * avoid contention between threads over single files.
  */
@@ -168,8 +169,8 @@ fha_extract_info(struct svc_req *req, struct fha_info *i)
 	rpcproc_t procnum;
 
 	/*
-	 * We start off with a random fh. If we get a reasonable
-	 * procnum, we set the fh. If there's a concept of offset
+	 * We start off with a random fh.  If we get a reasonable
+	 * procnum, we set the fh.  If there's a concept of offset
 	 * that we're interested in, we set that.
 	 */
 	i->fh = ++random_fh;
@@ -178,7 +179,7 @@ fha_extract_info(struct svc_req *req, struct fha_info *i)
 
 	/*
 	 * Extract the procnum and convert to v3 form if necessary,
-	 * taking care to deal with out-of-range procnums. Caller will
+	 * taking care to deal with out-of-range procnums.  Caller will
 	 * ensure that rq_vers is either 2 or 3.
 	 */
 	procnum = req->rq_proc;
@@ -189,8 +190,8 @@ fha_extract_info(struct svc_req *req, struct fha_info *i)
 	}
 
 	/*
-	 * We do affinity for most. However, we divide a realm of affinity
-	 * by file offset so as to allow for concurrent random access. We
+	 * We do affinity for most.  However, we divide a realm of affinity
+	 * by file offset so as to allow for concurrent random access.  We
 	 * only do this for reads today, but this may change when IFS supports
 	 * efficient concurrent writes.
 	 */
@@ -271,7 +272,7 @@ fha_hash_entry_new(u_int64_t fh)
 	e->num_threads = 0;
 	LIST_INIT(&e->threads);
 
-	return e;
+	return (e);
 }
 
 static void
@ -296,10 +297,9 @@ fha_hash_entry_lookup(SVCPOOL *pool, u_int64_t fh)
{
struct fha_hash_entry *fhe, *new_fhe;
LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link) {
LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link)
if (fhe->fh == fh)
break;
}
if (!fhe) {
/* Allocate a new entry. */
@@ -308,25 +308,24 @@ fha_hash_entry_lookup(SVCPOOL *pool, u_int64_t fh)
 		mtx_lock(&pool->sp_lock);
 
 		/* Double-check to make sure we still need the new entry. */
-		LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link) {
+		LIST_FOREACH(fhe, &g_fha.hashtable[fh % g_fha.hashmask], link)
 			if (fhe->fh == fh)
 				break;
-		}
+
 		if (!fhe) {
 			fhe = new_fhe;
 			LIST_INSERT_HEAD(&g_fha.hashtable[fh % g_fha.hashmask],
 			    fhe, link);
-		} else {
+		} else
 			fha_hash_entry_destroy(new_fhe);
-		}
 	}
 
-	return fhe;
+	return (fhe);
 }
 
 static void
 fha_hash_entry_add_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
 {
	LIST_INSERT_HEAD(&fhe->threads, thread, st_alink);
	fhe->num_threads++;
 }
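
Worth noting in the hunk above: fha_hash_entry_lookup() is a double-checked lookup. It drops the pool lock to allocate, since allocation may sleep, retakes it, re-scans the bucket, and discards the new entry if a racing thread inserted one first. A self-contained pthreads sketch of the same pattern, with all names invented:

#include <pthread.h>
#include <stdlib.h>

struct entry {
	unsigned long	key;
	struct entry	*next;
};

static struct entry *head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Scan the list; caller must hold table_lock. */
static struct entry *
lookup_locked(unsigned long key)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next)
		if (e->key == key)
			return (e);
	return (NULL);
}

static struct entry *
entry_lookup(unsigned long key)
{
	struct entry *e, *new_e;

	pthread_mutex_lock(&table_lock);
	e = lookup_locked(key);
	if (e == NULL) {
		/* Drop the lock to allocate; allocation may sleep. */
		pthread_mutex_unlock(&table_lock);
		new_e = calloc(1, sizeof(*new_e));
		if (new_e == NULL)
			abort();	/* error handling elided */
		new_e->key = key;
		pthread_mutex_lock(&table_lock);
		/* Double-check: a racing thread may have inserted one. */
		e = lookup_locked(key);
		if (e == NULL) {
			new_e->next = head;
			head = new_e;
			e = new_e;
		} else
			free(new_e);	/* lost the race; discard ours */
	}
	pthread_mutex_unlock(&table_lock);
	return (e);
}

int
main(void)
{

	return (entry_lookup(42) == entry_lookup(42) ? 0 : 1);
}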
@ -428,7 +427,7 @@ fha_hash_entry_choose_thread(SVCPOOL *pool, struct fha_hash_entry *fhe,
}
/*
* We didn't find a good match yet. See if we can add
* We didn't find a good match yet. See if we can add
* a new thread to this file handle entry's thread list.
*/
if ((fha_ctls.max_nfsds_per_fh == 0) ||
@@ -468,7 +467,7 @@ fha_hash_entry_choose_thread(SVCPOOL *pool, struct fha_hash_entry *fhe,
 }
 
 /*
- * After getting a request, try to assign it to some thread. Usually we
+ * After getting a request, try to assign it to some thread.  Usually we
  * handle it ourselves.
  */
 SVCTHREAD *
@@ -512,7 +511,7 @@ fha_assign(SVCTHREAD *this_thread, struct svc_req *req)
 }
 
 /*
- * Called when we're done with an operation. The request has already
+ * Called when we're done with an operation.  The request has already
 * been de-queued.
 */
 void


@@ -187,9 +187,9 @@ nfssvc_nfsserver(struct thread *td, struct nfssvc_args *uap)
 		}
 		error = nfssvc_addsock(fp, td);
 		fdrop(fp, td);
-	} else if (uap->flag & NFSSVC_OLDNFSD) {
+	} else if (uap->flag & NFSSVC_OLDNFSD)
 		error = nfssvc_nfsd(td, NULL);
-	} else if (uap->flag & NFSSVC_NFSD) {
+	else if (uap->flag & NFSSVC_NFSD) {
 		if (!uap->argp)
 			return (EINVAL);
 		error = copyin(uap->argp, (caddr_t)&nfsdarg,
@@ -197,9 +197,8 @@ nfssvc_nfsserver(struct thread *td, struct nfssvc_args *uap)
 		if (error)
 			return (error);
 		error = nfssvc_nfsd(td, &nfsdarg);
-	} else {
+	} else
 		error = ENXIO;
-	}
 
 	return (error);
 }
@ -447,9 +446,8 @@ nfssvc_addsock(struct file *fp, struct thread *td)
siz = sb_max_adj;
error = soreserve(so, siz, siz);
if (error) {
if (error)
return (error);
}
/*
* Steal the socket from userland so that it doesn't close
@ -471,7 +469,7 @@ nfssvc_addsock(struct file *fp, struct thread *td)
}
/*
* Called by nfssvc() for nfsds. Just loops around servicing rpc requests
* Called by nfssvc() for nfsds. Just loops around servicing rpc requests
* until it is killed by a signal.
*/
static int
@@ -496,9 +494,9 @@ nfssvc_nfsd(struct thread *td, struct nfsd_nfsd_args *args)
 #endif
 
 	/*
-	 * Only the first nfsd actually does any work. The RPC code
-	 * adds threads to it as needed. Any extra processes offered
-	 * by nfsd just exit. If nfsd is new enough, it will call us
+	 * Only the first nfsd actually does any work.  The RPC code
+	 * adds threads to it as needed.  Any extra processes offered
+	 * by nfsd just exit.  If nfsd is new enough, it will call us
 	 * once with a structure that specifies how many threads to
 	 * use.
 	 */
@ -541,7 +539,7 @@ nfssvc_nfsd(struct thread *td, struct nfsd_nfsd_args *args)
/*
* Size the NFS server's duplicate request cache at 1/2 the
* nmbclusters, floating within a (64, 2048) range. This is to
* nmbclusters, floating within a (64, 2048) range. This is to
* prevent all mbuf clusters being tied up in the NFS dupreq
* cache for small values of nmbclusters.
*/