Re-implement the client side of rpc.lockd in the kernel. This
implementation provides the correct semantics for flock(2)-style locks,
which are used by the lockf(1) command line tool and the pidfile(3)
library. It also implements recovery from server restarts and ensures
that dirty cache blocks are written to the server before obtaining locks
(allowing multiple clients to use file locking to safely share data).

Sponsored by:	Isilon Systems
PR:		94256
MFC after:	2 weeks
Doug Rabson 2008-06-26 10:21:54 +00:00
parent 91bc389e54
commit c675522fc4
28 changed files with 3103 additions and 777 deletions
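For context, a minimal userland sketch of the flock(2)-style usage that
lockf(1) and pidfile(3) rely on, and which this change makes work
correctly over NFS; the file path and payload are illustrative, not
from the commit:

#include <sys/file.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
    int fd;

    /* Illustrative file on an NFS mount. */
    fd = open("/net/server/shared.dat", O_RDWR | O_CREAT, 0644);
    if (fd < 0)
        err(1, "open");

    /* Blocks until the server's lock manager grants the exclusive
       lock; dirty cached blocks are flushed to the server first. */
    if (flock(fd, LOCK_EX) == -1)
        err(1, "flock");

    /* A read-modify-write cycle here is safe against other NFS
       clients that use the same locking protocol. */
    if (write(fd, "update\n", 7) != 7)
        err(1, "write");

    if (flock(fd, LOCK_UN) == -1)
        err(1, "flock(LOCK_UN)");
    close(fd);
    return (0);
}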

@@ -2059,6 +2059,7 @@ nfsserver/nfs_srvsock.c optional nfsserver
nfsserver/nfs_srvcache.c optional nfsserver
nfsserver/nfs_srvsubs.c optional nfsserver
nfsserver/nfs_syscalls.c optional nfsserver
nlm/nlm_advlock.c optional nfslockd
nlm/nlm_prot_clnt.c optional nfslockd
nlm/nlm_prot_impl.c optional nfslockd
nlm/nlm_prot_server.c optional nfslockd

@@ -1354,7 +1354,8 @@ lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
priority = PLOCK;
if (lock->lf_type == F_WRLCK)
priority += 4;
priority |= PCATCH;
if (!(lock->lf_flags & F_NOINTR))
priority |= PCATCH;
/*
* Scan lock list for this file looking for locks that would block us.
*/
@@ -1814,27 +1815,26 @@ lf_split(struct lockf *state, struct lockf_entry *lock1,
lf_insert_lock(state, splitlock);
}
struct clearlock {
STAILQ_ENTRY(clearlock) link;
struct lockdesc {
STAILQ_ENTRY(lockdesc) link;
struct vnode *vp;
struct flock fl;
};
STAILQ_HEAD(clearlocklist, clearlock);
STAILQ_HEAD(lockdesclist, lockdesc);
void
lf_clearremotesys(int sysid)
int
lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
{
struct lockf *ls;
struct lockf_entry *lf;
struct clearlock *cl;
struct clearlocklist locks;
KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
struct lockdesc *ldesc;
struct lockdesclist locks;
int error;
/*
* In order to keep the locking simple, we iterate over the
* active lock lists to build a list of locks that need
* releasing. We then call VOP_ADVLOCK for each one in turn.
* releasing. We then call the iterator for each one in turn.
*
* We take an extra reference to the vnode for the duration to
* make sure it doesn't go away before we are finished.
@@ -1847,32 +1847,116 @@ lf_clearremotesys(int sysid)
if (lf->lf_owner->lo_sysid != sysid)
continue;
cl = malloc(sizeof(struct clearlock), M_LOCKF,
ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
M_WAITOK);
cl->vp = lf->lf_vnode;
vref(cl->vp);
cl->fl.l_start = lf->lf_start;
ldesc->vp = lf->lf_vnode;
vref(ldesc->vp);
ldesc->fl.l_start = lf->lf_start;
if (lf->lf_end == OFF_MAX)
cl->fl.l_len = 0;
ldesc->fl.l_len = 0;
else
cl->fl.l_len =
ldesc->fl.l_len =
lf->lf_end - lf->lf_start + 1;
cl->fl.l_whence = SEEK_SET;
cl->fl.l_type = F_UNLCK;
cl->fl.l_pid = lf->lf_owner->lo_pid;
cl->fl.l_sysid = sysid;
STAILQ_INSERT_TAIL(&locks, cl, link);
ldesc->fl.l_whence = SEEK_SET;
ldesc->fl.l_type = F_UNLCK;
ldesc->fl.l_pid = lf->lf_owner->lo_pid;
ldesc->fl.l_sysid = sysid;
STAILQ_INSERT_TAIL(&locks, ldesc, link);
}
sx_xunlock(&ls->ls_lock);
}
sx_xunlock(&lf_lock_states_lock);
while ((cl = STAILQ_FIRST(&locks)) != NULL) {
/*
* Call the iterator function for each lock in turn. If the
* iterator returns an error code, just free the rest of the
* lockdesc structures.
*/
error = 0;
while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
STAILQ_REMOVE_HEAD(&locks, link);
VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE);
vrele(cl->vp);
free(cl, M_LOCKF);
if (!error)
error = fn(ldesc->vp, &ldesc->fl, arg);
vrele(ldesc->vp);
free(ldesc, M_LOCKF);
}
return (error);
}
int
lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
{
struct lockf *ls;
struct lockf_entry *lf;
struct lockdesc *ldesc;
struct lockdesclist locks;
int error;
/*
* In order to keep the locking simple, we iterate over the
* active lock lists to build a list of locks that need
* releasing. We then call the iterator for each one in turn.
*
* We take an extra reference to the vnode for the duration to
* make sure it doesn't go away before we are finished.
*/
STAILQ_INIT(&locks);
ls = vp->v_lockf;
if (!ls)
return (0);
sx_xlock(&ls->ls_lock);
LIST_FOREACH(lf, &ls->ls_active, lf_link) {
ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
M_WAITOK);
ldesc->vp = lf->lf_vnode;
vref(ldesc->vp);
ldesc->fl.l_start = lf->lf_start;
if (lf->lf_end == OFF_MAX)
ldesc->fl.l_len = 0;
else
ldesc->fl.l_len =
lf->lf_end - lf->lf_start + 1;
ldesc->fl.l_whence = SEEK_SET;
ldesc->fl.l_type = F_UNLCK;
ldesc->fl.l_pid = lf->lf_owner->lo_pid;
ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
STAILQ_INSERT_TAIL(&locks, ldesc, link);
}
sx_xunlock(&ls->ls_lock);
/*
* Call the iterator function for each lock in turn. If the
* iterator returns an error code, just free the rest of the
* lockdesc structures.
*/
error = 0;
while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
STAILQ_REMOVE_HEAD(&locks, link);
if (!error)
error = fn(ldesc->vp, &ldesc->fl, arg);
vrele(ldesc->vp);
free(ldesc, M_LOCKF);
}
return (error);
}
static int
lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
{
VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
return (0);
}
void
lf_clearremotesys(int sysid)
{
KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
}
int
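As a hedged illustration of the new iterator API (the counting helpers
below are hypothetical, not from the commit): lf_iteratelocks_vnode
walks the active list the same way as lf_iteratelocks_sysid above, so a
caller might look like this:

/* Hypothetical example: count the active lock records on a vnode. */
static int
count_locks_iterator(struct vnode *vp, struct flock *fl, void *arg)
{
    int *count = arg;

    (*count)++;
    return (0);    /* a non-zero return would stop the walk early */
}

static int
count_locks(struct vnode *vp)
{
    int count = 0;

    lf_iteratelocks_vnode(vp, count_locks_iterator, &count);
    return (count);
}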

@@ -3,6 +3,7 @@
.PATH: ${.CURDIR}/../../nlm ${.CURDIR}/../../rpc
KMOD= nfslockd
SRCS= vnode_if.h \
nlm_advlock.c \
nlm_prot_clnt.c \
nlm_prot_impl.c \
nlm_prot_server.c \

@@ -93,6 +93,7 @@
#define NFSSTA_SNDLOCK 0x01000000 /* Send socket lock */
#define NFSSTA_WANTSND 0x02000000 /* Want above */
#define NFSSTA_TIMEO 0x10000000 /* Experiencing a timeout */
#define NFSSTA_LOCKTIMEO 0x20000000 /* Experiencing a lockd timeout */
/*

@@ -233,6 +233,13 @@ nfs_reclaim(struct vop_reclaim_args *ap)
if (prtactive && vrefcnt(vp) != 0)
vprint("nfs_reclaim: pushing active", vp);
/*
* If the NLM is running, give it a chance to abort pending
* locks.
*/
if (nfs_reclaim_p)
nfs_reclaim_p(ap);
/*
* Destroy the vm object and flush associated pages.
*/

@@ -495,6 +495,7 @@ nfs_mountroot(struct mount *mp, struct thread *td)
(l >> 24) & 0xff, (l >> 16) & 0xff,
(l >> 8) & 0xff, (l >> 0) & 0xff, nd->root_hostnam);
printf("NFS ROOT: %s\n", buf);
nd->root_args.hostname = buf;
if ((error = nfs_mountdiskless(buf,
&nd->root_saddr, &nd->root_args, td, &vp, mp)) != 0) {
return (error);
@@ -540,6 +541,7 @@ nfs_decode_args(struct mount *mp, struct nfsmount *nmp, struct nfs_args *argp)
int s;
int adjsock;
int maxio;
char *p;
s = splnet();
@@ -699,6 +701,11 @@ nfs_decode_args(struct mount *mp, struct nfsmount *nmp, struct nfs_args *argp)
(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
}
}
strlcpy(nmp->nm_hostname, argp->hostname, sizeof(nmp->nm_hostname));
p = strchr(nmp->nm_hostname, ':');
if (p)
*p = '\0';
}
static const char *nfs_opts[] = { "from", "nfs_args",

@@ -198,6 +198,8 @@ struct mtx nfs_iod_mtx;
struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
int nfs_numasync = 0;
vop_advlock_t *nfs_advlock_p = nfs_dolock;
vop_reclaim_t *nfs_reclaim_p = NULL;
#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
SYSCTL_DECL(_vfs_nfs);
@@ -3051,8 +3053,13 @@ nfs_advlock(struct vop_advlock_args *ap)
size = VTONFS(vp)->n_size;
VOP_UNLOCK(vp, 0);
error = lf_advlock(ap, &(vp->v_lockf), size);
} else
error = nfs_dolock(ap);
} else {
if (nfs_advlock_p)
error = nfs_advlock_p(ap);
else
error = ENOLCK;
}
return (error);
}
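A hedged sketch of how the nfslockd module might install itself into
these new hook pointers at load time; the modevent handler itself is
illustrative, and only nfs_advlock_p, nfs_reclaim_p, nfs_dolock,
nlm_advlock and nlm_reclaim come from the commit:

static int
nfslockd_modevent(module_t mod, int type, void *data)
{
    switch (type) {
    case MOD_LOAD:
        /* Route NFS advisory locks and vnode reclaims through
           the in-kernel NLM client. */
        nfs_advlock_p = nlm_advlock;
        nfs_reclaim_p = nlm_reclaim;
        return (0);
    case MOD_UNLOAD:
        /* Fall back to the old userland rpc.lockd path. */
        nfs_advlock_p = nfs_dolock;
        nfs_reclaim_p = NULL;
        return (0);
    default:
        return (EOPNOTSUPP);
    }
}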

@@ -91,6 +91,7 @@ struct nfsmount {
int nm_tprintf_initial_delay; /* initial delay */
int nm_tprintf_delay; /* interval for messages */
struct nfs_tcp_mountstate nm_nfstcpstate;
char nm_hostname[MNAMELEN]; /* server's name */
/* NFSv4 */
uint64_t nm_clientid;

@@ -187,6 +187,9 @@ extern struct vop_vector nfs4_vnodeops;
extern struct buf_ops buf_ops_nfs;
extern struct buf_ops buf_ops_nfs4;
extern vop_advlock_t *nfs_advlock_p;
extern vop_reclaim_t *nfs_reclaim_p;
/*
* Prototypes for NFS vnode operations
*/

@@ -36,7 +36,17 @@
MALLOC_DECLARE(M_NLM);
#endif
/*
* This value is added to host system IDs when recording NFS client
* locks in the local lock manager.
*/
#define NLM_SYSID_CLIENT 0x1000000
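For illustration only (a hypothetical fragment; the exact expression is
an assumption, not taken from the commit), a lock that an NFS client
holds on a server would be recorded locally with the offset sysid:

/* Hypothetical sketch: tag a local record of a client-side lock. */
fl.l_sysid = NLM_SYSID_CLIENT + nlm_host_get_sysid(host);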
struct nlm_host;
struct vnode;
extern struct timeval nlm_zero_tv;
extern int nlm_nsm_state;
/*
* Copy a struct netobj.
@@ -47,61 +57,140 @@ extern void nlm_copy_netobj(struct netobj *dst, struct netobj *src,
/*
* Search for an existing NLM host that matches the given name
* (typically the caller_name element of an nlm4_lock). If none is
* found, create a new host. If 'rqstp' is non-NULL, record the remote
* found, create a new host. If 'addr' is non-NULL, record the remote
* address of the host so that we can call it back for async
* responses.
* responses. If 'vers' is greater than zero then record the NLM
* program version to use to communicate with this client. The host
* reference count is incremented - the caller must call
* nlm_host_release when it has finished using it.
*/
extern struct nlm_host *nlm_find_host_by_name(const char *name,
struct svc_req *rqstp);
const struct sockaddr *addr, rpcvers_t vers);
/*
* Search for an existing NLM host that matches the given remote
* address. If none is found, create a new host with the requested
* address and remember 'vers' as the NLM protocol version to use for
* that host.
* that host. The host reference count is incremented - the caller
* must call nlm_host_release when it has finished using it.
*/
extern struct nlm_host *nlm_find_host_by_addr(const struct sockaddr *addr,
int vers);
/*
* Register this NLM host with the local NSM so that we can be
* notified if it reboots.
*/
extern void nlm_host_monitor(struct nlm_host *host, int state);
/*
* Decrement the host reference count, freeing resources if the
* reference count reaches zero.
*/
extern void nlm_host_release(struct nlm_host *host);
/*
* Return an RPC client handle that can be used to talk to the NLM
* running on the given host.
*/
extern CLIENT *nlm_host_get_rpc(struct nlm_host *host);
/*
* Return the system ID for a host.
*/
extern int nlm_host_get_sysid(struct nlm_host *host);
/*
* Return the remote NSM state value for a host.
*/
extern int nlm_host_get_state(struct nlm_host *host);
/*
* When sending a blocking lock request, we need to track the request
* in our waiting lock list. We add an entry to the waiting list
* before we send the lock RPC so that we can cope with a granted
* message arriving at any time. Call this function before sending the
* lock rpc. If the lock succeeds, call nlm_deregister_wait_lock with
* the handle this function returns, otherwise nlm_wait_lock. Both
* will remove the entry from the waiting list.
*/
extern void *nlm_register_wait_lock(struct nlm4_lock *lock, struct vnode *vp);
/*
* Deregister a blocking lock request. Call this if the lock succeeded
* without blocking.
*/
extern void nlm_deregister_wait_lock(void *handle);
/*
* Wait for a granted callback for a blocked lock request, waiting at
* most timo ticks. If no granted message is received within the
* timeout, return EWOULDBLOCK. If a signal interrupted the wait,
* return EINTR - the caller must arrange to send a cancellation to
* the server. In both cases, the request is removed from the waiting
* list.
*/
extern int nlm_wait_lock(void *handle, int timo);
/*
* Cancel any pending waits for this vnode - called on forcible unmounts.
*/
extern void nlm_cancel_wait(struct vnode *vp);
/*
* Called when a host restarts.
*/
extern void nlm_sm_notify(nlm_sm_status *argp);
/*
* Implementation for lock testing RPCs. Returns the NLM host that
* matches the RPC arguments.
* Implementation for lock testing RPCs. If the request was handled
* successfully and rpcp is non-NULL, *rpcp is set to an RPC client
* handle which can be used to send an async rpc reply. Returns zero
* if the request was handled, or a suitable unix error code
* otherwise.
*/
extern struct nlm_host *nlm_do_test(nlm4_testargs *argp,
nlm4_testres *result, struct svc_req *rqstp);
extern int nlm_do_test(nlm4_testargs *argp, nlm4_testres *result,
struct svc_req *rqstp, CLIENT **rpcp);
/*
* Implementation for lock setting RPCs. Returns the NLM host that
* matches the RPC arguments. If monitor is TRUE, set up an NSM
* monitor for this host.
* Implementation for lock setting RPCs. If the request was handled
* successfully and rpcp is non-NULL, *rpcp is set to an RPC client
* handle which can be used to send an async rpc reply. Returns zero
* if the request was handled, or a suitable unix error code
* otherwise.
*/
extern struct nlm_host *nlm_do_lock(nlm4_lockargs *argp,
nlm4_res *result, struct svc_req *rqstp, bool_t monitor);
extern int nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result,
struct svc_req *rqstp, bool_t monitor, CLIENT **rpcp);
/*
* Implementation for cancelling a pending lock request. Returns the
* NLM host that matches the RPC arguments.
* Implementation for cancelling a pending lock request. If the
* request was handled successfully and rpcp is non-NULL, *rpcp is set
* to an RPC client handle which can be used to send an async rpc
* reply. Returns zero if the request was handled, or a suitable unix
* error code otherwise.
*/
extern struct nlm_host *nlm_do_cancel(nlm4_cancargs *argp,
nlm4_res *result, struct svc_req *rqstp);
extern int nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result,
struct svc_req *rqstp, CLIENT **rpcp);
/*
* Implementation for unlocking RPCs. Returns the NLM host that
* matches the RPC arguments.
* Implementation for unlocking RPCs. If the request was handled
* successfully and rpcp is non-NULL, *rpcp is set to an RPC client
* handle which can be used to send an async rpc reply. Returns zero
* if the request was handled, or a suitable unix error code
* otherwise.
*/
extern struct nlm_host *nlm_do_unlock(nlm4_unlockargs *argp,
nlm4_res *result, struct svc_req *rqstp);
extern int nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result,
struct svc_req *rqstp, CLIENT **rpcp);
/*
* Implementation for granted RPCs. If the request was handled
* successfully and rpcp is non-NULL, *rpcp is set to an RPC client
* handle which can be used to send an async rpc reply. Returns zero
* if the request was handled, or a suitable unix error code
* otherwise.
*/
extern int nlm_do_granted(nlm4_testargs *argp, nlm4_res *result,
struct svc_req *rqstp, CLIENT **rpcp);
/*
* Free all locks associated with the hostname argp->name.
@@ -109,10 +198,17 @@ extern struct nlm_host *nlm_do_unlock(nlm4_unlockargs *argp,
extern void nlm_do_free_all(nlm4_notify *argp);
/*
* Find an RPC transport that can be used to communicate with the
* userland part of lockd.
* Recover client lock state after a server reboot.
*/
extern CLIENT *nlm_user_lockd(void);
extern void nlm_client_recovery(struct nlm_host *);
/*
* Interface from NFS client code to the NLM.
*/
struct vop_advlock_args;
struct vop_reclaim_args;
extern int nlm_advlock(struct vop_advlock_args *ap);
extern int nlm_reclaim(struct vop_reclaim_args *ap);
#endif
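Taken together, the comments above describe a small protocol for
blocking lock requests. A hedged sketch of a caller driving it follows;
the error mapping, argument setup and 20-second timeout are simplified
or hypothetical, and a real caller must also xdr_free the reply:

static int
example_blocking_lock(struct nlm_host *host, nlm4_lockargs *args,
    struct vnode *vp, int timo)
{
    struct timeval tv = { 20, 0 };    /* illustrative RPC timeout */
    nlm4_res res;
    CLIENT *client;
    void *wait_handle;
    int error;

    /* Register before sending the RPC so that a granted callback
       arriving immediately is not lost. */
    wait_handle = nlm_register_wait_lock(&args->alock, vp);

    client = nlm_host_get_rpc(host);
    if (!client) {
        nlm_deregister_wait_lock(wait_handle);
        return (ENOLCK);
    }
    memset(&res, 0, sizeof(res));
    if (nlm4_lock_4(args, &res, client, NULL, tv) != RPC_SUCCESS) {
        CLNT_RELEASE(client);
        nlm_deregister_wait_lock(wait_handle);
        return (EINVAL);    /* real code maps the clnt_stat */
    }
    CLNT_RELEASE(client);

    if (res.stat.stat != nlm4_blocked) {
        /* Granted or denied immediately - no callback will come. */
        nlm_deregister_wait_lock(wait_handle);
        return (res.stat.stat == nlm4_granted ? 0 : EACCES);
    }

    /* The server queued the request; sleep until the granted
       callback arrives or the timeout expires. */
    error = nlm_wait_lock(wait_handle, timo);
    if (error == EINTR) {
        /* Interrupted: the caller must send an NLM cancel. */
    }
    return (error);
}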

sys/nlm/nlm_advlock.c: new file, 1235 lines (diff suppressed because it is too large)

@@ -280,129 +280,129 @@ typedef struct nlm4_notify nlm4_notify;
#define NLM_SM ((unsigned long)(0))
#define NLM_SM_NOTIFY ((unsigned long)(1))
extern enum clnt_stat nlm_sm_notify_0(struct nlm_sm_status *, void *, CLIENT *);
extern enum clnt_stat nlm_sm_notify_0(struct nlm_sm_status *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_sm_notify_0_svc(struct nlm_sm_status *, void *, struct svc_req *);
#define NLM_VERS ((unsigned long)(1))
#define NLM_TEST ((unsigned long)(1))
extern enum clnt_stat nlm_test_1(struct nlm_testargs *, nlm_testres *, CLIENT *);
extern enum clnt_stat nlm_test_1(struct nlm_testargs *, nlm_testres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_test_1_svc(struct nlm_testargs *, nlm_testres *, struct svc_req *);
#define NLM_LOCK ((unsigned long)(2))
extern enum clnt_stat nlm_lock_1(struct nlm_lockargs *, nlm_res *, CLIENT *);
extern enum clnt_stat nlm_lock_1(struct nlm_lockargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_lock_1_svc(struct nlm_lockargs *, nlm_res *, struct svc_req *);
#define NLM_CANCEL ((unsigned long)(3))
extern enum clnt_stat nlm_cancel_1(struct nlm_cancargs *, nlm_res *, CLIENT *);
extern enum clnt_stat nlm_cancel_1(struct nlm_cancargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_cancel_1_svc(struct nlm_cancargs *, nlm_res *, struct svc_req *);
#define NLM_UNLOCK ((unsigned long)(4))
extern enum clnt_stat nlm_unlock_1(struct nlm_unlockargs *, nlm_res *, CLIENT *);
extern enum clnt_stat nlm_unlock_1(struct nlm_unlockargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unlock_1_svc(struct nlm_unlockargs *, nlm_res *, struct svc_req *);
#define NLM_GRANTED ((unsigned long)(5))
extern enum clnt_stat nlm_granted_1(struct nlm_testargs *, nlm_res *, CLIENT *);
extern enum clnt_stat nlm_granted_1(struct nlm_testargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_granted_1_svc(struct nlm_testargs *, nlm_res *, struct svc_req *);
#define NLM_TEST_MSG ((unsigned long)(6))
extern enum clnt_stat nlm_test_msg_1(struct nlm_testargs *, void *, CLIENT *);
extern enum clnt_stat nlm_test_msg_1(struct nlm_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_test_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
#define NLM_LOCK_MSG ((unsigned long)(7))
extern enum clnt_stat nlm_lock_msg_1(struct nlm_lockargs *, void *, CLIENT *);
extern enum clnt_stat nlm_lock_msg_1(struct nlm_lockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_lock_msg_1_svc(struct nlm_lockargs *, void *, struct svc_req *);
#define NLM_CANCEL_MSG ((unsigned long)(8))
extern enum clnt_stat nlm_cancel_msg_1(struct nlm_cancargs *, void *, CLIENT *);
extern enum clnt_stat nlm_cancel_msg_1(struct nlm_cancargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_cancel_msg_1_svc(struct nlm_cancargs *, void *, struct svc_req *);
#define NLM_UNLOCK_MSG ((unsigned long)(9))
extern enum clnt_stat nlm_unlock_msg_1(struct nlm_unlockargs *, void *, CLIENT *);
extern enum clnt_stat nlm_unlock_msg_1(struct nlm_unlockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unlock_msg_1_svc(struct nlm_unlockargs *, void *, struct svc_req *);
#define NLM_GRANTED_MSG ((unsigned long)(10))
extern enum clnt_stat nlm_granted_msg_1(struct nlm_testargs *, void *, CLIENT *);
extern enum clnt_stat nlm_granted_msg_1(struct nlm_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_granted_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
#define NLM_TEST_RES ((unsigned long)(11))
extern enum clnt_stat nlm_test_res_1(nlm_testres *, void *, CLIENT *);
extern enum clnt_stat nlm_test_res_1(nlm_testres *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_test_res_1_svc(nlm_testres *, void *, struct svc_req *);
#define NLM_LOCK_RES ((unsigned long)(12))
extern enum clnt_stat nlm_lock_res_1(nlm_res *, void *, CLIENT *);
extern enum clnt_stat nlm_lock_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_lock_res_1_svc(nlm_res *, void *, struct svc_req *);
#define NLM_CANCEL_RES ((unsigned long)(13))
extern enum clnt_stat nlm_cancel_res_1(nlm_res *, void *, CLIENT *);
extern enum clnt_stat nlm_cancel_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_cancel_res_1_svc(nlm_res *, void *, struct svc_req *);
#define NLM_UNLOCK_RES ((unsigned long)(14))
extern enum clnt_stat nlm_unlock_res_1(nlm_res *, void *, CLIENT *);
extern enum clnt_stat nlm_unlock_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unlock_res_1_svc(nlm_res *, void *, struct svc_req *);
#define NLM_GRANTED_RES ((unsigned long)(15))
extern enum clnt_stat nlm_granted_res_1(nlm_res *, void *, CLIENT *);
extern enum clnt_stat nlm_granted_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_granted_res_1_svc(nlm_res *, void *, struct svc_req *);
extern int nlm_prog_1_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
#define NLM_VERSX ((unsigned long)(3))
#define NLM_SHARE ((unsigned long)(20))
extern enum clnt_stat nlm_share_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
extern enum clnt_stat nlm_share_3(nlm_shareargs *, nlm_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_share_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
#define NLM_UNSHARE ((unsigned long)(21))
extern enum clnt_stat nlm_unshare_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
extern enum clnt_stat nlm_unshare_3(nlm_shareargs *, nlm_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unshare_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
#define NLM_NM_LOCK ((unsigned long)(22))
extern enum clnt_stat nlm_nm_lock_3(nlm_lockargs *, nlm_res *, CLIENT *);
extern enum clnt_stat nlm_nm_lock_3(nlm_lockargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_nm_lock_3_svc(nlm_lockargs *, nlm_res *, struct svc_req *);
#define NLM_FREE_ALL ((unsigned long)(23))
extern enum clnt_stat nlm_free_all_3(nlm_notify *, void *, CLIENT *);
extern enum clnt_stat nlm_free_all_3(nlm_notify *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_free_all_3_svc(nlm_notify *, void *, struct svc_req *);
extern int nlm_prog_3_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
#define NLM_VERS4 ((unsigned long)(4))
#define NLM4_TEST ((unsigned long)(1))
extern enum clnt_stat nlm4_test_4(nlm4_testargs *, nlm4_testres *, CLIENT *);
extern enum clnt_stat nlm4_test_4(nlm4_testargs *, nlm4_testres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_test_4_svc(nlm4_testargs *, nlm4_testres *, struct svc_req *);
#define NLM4_LOCK ((unsigned long)(2))
extern enum clnt_stat nlm4_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
extern enum clnt_stat nlm4_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
#define NLM4_CANCEL ((unsigned long)(3))
extern enum clnt_stat nlm4_cancel_4(nlm4_cancargs *, nlm4_res *, CLIENT *);
extern enum clnt_stat nlm4_cancel_4(nlm4_cancargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_cancel_4_svc(nlm4_cancargs *, nlm4_res *, struct svc_req *);
#define NLM4_UNLOCK ((unsigned long)(4))
extern enum clnt_stat nlm4_unlock_4(nlm4_unlockargs *, nlm4_res *, CLIENT *);
extern enum clnt_stat nlm4_unlock_4(nlm4_unlockargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unlock_4_svc(nlm4_unlockargs *, nlm4_res *, struct svc_req *);
#define NLM4_GRANTED ((unsigned long)(5))
extern enum clnt_stat nlm4_granted_4(nlm4_testargs *, nlm4_res *, CLIENT *);
extern enum clnt_stat nlm4_granted_4(nlm4_testargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_granted_4_svc(nlm4_testargs *, nlm4_res *, struct svc_req *);
#define NLM4_TEST_MSG ((unsigned long)(6))
extern enum clnt_stat nlm4_test_msg_4(nlm4_testargs *, void *, CLIENT *);
extern enum clnt_stat nlm4_test_msg_4(nlm4_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_test_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
#define NLM4_LOCK_MSG ((unsigned long)(7))
extern enum clnt_stat nlm4_lock_msg_4(nlm4_lockargs *, void *, CLIENT *);
extern enum clnt_stat nlm4_lock_msg_4(nlm4_lockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_lock_msg_4_svc(nlm4_lockargs *, void *, struct svc_req *);
#define NLM4_CANCEL_MSG ((unsigned long)(8))
extern enum clnt_stat nlm4_cancel_msg_4(nlm4_cancargs *, void *, CLIENT *);
extern enum clnt_stat nlm4_cancel_msg_4(nlm4_cancargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_cancel_msg_4_svc(nlm4_cancargs *, void *, struct svc_req *);
#define NLM4_UNLOCK_MSG ((unsigned long)(9))
extern enum clnt_stat nlm4_unlock_msg_4(nlm4_unlockargs *, void *, CLIENT *);
extern enum clnt_stat nlm4_unlock_msg_4(nlm4_unlockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unlock_msg_4_svc(nlm4_unlockargs *, void *, struct svc_req *);
#define NLM4_GRANTED_MSG ((unsigned long)(10))
extern enum clnt_stat nlm4_granted_msg_4(nlm4_testargs *, void *, CLIENT *);
extern enum clnt_stat nlm4_granted_msg_4(nlm4_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_granted_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
#define NLM4_TEST_RES ((unsigned long)(11))
extern enum clnt_stat nlm4_test_res_4(nlm4_testres *, void *, CLIENT *);
extern enum clnt_stat nlm4_test_res_4(nlm4_testres *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_test_res_4_svc(nlm4_testres *, void *, struct svc_req *);
#define NLM4_LOCK_RES ((unsigned long)(12))
extern enum clnt_stat nlm4_lock_res_4(nlm4_res *, void *, CLIENT *);
extern enum clnt_stat nlm4_lock_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_lock_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_CANCEL_RES ((unsigned long)(13))
extern enum clnt_stat nlm4_cancel_res_4(nlm4_res *, void *, CLIENT *);
extern enum clnt_stat nlm4_cancel_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_cancel_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_UNLOCK_RES ((unsigned long)(14))
extern enum clnt_stat nlm4_unlock_res_4(nlm4_res *, void *, CLIENT *);
extern enum clnt_stat nlm4_unlock_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unlock_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_GRANTED_RES ((unsigned long)(15))
extern enum clnt_stat nlm4_granted_res_4(nlm4_res *, void *, CLIENT *);
extern enum clnt_stat nlm4_granted_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_granted_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_SHARE ((unsigned long)(20))
extern enum clnt_stat nlm4_share_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
extern enum clnt_stat nlm4_share_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_share_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
#define NLM4_UNSHARE ((unsigned long)(21))
extern enum clnt_stat nlm4_unshare_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
extern enum clnt_stat nlm4_unshare_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unshare_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
#define NLM4_NM_LOCK ((unsigned long)(22))
extern enum clnt_stat nlm4_nm_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
extern enum clnt_stat nlm4_nm_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_nm_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
#define NLM4_FREE_ALL ((unsigned long)(23))
extern enum clnt_stat nlm4_free_all_4(nlm4_notify *, void *, CLIENT *);
extern enum clnt_stat nlm4_free_all_4(nlm4_notify *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_free_all_4_svc(nlm4_notify *, void *, struct svc_req *);
extern int nlm_prog_4_freeresult(SVCXPRT *, xdrproc_t, caddr_t);

@@ -17,356 +17,353 @@ __RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
#endif /* not lint */
__FBSDID("$FreeBSD$");
/* Default timeout can be changed using clnt_control() */
static struct timeval TIMEOUT = { 25, 0 };
enum clnt_stat
nlm_sm_notify_0(struct nlm_sm_status *argp, void *clnt_res, CLIENT *clnt)
nlm_sm_notify_0(struct nlm_sm_status *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_SM_NOTIFY,
return (CLNT_CALL_EXT(clnt, ext, NLM_SM_NOTIFY,
(xdrproc_t) xdr_nlm_sm_status, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_test_1(struct nlm_testargs *argp, nlm_testres *clnt_res, CLIENT *clnt)
nlm_test_1(struct nlm_testargs *argp, nlm_testres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_TEST,
return (CLNT_CALL_EXT(clnt, ext, NLM_TEST,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_testres, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_lock_1(struct nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
nlm_lock_1(struct nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_LOCK,
return (CLNT_CALL_EXT(clnt, ext, NLM_LOCK,
(xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_cancel_1(struct nlm_cancargs *argp, nlm_res *clnt_res, CLIENT *clnt)
nlm_cancel_1(struct nlm_cancargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_CANCEL,
return (CLNT_CALL_EXT(clnt, ext, NLM_CANCEL,
(xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_unlock_1(struct nlm_unlockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
nlm_unlock_1(struct nlm_unlockargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_UNLOCK,
return (CLNT_CALL_EXT(clnt, ext, NLM_UNLOCK,
(xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_granted_1(struct nlm_testargs *argp, nlm_res *clnt_res, CLIENT *clnt)
nlm_granted_1(struct nlm_testargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_GRANTED,
return (CLNT_CALL_EXT(clnt, ext, NLM_GRANTED,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_test_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
nlm_test_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_TEST_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM_TEST_MSG,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_lock_msg_1(struct nlm_lockargs *argp, void *clnt_res, CLIENT *clnt)
nlm_lock_msg_1(struct nlm_lockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_LOCK_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM_LOCK_MSG,
(xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_cancel_msg_1(struct nlm_cancargs *argp, void *clnt_res, CLIENT *clnt)
nlm_cancel_msg_1(struct nlm_cancargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_CANCEL_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM_CANCEL_MSG,
(xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_unlock_msg_1(struct nlm_unlockargs *argp, void *clnt_res, CLIENT *clnt)
nlm_unlock_msg_1(struct nlm_unlockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_UNLOCK_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM_UNLOCK_MSG,
(xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_granted_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
nlm_granted_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_GRANTED_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM_GRANTED_MSG,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_test_res_1(nlm_testres *argp, void *clnt_res, CLIENT *clnt)
nlm_test_res_1(nlm_testres *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_TEST_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM_TEST_RES,
(xdrproc_t) xdr_nlm_testres, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_lock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
nlm_lock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_LOCK_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM_LOCK_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_cancel_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
nlm_cancel_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_CANCEL_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM_CANCEL_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_unlock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
nlm_unlock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_UNLOCK_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM_UNLOCK_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_granted_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
nlm_granted_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_GRANTED_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM_GRANTED_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_share_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
nlm_share_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_SHARE,
return (CLNT_CALL_EXT(clnt, ext, NLM_SHARE,
(xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_unshare_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
nlm_unshare_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_UNSHARE,
return (CLNT_CALL_EXT(clnt, ext, NLM_UNSHARE,
(xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_nm_lock_3(nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
nlm_nm_lock_3(nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_NM_LOCK,
return (CLNT_CALL_EXT(clnt, ext, NLM_NM_LOCK,
(xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm_free_all_3(nlm_notify *argp, void *clnt_res, CLIENT *clnt)
nlm_free_all_3(nlm_notify *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM_FREE_ALL,
return (CLNT_CALL_EXT(clnt, ext, NLM_FREE_ALL,
(xdrproc_t) xdr_nlm_notify, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_test_4(nlm4_testargs *argp, nlm4_testres *clnt_res, CLIENT *clnt)
nlm4_test_4(nlm4_testargs *argp, nlm4_testres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_TEST,
return (CLNT_CALL_EXT(clnt, ext, NLM4_TEST,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_testres, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
nlm4_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_LOCK,
return (CLNT_CALL_EXT(clnt, ext, NLM4_LOCK,
(xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_cancel_4(nlm4_cancargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
nlm4_cancel_4(nlm4_cancargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_CANCEL,
return (CLNT_CALL_EXT(clnt, ext, NLM4_CANCEL,
(xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_unlock_4(nlm4_unlockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
nlm4_unlock_4(nlm4_unlockargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_UNLOCK,
return (CLNT_CALL_EXT(clnt, ext, NLM4_UNLOCK,
(xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_granted_4(nlm4_testargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
nlm4_granted_4(nlm4_testargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_GRANTED,
return (CLNT_CALL_EXT(clnt, ext, NLM4_GRANTED,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_test_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
nlm4_test_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_TEST_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM4_TEST_MSG,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_lock_msg_4(nlm4_lockargs *argp, void *clnt_res, CLIENT *clnt)
nlm4_lock_msg_4(nlm4_lockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_LOCK_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM4_LOCK_MSG,
(xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_cancel_msg_4(nlm4_cancargs *argp, void *clnt_res, CLIENT *clnt)
nlm4_cancel_msg_4(nlm4_cancargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_CANCEL_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM4_CANCEL_MSG,
(xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_unlock_msg_4(nlm4_unlockargs *argp, void *clnt_res, CLIENT *clnt)
nlm4_unlock_msg_4(nlm4_unlockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_UNLOCK_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM4_UNLOCK_MSG,
(xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_granted_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
nlm4_granted_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_GRANTED_MSG,
return (CLNT_CALL_EXT(clnt, ext, NLM4_GRANTED_MSG,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_test_res_4(nlm4_testres *argp, void *clnt_res, CLIENT *clnt)
nlm4_test_res_4(nlm4_testres *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_TEST_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM4_TEST_RES,
(xdrproc_t) xdr_nlm4_testres, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_lock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
nlm4_lock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_LOCK_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM4_LOCK_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_cancel_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
nlm4_cancel_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_CANCEL_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM4_CANCEL_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_unlock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
nlm4_unlock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_UNLOCK_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM4_UNLOCK_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_granted_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
nlm4_granted_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_GRANTED_RES,
return (CLNT_CALL_EXT(clnt, ext, NLM4_GRANTED_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_share_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
nlm4_share_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_SHARE,
return (CLNT_CALL_EXT(clnt, ext, NLM4_SHARE,
(xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_unshare_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
nlm4_unshare_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_UNSHARE,
return (CLNT_CALL_EXT(clnt, ext, NLM4_UNSHARE,
(xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_nm_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
nlm4_nm_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_NM_LOCK,
return (CLNT_CALL_EXT(clnt, ext, NLM4_NM_LOCK,
(xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
enum clnt_stat
nlm4_free_all_4(nlm4_notify *argp, void *clnt_res, CLIENT *clnt)
nlm4_free_all_4(nlm4_notify *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
return (clnt_call(clnt, NLM4_FREE_ALL,
return (CLNT_CALL_EXT(clnt, ext, NLM4_FREE_ALL,
(xdrproc_t) xdr_nlm4_notify, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
TIMEOUT));
timo));
}
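Each stub now takes a struct rpc_callextra and an explicit timeout and
forwards them to CLNT_CALL_EXT in place of the old fixed 25-second
TIMEOUT. As a hedged illustration (the feedback function and wrapper
are hypothetical), a caller wanting retransmit notifications might do:

static void
example_feedback(int cmd, int procnum, void *arg)
{
    /* Invoked by the RPC layer on events such as retransmits;
       the NLM can use this to note that a server is responding
       slowly, for instance. */
}

static enum clnt_stat
example_test(CLIENT *client, nlm4_testargs *args, nlm4_testres *res)
{
    struct rpc_callextra ext;
    struct timeval timo = { 25, 0 };    /* the old default */

    memset(&ext, 0, sizeof(ext));
    ext.rc_auth = client->cl_auth;    /* reuse the handle's AUTH */
    ext.rc_feedback = example_feedback;
    ext.rc_feedback_arg = NULL;
    return (nlm4_test_4(args, res, client, &ext, timo));
}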

(File diff suppressed because it is too large.)

@@ -232,7 +232,6 @@ nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqst
nlm4_testargs args4;
nlm4_testres res4;
nlm_testres res;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -240,7 +239,8 @@ nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqst
args4.exclusive = argp->exclusive;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
host = nlm_do_test(&args4, &res4, rqstp);
if (nlm_do_test(&args4, &res4, rqstp, &rpc))
return (FALSE);
res.cookie = res4.cookie;
res.stat.stat = nlm_convert_to_nlm_stats(res4.stat.stat);
@@ -249,9 +249,10 @@ nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqst
&res.stat.nlm_testrply_u.holder,
&res4.stat.nlm4_testrply_u.holder);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm_test_res_1(&res, &dummy, rpc);
if (rpc) {
nlm_test_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm_testres, &res);
return (FALSE);
@@ -263,7 +264,6 @@ nlm_lock_msg_1_svc(struct nlm_lockargs *argp, void *result, struct svc_req *rqst
nlm4_lockargs args4;
nlm4_res res4;
nlm_res res;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -274,13 +274,15 @@ nlm_lock_msg_1_svc(struct nlm_lockargs *argp, void *result, struct svc_req *rqst
args4.reclaim = argp->reclaim;
args4.state = argp->state;
host = nlm_do_lock(&args4, &res4, rqstp, TRUE);
if (nlm_do_lock(&args4, &res4, rqstp, TRUE, &rpc))
return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm_lock_res_1(&res, &dummy, rpc);
if (rpc) {
nlm_lock_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -292,7 +294,6 @@ nlm_cancel_msg_1_svc(struct nlm_cancargs *argp, void *result, struct svc_req *rq
nlm4_cancargs args4;
nlm4_res res4;
nlm_res res;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -301,13 +302,15 @@ nlm_cancel_msg_1_svc(struct nlm_cancargs *argp, void *result, struct svc_req *rq
args4.exclusive = argp->exclusive;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
host = nlm_do_cancel(&args4, &res4, rqstp);
if (nlm_do_cancel(&args4, &res4, rqstp, &rpc))
return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm_cancel_res_1(&res, &dummy, rpc);
if (rpc) {
nlm_cancel_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -319,20 +322,21 @@ nlm_unlock_msg_1_svc(struct nlm_unlockargs *argp, void *result, struct svc_req *
nlm4_unlockargs args4;
nlm4_res res4;
nlm_res res;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
args4.cookie = argp->cookie;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
host = nlm_do_unlock(&args4, &res4, rqstp);
if (nlm_do_unlock(&args4, &res4, rqstp, &rpc))
return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm_unlock_res_1(&res, &dummy, rpc);
if (rpc) {
nlm_unlock_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -344,7 +348,6 @@ nlm_granted_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *r
nlm4_testargs args4;
nlm4_res res4;
nlm_res res;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -352,20 +355,15 @@ nlm_granted_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *r
args4.exclusive = argp->exclusive;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
/*
* We make a synchronous call to userland and send the reply
* back async.
*/
nlm4_granted_4_svc(&args4, &res4, rqstp);
if (nlm_do_granted(&args4, &res4, rqstp, &rpc))
return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
host = nlm_find_host_by_addr(
(struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
rqstp->rq_vers);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm_granted_res_1(&res, &dummy, rpc);
if (rpc) {
nlm_granted_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -515,7 +513,7 @@ bool_t
nlm4_test_4_svc(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
{
nlm_do_test(argp, result, rqstp);
nlm_do_test(argp, result, rqstp, NULL);
return (TRUE);
}
@@ -523,7 +521,7 @@ bool_t
nlm4_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
nlm_do_lock(argp, result, rqstp, TRUE);
nlm_do_lock(argp, result, rqstp, TRUE, NULL);
return (TRUE);
}
@@ -531,7 +529,7 @@ bool_t
nlm4_cancel_4_svc(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
nlm_do_cancel(argp, result, rqstp);
nlm_do_cancel(argp, result, rqstp, NULL);
return (TRUE);
}
@@ -539,35 +537,15 @@ bool_t
nlm4_unlock_4_svc(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
nlm_do_unlock(argp, result, rqstp);
nlm_do_unlock(argp, result, rqstp, NULL);
return (TRUE);
}
bool_t
nlm4_granted_4_svc(nlm4_testargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
CLIENT* lockd;
struct timeval tv;
memset(result, 0, sizeof(*result));
nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
/*
* Set a non-zero timeout to give the userland a chance to reply.
*/
lockd = nlm_user_lockd();
if (!lockd) {
result->stat.stat = nlm4_failed;
return (TRUE);
}
tv.tv_sec = 20;
tv.tv_usec = 0;
CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
nlm4_granted_4(argp, result, lockd);
tv.tv_sec = 0;
tv.tv_usec = 0;
CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
nlm_do_granted(argp, result, rqstp, NULL);
return (TRUE);
}
@@ -575,14 +553,15 @@ bool_t
nlm4_test_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_testres res4;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
host = nlm_do_test(argp, &res4, rqstp);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm4_test_res_4(&res4, &dummy, rpc);
if (nlm_do_test(argp, &res4, rqstp, &rpc))
return (FALSE);
if (rpc) {
nlm4_test_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm4_testres, &res4);
return (FALSE);
@@ -592,14 +571,15 @@ bool_t
nlm4_lock_msg_4_svc(nlm4_lockargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_res res4;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
host = nlm_do_lock(argp, &res4, rqstp, TRUE);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm4_lock_res_4(&res4, &dummy, rpc);
if (nlm_do_lock(argp, &res4, rqstp, TRUE, &rpc))
return (FALSE);
if (rpc) {
nlm4_lock_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -609,14 +589,15 @@ bool_t
nlm4_cancel_msg_4_svc(nlm4_cancargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_res res4;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
host = nlm_do_cancel(argp, &res4, rqstp);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm4_cancel_res_4(&res4, &dummy, rpc);
if (nlm_do_cancel(argp, &res4, rqstp, &rpc))
return (FALSE);
if (rpc) {
nlm4_cancel_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -626,14 +607,14 @@ bool_t
nlm4_unlock_msg_4_svc(nlm4_unlockargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_res res4;
struct nlm_host *host;
CLIENT *rpc;
char dummy;
host = nlm_do_unlock(argp, &res4, rqstp);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm4_unlock_res_4(&res4, &dummy, rpc);
if (nlm_do_unlock(argp, &res4, rqstp, &rpc))
return (FALSE);
if (rpc) {
nlm4_unlock_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -642,23 +623,16 @@ nlm4_unlock_msg_4_svc(nlm4_unlockargs *argp, void *result, struct svc_req *rqstp
bool_t
nlm4_granted_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
{
struct nlm_host *host;
CLIENT *rpc;
nlm4_res res4;
CLIENT *rpc;
char dummy;
/*
* We make a synchronous call to userland and send the reply
* back async.
*/
nlm4_granted_4_svc(argp, &res4, rqstp);
host = nlm_find_host_by_addr(
(struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
rqstp->rq_vers);
rpc = nlm_host_get_rpc(host);
if (rpc)
nlm4_granted_res_4(&res4, &dummy, rpc);
if (nlm_do_granted(argp, &res4, rqstp, &rpc))
return (FALSE);
if (rpc) {
nlm4_granted_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
CLNT_RELEASE(rpc);
}
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -667,11 +641,6 @@ nlm4_granted_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_test_res_4_svc(nlm4_testres *argp, void *result, struct svc_req *rqstp)
{
CLIENT* lockd;
lockd = nlm_user_lockd();
if (lockd)
nlm4_test_res_4(argp, result, lockd);
return (FALSE);
}
@@ -679,11 +648,6 @@ nlm4_test_res_4_svc(nlm4_testres *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_lock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
{
CLIENT* lockd;
lockd = nlm_user_lockd();
if (lockd)
nlm4_lock_res_4(argp, result, lockd);
return (FALSE);
}
@@ -691,11 +655,6 @@ nlm4_lock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_cancel_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
{
CLIENT* lockd;
lockd = nlm_user_lockd();
if (lockd)
nlm4_cancel_res_4(argp, result, lockd);
return (FALSE);
}
@@ -703,11 +662,6 @@ nlm4_cancel_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_unlock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
{
CLIENT* lockd;
lockd = nlm_user_lockd();
if (lockd)
nlm4_unlock_res_4(argp, result, lockd);
return (FALSE);
}
@@ -741,7 +695,7 @@ bool_t
nlm4_nm_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
nlm_do_lock(argp, result, rqstp, FALSE);
nlm_do_lock(argp, result, rqstp, FALSE, NULL);
return (TRUE);
}

@@ -50,9 +50,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/ucred.h>
#include <rpc/types.h>
@@ -81,14 +83,39 @@ static struct auth_ops authunix_ops = {
* This struct is pointed to by the ah_private field of an auth_handle.
*/
struct audata {
TAILQ_ENTRY(audata) au_link;
TAILQ_ENTRY(audata) au_alllink;
int au_refs;
struct xucred au_xcred;
struct opaque_auth au_origcred; /* original credentials */
struct opaque_auth au_shcred; /* short hand cred */
u_long au_shfaults; /* short hand cache faults */
char au_marshed[MAX_AUTH_BYTES];
u_int au_mpos; /* xdr pos at end of marshed */
AUTH *au_auth; /* link back to AUTH */
};
TAILQ_HEAD(audata_list, audata);
#define AUTH_PRIVATE(auth) ((struct audata *)auth->ah_private)
#define AUTH_UNIX_HASH_SIZE 16
#define AUTH_UNIX_MAX 256
static struct audata_list auth_unix_cache[AUTH_UNIX_HASH_SIZE];
static struct audata_list auth_unix_all;
static struct sx auth_unix_lock;
static int auth_unix_count;
static void
authunix_init(void *dummy)
{
int i;
for (i = 0; i < AUTH_UNIX_HASH_SIZE; i++)
TAILQ_INIT(&auth_unix_cache[i]);
TAILQ_INIT(&auth_unix_all);
sx_init(&auth_unix_lock, "auth_unix_lock");
}
SYSINIT(authunix_init, SI_SUB_KMEM, SI_ORDER_ANY, authunix_init, NULL);
/*
* Create a unix style authenticator.
* Returns an auth handle with the given stuff in it.
@@ -96,38 +123,70 @@ struct audata {
AUTH *
authunix_create(struct ucred *cred)
{
uint32_t h, th;
struct xucred xcr;
char mymem[MAX_AUTH_BYTES];
XDR xdrs;
AUTH *auth;
struct audata *au;
struct audata *au, *tau;
struct timeval now;
uint32_t time;
int len;
if (auth_unix_count > AUTH_UNIX_MAX) {
while (auth_unix_count > AUTH_UNIX_MAX) {
sx_xlock(&auth_unix_lock);
tau = TAILQ_FIRST(&auth_unix_all);
th = HASHSTEP(HASHINIT, tau->au_xcred.cr_uid)
% AUTH_UNIX_HASH_SIZE;
TAILQ_REMOVE(&auth_unix_cache[th], tau, au_link);
TAILQ_REMOVE(&auth_unix_all, tau, au_alllink);
auth_unix_count--;
sx_xunlock(&auth_unix_lock);
AUTH_DESTROY(tau->au_auth);
}
}
/*
* Hash the uid to see if we already have an AUTH with this cred.
*/
h = HASHSTEP(HASHINIT, cred->cr_uid) % AUTH_UNIX_HASH_SIZE;
cru2x(cred, &xcr);
again:
sx_slock(&auth_unix_lock);
TAILQ_FOREACH(au, &auth_unix_cache[h], au_link) {
if (!memcmp(&xcr, &au->au_xcred, sizeof(xcr))) {
if (sx_try_upgrade(&auth_unix_lock)) {
/*
* Keep auth_unix_all LRU sorted.
*/
TAILQ_REMOVE(&auth_unix_all, au, au_alllink);
TAILQ_INSERT_TAIL(&auth_unix_all, au,
au_alllink);
au->au_refs++;
sx_xunlock(&auth_unix_lock);
return (au->au_auth);
} else {
sx_sunlock(&auth_unix_lock);
goto again;
}
}
}
/*
* Allocate and set up auth handle
*/
au = NULL;
auth = mem_alloc(sizeof(*auth));
#ifndef _KERNEL
if (auth == NULL) {
printf("authunix_create: out of memory");
goto cleanup_authunix_create;
}
#endif
au = mem_alloc(sizeof(*au));
#ifndef _KERNEL
if (au == NULL) {
printf("authunix_create: out of memory");
goto cleanup_authunix_create;
}
#endif
auth->ah_ops = &authunix_ops;
auth->ah_private = (caddr_t)au;
auth->ah_verf = au->au_shcred = _null_auth;
au->au_refs = 1;
au->au_xcred = xcr;
au->au_shfaults = 0;
au->au_origcred.oa_base = NULL;
au->au_auth = auth;
getmicrotime(&now);
time = now.tv_sec;
@@ -141,14 +200,7 @@ authunix_create(struct ucred *cred)
panic("authunix_create: failed to encode creds");
au->au_origcred.oa_length = len = XDR_GETPOS(&xdrs);
au->au_origcred.oa_flavor = AUTH_UNIX;
#ifdef _KERNEL
au->au_origcred.oa_base = mem_alloc((u_int) len);
#else
if ((au->au_origcred.oa_base = mem_alloc((u_int) len)) == NULL) {
printf("authunix_create: out of memory");
goto cleanup_authunix_create;
}
#endif
memcpy(au->au_origcred.oa_base, mymem, (size_t)len);
/*
@ -156,18 +208,19 @@ authunix_create(struct ucred *cred)
*/
auth->ah_cred = au->au_origcred;
marshal_new_auth(auth);
return (auth);
#ifndef _KERNEL
cleanup_authunix_create:
if (auth)
mem_free(auth, sizeof(*auth));
if (au) {
if (au->au_origcred.oa_base)
mem_free(au->au_origcred.oa_base, (u_int)len);
mem_free(au, sizeof(*au));
if (sx_try_upgrade(&auth_unix_lock)) {
auth_unix_count++;
TAILQ_INSERT_TAIL(&auth_unix_cache[h], au, au_link);
TAILQ_INSERT_TAIL(&auth_unix_all, au, au_alllink);
au->au_refs++; /* one for the cache, one for user */
sx_xunlock(&auth_unix_lock);
return (auth);
} else {
sx_sunlock(&auth_unix_lock);
AUTH_DESTROY(auth);
goto again;
}
return (NULL);
#endif
}
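
The lookup path above holds the sx lock shared and upgrades it opportunistically when a cache hit needs to reorder the LRU list; when sx_try_upgrade() fails it drops the lock and retries from the top. The same pattern in a minimal, self-contained sketch (all names hypothetical):

/*
 * Sketch only: look up an entry under a shared sx lock, upgrading
 * to exclusive for the LRU update and retrying if the upgrade
 * fails. "cache_lock", "cache_lookup" and "cache_touch" are
 * invented stand-ins, not part of this commit.
 */
static struct entry *
cache_find(struct key *k)
{
	struct entry *e;

again:
	sx_slock(&cache_lock);
	e = cache_lookup(k);
	if (e == NULL) {
		sx_sunlock(&cache_lock);
		return (NULL);
	}
	if (sx_try_upgrade(&cache_lock)) {
		cache_touch(e);		/* e.g. move to LRU tail */
		e->refs++;
		sx_xunlock(&cache_lock);
		return (e);
	}
	/* Upgrade failed; drop the lock and retry the lookup. */
	sx_sunlock(&cache_lock);
	goto again;
}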
/*
@ -262,8 +315,18 @@ static void
authunix_destroy(AUTH *auth)
{
struct audata *au;
int refs;
au = AUTH_PRIVATE(auth);
sx_xlock(&auth_unix_lock);
au->au_refs--;
refs = au->au_refs;
sx_xunlock(&auth_unix_lock);
if (refs > 0)
return;
mem_free(au->au_origcred.oa_base, au->au_origcred.oa_length);
if (au->au_shcred.oa_base != NULL)

View File

@ -68,7 +68,12 @@ xdr_authunix_parms(XDR *xdrs, uint32_t *time, struct xucred *cred)
uint32_t junk;
if (xdrs->x_op == XDR_ENCODE) {
/*
* Restrict name length to 255 according to RFC 1057.
*/
namelen = strlen(hostname);
if (namelen > 255)
namelen = 255;
} else {
namelen = 0;
}

View File

@ -62,6 +62,7 @@
#include <rpc/clnt_stat.h>
#include <sys/cdefs.h>
#ifdef _KERNEL
#include <sys/refcount.h>
#include <rpc/netconfig.h>
#else
#include <netconfig.h>
@ -109,6 +110,23 @@ struct rpc_err {
#define re_lb ru.RE_lb
};
#ifdef _KERNEL
/*
* Functions of this type may be used to receive notification when RPC
* calls have to be re-transmitted etc.
*/
typedef void rpc_feedback(int cmd, int procnum, void *);
/*
* A structure used with CLNT_CALL_EXT to pass extra information used
* while processing an RPC call.
*/
struct rpc_callextra {
AUTH *rc_auth; /* auth handle to use for this call */
rpc_feedback *rc_feedback; /* callback for retransmits etc. */
void *rc_feedback_arg; /* argument for callback */
};
#endif
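
As a rough illustration of how a caller might use this, here is a hypothetical feedback function that counts retransmits and reconnects; the struct and names are invented, and the FEEDBACK_* codes are the ones defined further down in this header:

/*
 * Hypothetical feedback callback; "struct my_stats" is not part of
 * the real API, it just receives the rc_feedback_arg pointer.
 */
struct my_stats {
	int retransmits;
	int reconnects;
};

static void
my_feedback(int cmd, int procnum, void *arg)
{
	struct my_stats *st = arg;

	switch (cmd) {
	case FEEDBACK_REXMIT1:
	case FEEDBACK_REXMIT2:
		st->retransmits++;
		break;
	case FEEDBACK_RECONNECT:
		st->reconnects++;
		break;
	default:
		break;
	}
}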
/*
* Client rpc handle.
@ -116,12 +134,14 @@ struct rpc_err {
* Client is responsible for initializing auth, see e.g. auth_none.c.
*/
typedef struct __rpc_client {
#ifdef _KERNEL
volatile u_int cl_refs; /* reference count */
AUTH *cl_auth; /* authenticator */
struct clnt_ops {
/* call remote procedure */
enum clnt_stat (*cl_call)(struct __rpc_client *,
rpcproc_t, xdrproc_t, void *, xdrproc_t,
void *, struct timeval);
struct rpc_callextra *, rpcproc_t, xdrproc_t, void *,
xdrproc_t, void *, struct timeval);
/* abort a call */
void (*cl_abort)(struct __rpc_client *);
/* get specific error code */
@ -136,12 +156,33 @@ typedef struct __rpc_client {
bool_t (*cl_control)(struct __rpc_client *, u_int,
void *);
} *cl_ops;
#else
AUTH *cl_auth; /* authenticator */
struct clnt_ops {
/* call remote procedure */
enum clnt_stat (*cl_call)(struct __rpc_client *,
rpcproc_t, xdrproc_t, void *, xdrproc_t,
void *, struct timeval);
/* abort a call */
void (*cl_abort)(struct __rpc_client *);
/* get specific error code */
void (*cl_geterr)(struct __rpc_client *,
struct rpc_err *);
/* frees results */
bool_t (*cl_freeres)(struct __rpc_client *,
xdrproc_t, void *);
/* destroy this structure */
void (*cl_destroy)(struct __rpc_client *);
/* the ioctl() of rpc */
bool_t (*cl_control)(struct __rpc_client *, u_int,
void *);
} *cl_ops;
#endif
void *cl_private; /* private stuff */
char *cl_netid; /* network token */
char *cl_tp; /* device name */
} CLIENT;
/*
* Timers used for the pseudo-transport protocol when using datagrams
*/
@ -154,8 +195,10 @@ struct rpc_timers {
/*
* Feedback values used for possible congestion and rate control
*/
#define FEEDBACK_REXMIT1 1 /* first retransmit */
#define FEEDBACK_OK 2 /* no retransmits */
#define FEEDBACK_OK 1 /* no retransmits */
#define FEEDBACK_REXMIT1 2 /* first retransmit */
#define FEEDBACK_REXMIT2 3 /* second and subsequent retransmit */
#define FEEDBACK_RECONNECT 4 /* client reconnect */
/* Used to set version of portmapper used in broadcast */
@ -171,6 +214,30 @@ struct rpc_timers {
*
*/
#ifdef _KERNEL
#define CLNT_ACQUIRE(rh) \
refcount_acquire(&(rh)->cl_refs)
#define CLNT_RELEASE(rh) \
if (refcount_release(&(rh)->cl_refs)) \
CLNT_DESTROY(rh)
/*
* enum clnt_stat
* CLNT_CALL_EXT(rh, ext, proc, xargs, argsp, xres, resp, timeout)
* CLIENT *rh;
* struct rpc_callextra *ext;
* rpcproc_t proc;
* xdrproc_t xargs;
* void *argsp;
* xdrproc_t xres;
* void *resp;
* struct timeval timeout;
*/
#define CLNT_CALL_EXT(rh, ext, proc, xargs, argsp, xres, resp, secs) \
((*(rh)->cl_ops->cl_call)(rh, ext, proc, xargs, \
argsp, xres, resp, secs))
#endif
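
Putting the pieces together, a kernel caller could pass per-call credentials and a feedback hook roughly as follows. This is a sketch only: the client handle, the credential, and the my_feedback/stats pair from the previous example are assumed, and NLM_TEST with void arguments is just a placeholder call.

struct rpc_callextra ext;
struct timeval tv = { 25, 0 };		/* arbitrary timeout */
enum clnt_stat stat;

bzero(&ext, sizeof(ext));
ext.rc_auth = authunix_create(cred);	/* per-call credentials */
ext.rc_feedback = my_feedback;		/* notified on retransmits */
ext.rc_feedback_arg = &stats;

stat = CLNT_CALL_EXT(client, &ext, NLM_TEST,
    (xdrproc_t) xdr_void, NULL, (xdrproc_t) xdr_void, NULL, tv);
AUTH_DESTROY(ext.rc_auth);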
/*
* enum clnt_stat
* CLNT_CALL(rh, proc, xargs, argsp, xres, resp, timeout)
@ -182,12 +249,21 @@ struct rpc_timers {
* void *resp;
* struct timeval timeout;
*/
#define CLNT_CALL(rh, proc, xargs, argsp, xres, resp, secs) \
((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \
#ifdef _KERNEL
#define CLNT_CALL(rh, proc, xargs, argsp, xres, resp, secs) \
((*(rh)->cl_ops->cl_call)(rh, NULL, proc, xargs, \
argsp, xres, resp, secs))
#define clnt_call(rh, proc, xargs, argsp, xres, resp, secs) \
((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \
#define clnt_call(rh, proc, xargs, argsp, xres, resp, secs) \
((*(rh)->cl_ops->cl_call)(rh, NULL, proc, xargs, \
argsp, xres, resp, secs))
#else
#define CLNT_CALL(rh, proc, xargs, argsp, xres, resp, secs) \
((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \
argsp, xres, resp, secs))
#define clnt_call(rh, proc, xargs, argsp, xres, resp, secs) \
((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \
argsp, xres, resp, secs))
#endif
/*
* void
@ -262,6 +338,8 @@ struct rpc_timers {
#define CLGET_WAITCHAN 22 /* get string used in msleep call */
#define CLSET_INTERRUPTIBLE 23 /* set interruptible flag */
#define CLGET_INTERRUPTIBLE 24 /* set interruptible flag */
#define CLSET_RETRIES 25 /* set retry count for reconnect */
#define CLGET_RETRIES 26 /* get retry count for reconnect */
#endif
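
A transport user that would rather give up than reconnect forever can bound the reconnect attempts with the new control codes; a small sketch (the count is arbitrary):

int retries = 5;		/* arbitrary bound for this example */

if (!CLNT_CONTROL(client, CLSET_RETRIES, &retries))
	printf("transport does not support CLSET_RETRIES\n");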
@ -534,6 +612,7 @@ __END_DECLS
#define rpc_createerr (*(__rpc_createerr()))
#endif
#ifndef _KERNEL
/*
* The simplified interface:
* enum clnt_stat
@ -612,7 +691,6 @@ extern enum clnt_stat rpc_broadcast_exp(const rpcprog_t, const rpcvers_t,
const int, const char *);
__END_DECLS
#ifndef _KERNEL
/* For backward compatibility */
#include <rpc/clnt_soc.h>
#endif

View File

@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
@ -70,8 +71,8 @@ __FBSDID("$FreeBSD$");
#endif
static bool_t time_not_ok(struct timeval *);
static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
xdrproc_t, void *, struct timeval);
static enum clnt_stat clnt_dg_call(CLIENT *, struct rpc_callextra *,
rpcproc_t, xdrproc_t, void *, xdrproc_t, void *, struct timeval);
static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_dg_abort(CLIENT *);
@ -91,10 +92,13 @@ static struct clnt_ops clnt_dg_ops = {
static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";
/*
* A pending RPC request which awaits a reply.
* A pending RPC request which awaits a reply. Requests which have
* received their reply will have cr_xid set to zero and cr_mrep to
* the mbuf chain of the reply.
*/
struct cu_request {
TAILQ_ENTRY(cu_request) cr_link;
CLIENT *cr_client; /* owner */
uint32_t cr_xid; /* XID of request */
struct mbuf *cr_mrep; /* reply received by upcall */
int cr_error; /* any error from upcall */
@ -123,6 +127,8 @@ struct cu_socket {
* Private data kept per client handle
*/
struct cu_data {
int cu_threads; /* # threads in clnt_dg_call */
bool_t cu_closing; /* TRUE if we are destroying */
struct socket *cu_socket; /* connection socket */
bool_t cu_closeit; /* opened by library */
struct sockaddr_storage cu_raddr; /* remote address */
@ -203,10 +209,12 @@ clnt_dg_create(
sendsz = ((sendsz + 3) / 4) * 4;
recvsz = ((recvsz + 3) / 4) * 4;
cu = mem_alloc(sizeof (*cu));
cu->cu_threads = 0;
cu->cu_closing = FALSE;
(void) memcpy(&cu->cu_raddr, svcaddr, (size_t)svcaddr->sa_len);
cu->cu_rlen = svcaddr->sa_len;
/* Other values can also be set through clnt_control() */
cu->cu_wait.tv_sec = 15; /* heuristically chosen */
cu->cu_wait.tv_sec = 3; /* heuristically chosen */
cu->cu_wait.tv_usec = 0;
cu->cu_total.tv_sec = -1;
cu->cu_total.tv_usec = -1;
@ -237,6 +245,7 @@ clnt_dg_create(
*/
cu->cu_closeit = FALSE;
cu->cu_socket = so;
soreserve(so, 256*1024, 256*1024);
SOCKBUF_LOCK(&so->so_rcv);
recheck_socket:
@ -274,6 +283,7 @@ clnt_dg_create(
}
SOCKBUF_UNLOCK(&so->so_rcv);
cl->cl_refs = 1;
cl->cl_ops = &clnt_dg_ops;
cl->cl_private = (caddr_t)(void *)cu;
cl->cl_auth = authnone_create();
@ -291,7 +301,8 @@ clnt_dg_create(
static enum clnt_stat
clnt_dg_call(
CLIENT *cl, /* client handle */
CLIENT *cl, /* client handle */
struct rpc_callextra *ext, /* call metadata */
rpcproc_t proc, /* procedure number */
xdrproc_t xargs, /* xdr routine for args */
void *argsp, /* pointer to args */
@ -301,30 +312,52 @@ clnt_dg_call(
{
struct cu_data *cu = (struct cu_data *)cl->cl_private;
struct cu_socket *cs = (struct cu_socket *) cu->cu_socket->so_upcallarg;
AUTH *auth;
XDR xdrs;
struct rpc_msg reply_msg;
bool_t ok;
int retrans; /* number of re-transmits so far */
int nrefreshes = 2; /* number of times to refresh cred */
struct timeval timeout;
struct timeval retransmit_time;
struct timeval next_sendtime, starttime, time_waited, tv;
struct timeval *tvp;
int timeout;
int retransmit_time;
int next_sendtime, starttime, time_waited, tv;
struct sockaddr *sa;
socklen_t salen;
uint32_t xid;
struct mbuf *mreq = NULL;
struct cu_request cr;
struct cu_request *cr;
int error;
cr = malloc(sizeof(struct cu_request), M_RPC, M_WAITOK);
mtx_lock(&cs->cs_lock);
cr.cr_mrep = NULL;
cr.cr_error = 0;
if (cu->cu_closing) {
mtx_unlock(&cs->cs_lock);
free(cr, M_RPC);
return (RPC_CANTSEND);
}
cu->cu_threads++;
if (ext)
auth = ext->rc_auth;
else
auth = cl->cl_auth;
cr->cr_client = cl;
cr->cr_mrep = NULL;
cr->cr_error = 0;
if (cu->cu_total.tv_usec == -1) {
timeout = utimeout; /* use supplied timeout */
tvp = &utimeout; /* use supplied timeout */
} else {
timeout = cu->cu_total; /* use default timeout */
tvp = &cu->cu_total; /* use default timeout */
}
if (tvp->tv_sec || tvp->tv_usec)
timeout = tvtohz(tvp);
else
timeout = 0;
if (cu->cu_connect && !cu->cu_connected) {
mtx_unlock(&cs->cs_lock);
@ -345,11 +378,11 @@ clnt_dg_call(
sa = (struct sockaddr *)&cu->cu_raddr;
salen = cu->cu_rlen;
}
time_waited.tv_sec = 0;
time_waited.tv_usec = 0;
retransmit_time = next_sendtime = cu->cu_wait;
time_waited = 0;
retrans = 0;
retransmit_time = next_sendtime = tvtohz(&cu->cu_wait);
getmicrotime(&starttime);
starttime = ticks;
call_again:
mtx_assert(&cs->cs_lock, MA_OWNED);
@ -376,7 +409,7 @@ clnt_dg_call(
goto get_reply;
if ((! XDR_PUTINT32(&xdrs, &proc)) ||
(! AUTH_MARSHALL(cl->cl_auth, &xdrs)) ||
(! AUTH_MARSHALL(auth, &xdrs)) ||
(! (*xargs)(&xdrs, argsp))) {
cu->cu_error.re_status = RPC_CANTENCODEARGS;
mtx_lock(&cs->cs_lock);
@ -384,9 +417,9 @@ clnt_dg_call(
}
m_fixhdr(mreq);
cr.cr_xid = xid;
cr->cr_xid = xid;
mtx_lock(&cs->cs_lock);
TAILQ_INSERT_TAIL(&cs->cs_pending, &cr, cr_link);
TAILQ_INSERT_TAIL(&cs->cs_pending, cr, cr_link);
mtx_unlock(&cs->cs_lock);
/*
@ -406,8 +439,7 @@ clnt_dg_call(
mtx_lock(&cs->cs_lock);
if (error) {
TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
TAILQ_REMOVE(&cs->cs_pending, cr, cr_link);
cu->cu_error.re_errno = error;
cu->cu_error.re_status = RPC_CANTSEND;
goto out;
@ -415,24 +447,24 @@ clnt_dg_call(
/*
* Check to see if we got an upcall while waiting for the
* lock. In both these cases, the request has been removed
* from cs->cs_pending.
* lock.
*/
if (cr.cr_error) {
cu->cu_error.re_errno = cr.cr_error;
if (cr->cr_error) {
TAILQ_REMOVE(&cs->cs_pending, cr, cr_link);
cu->cu_error.re_errno = cr->cr_error;
cu->cu_error.re_status = RPC_CANTRECV;
goto out;
}
if (cr.cr_mrep) {
if (cr->cr_mrep) {
TAILQ_REMOVE(&cs->cs_pending, cr, cr_link);
goto got_reply;
}
/*
* Hack to provide rpc-based message passing
*/
if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
if (cr.cr_xid)
TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
if (timeout == 0) {
TAILQ_REMOVE(&cs->cs_pending, cr, cr_link);
cu->cu_error.re_status = RPC_TIMEDOUT;
goto out;
}
@ -440,17 +472,23 @@ clnt_dg_call(
get_reply:
for (;;) {
/* Decide how long to wait. */
if (timevalcmp(&next_sendtime, &timeout, <)) {
if (next_sendtime < timeout)
tv = next_sendtime;
} else {
else
tv = timeout;
}
timevalsub(&tv, &time_waited);
if (tv.tv_sec < 0 || tv.tv_usec < 0)
tv.tv_sec = tv.tv_usec = 0;
tv -= time_waited;
error = msleep(&cr, &cs->cs_lock, cu->cu_waitflag,
cu->cu_waitchan, tvtohz(&tv));
if (tv > 0) {
if (cu->cu_closing)
error = 0;
else
error = msleep(cr, &cs->cs_lock,
cu->cu_waitflag, cu->cu_waitchan, tv);
} else {
error = EWOULDBLOCK;
}
TAILQ_REMOVE(&cs->cs_pending, cr, cr_link);
if (!error) {
/*
@ -458,8 +496,8 @@ clnt_dg_call(
* upcall had a receive error, report that,
* otherwise we have a reply.
*/
if (cr.cr_error) {
cu->cu_error.re_errno = cr.cr_error;
if (cr->cr_error) {
cu->cu_error.re_errno = cr->cr_error;
cu->cu_error.re_status = RPC_CANTRECV;
goto out;
}
@ -472,8 +510,6 @@ clnt_dg_call(
* re-send the request.
*/
if (error != EWOULDBLOCK) {
if (cr.cr_xid)
TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
cu->cu_error.re_errno = error;
if (error == EINTR)
cu->cu_error.re_status = RPC_INTR;
@ -482,29 +518,40 @@ clnt_dg_call(
goto out;
}
getmicrotime(&tv);
time_waited = tv;
timevalsub(&time_waited, &starttime);
time_waited = ticks - starttime;
/* Check for timeout. */
if (timevalcmp(&time_waited, &timeout, >)) {
if (cr.cr_xid)
TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
if (time_waited > timeout) {
cu->cu_error.re_errno = EWOULDBLOCK;
cu->cu_error.re_status = RPC_TIMEDOUT;
goto out;
}
/* Retransmit if necessary. */
if (timevalcmp(&time_waited, &next_sendtime, >)) {
if (cr.cr_xid)
TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
if (time_waited >= next_sendtime) {
if (ext && ext->rc_feedback) {
mtx_unlock(&cs->cs_lock);
if (retrans == 0)
ext->rc_feedback(FEEDBACK_REXMIT1,
proc, ext->rc_feedback_arg);
else
ext->rc_feedback(FEEDBACK_REXMIT2,
proc, ext->rc_feedback_arg);
mtx_lock(&cs->cs_lock);
}
if (cu->cu_closing) {
cu->cu_error.re_errno = ESHUTDOWN;
cu->cu_error.re_status = RPC_CANTRECV;
goto out;
}
retrans++;
/* update retransmit_time */
if (retransmit_time.tv_sec < RPC_MAX_BACKOFF)
timevaladd(&retransmit_time, &retransmit_time);
timevaladd(&next_sendtime, &retransmit_time);
if (retransmit_time < RPC_MAX_BACKOFF * hz)
retransmit_time = 2 * retransmit_time;
next_sendtime += retransmit_time;
goto send_again;
}
TAILQ_INSERT_TAIL(&cs->cs_pending, cr, cr_link);
}
got_reply:
@ -514,10 +561,13 @@ clnt_dg_call(
*/
mtx_unlock(&cs->cs_lock);
xdrmbuf_create(&xdrs, cr.cr_mrep, XDR_DECODE);
if (ext && ext->rc_feedback)
ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);
xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
ok = xdr_replymsg(&xdrs, &reply_msg);
XDR_DESTROY(&xdrs);
cr.cr_mrep = NULL;
cr->cr_mrep = NULL;
mtx_lock(&cs->cs_lock);
@ -562,10 +612,17 @@ clnt_dg_call(
if (mreq)
m_freem(mreq);
if (cr.cr_mrep)
m_freem(cr.cr_mrep);
if (cr->cr_mrep)
m_freem(cr->cr_mrep);
cu->cu_threads--;
if (cu->cu_closing)
wakeup(cu);
mtx_unlock(&cs->cs_lock);
free(cr, M_RPC);
return (cu->cu_error.re_status);
}
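
clnt_dg_call now does all of its timing in scheduler ticks, converting the timeval inputs once with tvtohz() and doubling the retransmit interval up to RPC_MAX_BACKOFF seconds. The backoff arithmetic, reduced to a sketch with the surrounding RPC machinery elided ("send_request", "total_timeout" and "cu_wait" are schematic):

/*
 * Sketch of the tick-based retransmit backoff; every value is in
 * scheduler ticks.
 */
int timeout = tvtohz(&total_timeout);	/* overall limit */
int retransmit_time = tvtohz(&cu_wait);	/* initial interval */
int next_sendtime = retransmit_time;
int starttime = ticks;
int time_waited = 0;

send_request();
for (;;) {
	/* ... msleep() until a reply, an error or the next deadline ... */
	time_waited = ticks - starttime;
	if (time_waited > timeout)
		break;			/* RPC_TIMEDOUT */
	if (time_waited >= next_sendtime) {
		/* Double the interval, capped at RPC_MAX_BACKOFF secs. */
		if (retransmit_time < RPC_MAX_BACKOFF * hz)
			retransmit_time *= 2;
		next_sendtime += retransmit_time;
		send_request();		/* retransmit */
	}
}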
@ -732,30 +789,44 @@ clnt_dg_destroy(CLIENT *cl)
{
struct cu_data *cu = (struct cu_data *)cl->cl_private;
struct cu_socket *cs = (struct cu_socket *) cu->cu_socket->so_upcallarg;
struct cu_request *cr;
struct socket *so = NULL;
bool_t lastsocketref;
SOCKBUF_LOCK(&cu->cu_socket->so_rcv);
mtx_lock(&cs->cs_lock);
/*
* Abort any pending requests and wait until everyone
* has finished with clnt_dg_call.
*/
cu->cu_closing = TRUE;
TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) {
if (cr->cr_client == cl) {
cr->cr_xid = 0;
cr->cr_error = ESHUTDOWN;
wakeup(cr);
}
}
while (cu->cu_threads)
msleep(cu, &cs->cs_lock, 0, "rpcclose", 0);
cs->cs_refs--;
if (cs->cs_refs == 0) {
mtx_destroy(&cs->cs_lock);
SOCKBUF_LOCK(&cu->cu_socket->so_rcv);
cu->cu_socket->so_upcallarg = NULL;
cu->cu_socket->so_upcall = NULL;
cu->cu_socket->so_rcv.sb_flags &= ~SB_UPCALL;
mtx_destroy(&cs->cs_lock);
SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
mem_free(cs, sizeof(*cs));
lastsocketref = TRUE;
} else {
mtx_unlock(&cs->cs_lock);
SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
lastsocketref = FALSE;
}
if (cu->cu_closeit) {
KASSERT(lastsocketref, ("clnt_dg_destroy(): closing a socket "
"shared with other clients"));
if (cu->cu_closeit && lastsocketref) {
so = cu->cu_socket;
cu->cu_socket = NULL;
}
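
clnt_dg_destroy now uses a close-and-drain protocol: mark the handle closing, fail every pending request owned by this client, then sleep until the last caller leaves clnt_dg_call (clnt_vc_destroy below does the same). Distilled to its shape, with a schematic handle "h":

/*
 * Sketch of the close-and-drain protocol shared by both transports;
 * "h" stands in for the per-client private data.
 */
mtx_lock(&h->lock);
h->closing = TRUE;
TAILQ_FOREACH(req, &h->pending, link) {
	req->error = ESHUTDOWN;		/* fail the pending call */
	wakeup(req);			/* wake its sleeping thread */
}
while (h->threads > 0)
	msleep(h, &h->lock, 0, "rpcclose", 0);
mtx_unlock(&h->lock);
/* No thread is inside the call path now; teardown is safe. */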
@ -812,10 +883,10 @@ clnt_dg_soupcall(struct socket *so, void *arg, int waitflag)
if (error) {
mtx_lock(&cs->cs_lock);
TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) {
cr->cr_xid = 0;
cr->cr_error = error;
wakeup(cr);
}
TAILQ_INIT(&cs->cs_pending);
mtx_unlock(&cs->cs_lock);
break;
}
@ -825,7 +896,11 @@ clnt_dg_soupcall(struct socket *so, void *arg, int waitflag)
*/
m = m_pullup(m, sizeof(xid));
if (!m)
break;
/*
* Should never happen.
*/
continue;
xid = ntohl(*mtod(m, uint32_t *));
/*
@ -836,14 +911,13 @@ clnt_dg_soupcall(struct socket *so, void *arg, int waitflag)
TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) {
if (cr->cr_xid == xid) {
/*
* This one matches. We snip it out of
* the pending list and leave the
* This one matches. We leave the
* reply mbuf in cr->cr_mrep. Set the
* XID to zero so that clnt_dg_call
* can know not to repeat the
* TAILQ_REMOVE.
* XID to zero so that we will ignore
* any duplicated replies that arrive
* before clnt_dg_call removes it from
* the queue.
*/
TAILQ_REMOVE(&cs->cs_pending, cr, cr_link);
cr->cr_xid = 0;
cr->cr_mrep = m;
cr->cr_error = 0;

View File

@ -30,6 +30,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
@ -44,8 +45,8 @@ __FBSDID("$FreeBSD$");
#include <rpc/rpc.h>
#include <rpc/rpc_com.h>
static enum clnt_stat clnt_reconnect_call(CLIENT *, rpcproc_t,
xdrproc_t, void *, xdrproc_t, void *, struct timeval);
static enum clnt_stat clnt_reconnect_call(CLIENT *, struct rpc_callextra *,
rpcproc_t, xdrproc_t, void *, xdrproc_t, void *, struct timeval);
static void clnt_reconnect_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_reconnect_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_reconnect_abort(CLIENT *);
@ -62,6 +63,7 @@ static struct clnt_ops clnt_reconnect_ops = {
};
struct rc_data {
struct mtx rc_lock;
struct sockaddr_storage rc_addr; /* server address */
struct netconfig* rc_nconf; /* network type */
rpcprog_t rc_prog; /* program number */
@ -70,8 +72,10 @@ struct rc_data {
size_t rc_recvsz;
struct timeval rc_timeout;
struct timeval rc_retry;
int rc_retries;
const char *rc_waitchan;
int rc_intr;
int rc_connecting;
CLIENT* rc_client; /* underlying RPC client */
};
@ -94,6 +98,7 @@ clnt_reconnect_create(
cl = mem_alloc(sizeof (CLIENT));
rc = mem_alloc(sizeof (*rc));
mtx_init(&rc->rc_lock, "rc->rc_lock", NULL, MTX_DEF);
(void) memcpy(&rc->rc_addr, svcaddr, (size_t)svcaddr->sa_len);
rc->rc_nconf = nconf;
rc->rc_prog = program;
@ -102,12 +107,15 @@ clnt_reconnect_create(
rc->rc_recvsz = recvsz;
rc->rc_timeout.tv_sec = -1;
rc->rc_timeout.tv_usec = -1;
rc->rc_retry.tv_sec = 15;
rc->rc_retry.tv_sec = 3;
rc->rc_retry.tv_usec = 0;
rc->rc_retries = INT_MAX;
rc->rc_waitchan = "rpcrecv";
rc->rc_intr = 0;
rc->rc_connecting = FALSE;
rc->rc_client = NULL;
cl->cl_refs = 1;
cl->cl_ops = &clnt_reconnect_ops;
cl->cl_private = (caddr_t)(void *)rc;
cl->cl_auth = authnone_create();
@ -121,13 +129,39 @@ clnt_reconnect_connect(CLIENT *cl)
{
struct rc_data *rc = (struct rc_data *)cl->cl_private;
struct socket *so;
enum clnt_stat stat;
int error;
int one = 1;
mtx_lock(&rc->rc_lock);
again:
if (rc->rc_connecting) {
while (!rc->rc_client) {
error = msleep(rc, &rc->rc_lock,
rc->rc_intr ? PCATCH : 0, "rpcrecon", 0);
if (error) {
mtx_unlock(&rc->rc_lock);
return (RPC_INTR);
}
}
/*
* If the other guy failed to connect, we might as
* well have another go.
*/
if (!rc->rc_client && !rc->rc_connecting)
goto again;
mtx_unlock(&rc->rc_lock);
return (RPC_SUCCESS);
} else {
rc->rc_connecting = TRUE;
}
mtx_unlock(&rc->rc_lock);
so = __rpc_nconf2socket(rc->rc_nconf);
if (!so) {
rpc_createerr.cf_stat = RPC_TLIERROR;
stat = rpc_createerr.cf_stat = RPC_TLIERROR;
rpc_createerr.cf_error.re_errno = 0;
return (RPC_TLIERROR);
goto out;
}
if (rc->rc_nconf->nc_semantics == NC_TPI_CLTS)
@ -139,8 +173,10 @@ clnt_reconnect_connect(CLIENT *cl)
(struct sockaddr *) &rc->rc_addr, rc->rc_prog, rc->rc_vers,
rc->rc_sendsz, rc->rc_recvsz);
if (!rc->rc_client)
return (rpc_createerr.cf_stat);
if (!rc->rc_client) {
stat = rpc_createerr.cf_stat;
goto out;
}
CLNT_CONTROL(rc->rc_client, CLSET_FD_CLOSE, 0);
CLNT_CONTROL(rc->rc_client, CLSET_CONNECT, &one);
@ -148,13 +184,21 @@ clnt_reconnect_connect(CLIENT *cl)
CLNT_CONTROL(rc->rc_client, CLSET_RETRY_TIMEOUT, &rc->rc_retry);
CLNT_CONTROL(rc->rc_client, CLSET_WAITCHAN, &rc->rc_waitchan);
CLNT_CONTROL(rc->rc_client, CLSET_INTERRUPTIBLE, &rc->rc_intr);
stat = RPC_SUCCESS;
return (RPC_SUCCESS);
out:
mtx_lock(&rc->rc_lock);
rc->rc_connecting = FALSE;
wakeup(rc);
mtx_unlock(&rc->rc_lock);
return (stat);
}
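
clnt_reconnect_connect lets exactly one thread build the new connection while latecomers sleep on the handle, and the connector always clears rc_connecting and wakes the waiters, even on failure. In outline, with schematic names ("lock", "connecting", "client", "try_to_connect"):

/*
 * Sketch of the single-connector handshake. Threads that lose the
 * race sleep until the winner installs a client or gives up.
 */
mtx_lock(&lock);
if (connecting) {
	while (connecting) {
		if (msleep(&connecting, &lock, PCATCH, "rpcrecon", 0)) {
			mtx_unlock(&lock);
			return (RPC_INTR);
		}
	}
	mtx_unlock(&lock);
	return (client != NULL ? RPC_SUCCESS : RPC_FAILED);
}
connecting = TRUE;
mtx_unlock(&lock);

client = try_to_connect();		/* may sleep; no locks held */

mtx_lock(&lock);
connecting = FALSE;
wakeup(&connecting);			/* wake all waiters, win or lose */
mtx_unlock(&lock);
return (client != NULL ? RPC_SUCCESS : RPC_FAILED);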
static enum clnt_stat
clnt_reconnect_call(
CLIENT *cl, /* client handle */
CLIENT *cl, /* client handle */
struct rpc_callextra *ext, /* call metadata */
rpcproc_t proc, /* procedure number */
xdrproc_t xargs, /* xdr routine for args */
void *argsp, /* pointer to args */
@ -163,8 +207,11 @@ clnt_reconnect_call(
struct timeval utimeout) /* seconds to wait before giving up */
{
struct rc_data *rc = (struct rc_data *)cl->cl_private;
CLIENT *client;
enum clnt_stat stat;
int tries;
tries = 0;
do {
if (!rc->rc_client) {
stat = clnt_reconnect_connect(cl);
@ -172,9 +219,14 @@ clnt_reconnect_call(
return (stat);
}
stat = CLNT_CALL(rc->rc_client, proc, xargs, argsp,
mtx_lock(&rc->rc_lock);
CLNT_ACQUIRE(rc->rc_client);
client = rc->rc_client;
mtx_unlock(&rc->rc_lock);
stat = CLNT_CALL_EXT(client, ext, proc, xargs, argsp,
xresults, resultsp, utimeout);
CLNT_RELEASE(client);
if (stat == RPC_TIMEDOUT) {
/*
* Check for async send misfeature for NLM
@ -184,16 +236,33 @@ clnt_reconnect_call(
&& rc->rc_timeout.tv_usec == 0)
|| (rc->rc_timeout.tv_sec == -1
&& utimeout.tv_sec == 0
&& utimeout.tv_usec == 0))
&& utimeout.tv_usec == 0)) {
break;
}
}
if (stat == RPC_INTR)
break;
if (stat != RPC_SUCCESS) {
CLNT_DESTROY(rc->rc_client);
rc->rc_client = NULL;
tries++;
if (tries >= rc->rc_retries)
break;
if (ext && ext->rc_feedback)
ext->rc_feedback(FEEDBACK_RECONNECT, proc,
ext->rc_feedback_arg);
mtx_lock(&rc->rc_lock);
/*
* Make sure that someone else hasn't already
* reconnected.
*/
if (rc->rc_client == client) {
CLNT_RELEASE(rc->rc_client);
rc->rc_client = NULL;
}
mtx_unlock(&rc->rc_lock);
}
} while (stat != RPC_SUCCESS);
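
The loop above pins the underlying CLIENT with CLNT_ACQUIRE before releasing rc_lock, so a concurrent reconnect cannot destroy it mid-call; on failure it clears rc_client only if no other thread has already replaced it. The essential pattern, schematically:

/*
 * Sketch: pin a shared object across an unlocked call, and
 * compare-before-clear when invalidating the shared pointer.
 */
mtx_lock(&rc_lock);
CLNT_ACQUIRE(rc_client);		/* +1 ref for our unlocked use */
client = rc_client;
mtx_unlock(&rc_lock);

stat = CLNT_CALL_EXT(client, ext, proc, xargs, argsp, xres, resp, tv);
CLNT_RELEASE(client);			/* drop our pin */

if (stat != RPC_SUCCESS && stat != RPC_INTR) {
	mtx_lock(&rc_lock);
	if (rc_client == client) {
		/* Still the handle we used; drop the cached ref. */
		CLNT_RELEASE(rc_client);
		rc_client = NULL;
	}
	mtx_unlock(&rc_lock);
}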
@ -294,6 +363,14 @@ clnt_reconnect_control(CLIENT *cl, u_int request, void *info)
*(int *) info = rc->rc_intr;
break;
case CLSET_RETRIES:
rc->rc_retries = *(int *) info;
break;
case CLGET_RETRIES:
*(int *) info = rc->rc_retries;
break;
default:
return (FALSE);
}

View File

@ -80,8 +80,8 @@ struct cmessage {
struct cmsgcred cmcred;
};
static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
xdrproc_t, void *, struct timeval);
static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
rpcproc_t, xdrproc_t, void *, xdrproc_t, void *, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
@ -100,7 +100,9 @@ static struct clnt_ops clnt_vc_ops = {
};
/*
* A pending RPC request which awaits a reply.
* A pending RPC request which awaits a reply. Requests which have
* received their reply will have cr_xid set to zero and cr_mrep to
* the mbuf chain of the reply.
*/
struct ct_request {
TAILQ_ENTRY(ct_request) cr_link;
@ -113,6 +115,8 @@ TAILQ_HEAD(ct_request_list, ct_request);
struct ct_data {
struct mtx ct_lock;
int ct_threads; /* number of threads in clnt_vc_call */
bool_t ct_closing; /* TRUE if we are destroying client */
struct socket *ct_socket; /* connection socket */
bool_t ct_closeit; /* close it on destroy */
struct timeval ct_wait; /* wait interval in milliseconds */
@ -161,7 +165,7 @@ clnt_vc_create(
static uint32_t disrupt;
struct __rpc_sockinfo si;
XDR xdrs;
int error;
int error, interrupted;
if (disrupt == 0)
disrupt = (uint32_t)(long)raddr;
@ -170,10 +174,31 @@ clnt_vc_create(
ct = (struct ct_data *)mem_alloc(sizeof (*ct));
mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
ct->ct_threads = 0;
ct->ct_closing = FALSE;
if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
error = soconnect(so, raddr, curthread);
SOCK_LOCK(so);
interrupted = 0;
while ((so->so_state & SS_ISCONNECTING)
&& so->so_error == 0) {
error = msleep(&so->so_timeo, SOCK_MTX(so),
PSOCK | PCATCH, "connec", 0);
if (error) {
if (error == EINTR || error == ERESTART)
interrupted = 1;
break;
}
}
if (error == 0) {
error = so->so_error;
so->so_error = 0;
}
SOCK_UNLOCK(so);
if (error) {
if (!interrupted)
so->so_state &= ~SS_ISCONNECTING;
rpc_createerr.cf_stat = RPC_SYSTEMERROR;
rpc_createerr.cf_error.re_errno = error;
goto err;
@ -224,6 +249,7 @@ clnt_vc_create(
* Create a client handle which uses xdrrec for serialization
* and authnone for authentication.
*/
cl->cl_refs = 1;
cl->cl_ops = &clnt_vc_ops;
cl->cl_private = ct;
cl->cl_auth = authnone_create();
@ -255,6 +281,7 @@ clnt_vc_create(
static enum clnt_stat
clnt_vc_call(
CLIENT *cl,
struct rpc_callextra *ext,
rpcproc_t proc,
xdrproc_t xdr_args,
void *args_ptr,
@ -263,6 +290,7 @@ clnt_vc_call(
struct timeval utimeout)
{
struct ct_data *ct = (struct ct_data *) cl->cl_private;
AUTH *auth;
XDR xdrs;
struct rpc_msg reply_msg;
bool_t ok;
@ -270,13 +298,27 @@ clnt_vc_call(
struct timeval timeout;
uint32_t xid;
struct mbuf *mreq = NULL;
struct ct_request cr;
struct ct_request *cr;
int error;
cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);
mtx_lock(&ct->ct_lock);
cr.cr_mrep = NULL;
cr.cr_error = 0;
if (ct->ct_closing) {
mtx_unlock(&ct->ct_lock);
free(cr, M_RPC);
return (RPC_CANTSEND);
}
ct->ct_threads++;
if (ext)
auth = ext->rc_auth;
else
auth = cl->cl_auth;
cr->cr_mrep = NULL;
cr->cr_error = 0;
if (ct->ct_wait.tv_usec == -1) {
timeout = utimeout; /* use supplied timeout */
@ -311,12 +353,12 @@ clnt_vc_call(
ct->ct_error.re_status = RPC_SUCCESS;
if ((! XDR_PUTINT32(&xdrs, &proc)) ||
(! AUTH_MARSHALL(cl->cl_auth, &xdrs)) ||
(! AUTH_MARSHALL(auth, &xdrs)) ||
(! (*xdr_args)(&xdrs, args_ptr))) {
if (ct->ct_error.re_status == RPC_SUCCESS)
ct->ct_error.re_status = RPC_CANTENCODEARGS;
m_freem(mreq);
return (ct->ct_error.re_status);
mtx_lock(&ct->ct_lock);
goto out;
}
m_fixhdr(mreq);
@ -327,9 +369,9 @@ clnt_vc_call(
*mtod(mreq, uint32_t *) =
htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));
cr.cr_xid = xid;
cr->cr_xid = xid;
mtx_lock(&ct->ct_lock);
TAILQ_INSERT_TAIL(&ct->ct_pending, &cr, cr_link);
TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
mtx_unlock(&ct->ct_lock);
/*
@ -343,10 +385,8 @@ clnt_vc_call(
reply_msg.acpted_rply.ar_results.proc = xdr_results;
mtx_lock(&ct->ct_lock);
if (error) {
TAILQ_REMOVE(&ct->ct_pending, &cr, cr_link);
TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
ct->ct_error.re_errno = error;
ct->ct_error.re_status = RPC_CANTSEND;
goto out;
@ -357,12 +397,14 @@ clnt_vc_call(
* lock. In both these cases, the request has been removed
* from ct->ct_pending.
*/
if (cr.cr_error) {
ct->ct_error.re_errno = cr.cr_error;
if (cr->cr_error) {
TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
ct->ct_error.re_errno = cr->cr_error;
ct->ct_error.re_status = RPC_CANTRECV;
goto out;
}
if (cr.cr_mrep) {
if (cr->cr_mrep) {
TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
goto got_reply;
}
@ -370,23 +412,22 @@ clnt_vc_call(
* Hack to provide rpc-based message passing
*/
if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
if (cr.cr_xid)
TAILQ_REMOVE(&ct->ct_pending, &cr, cr_link);
TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
ct->ct_error.re_status = RPC_TIMEDOUT;
goto out;
}
error = msleep(&cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
tvtohz(&timeout));
TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
if (error) {
/*
* The sleep returned an error so our request is still
* on the list. Turn the error code into an
* appropriate client status.
*/
if (cr.cr_xid)
TAILQ_REMOVE(&ct->ct_pending, &cr, cr_link);
ct->ct_error.re_errno = error;
switch (error) {
case EINTR:
@ -405,8 +446,8 @@ clnt_vc_call(
* upcall had a receive error, report that,
* otherwise we have a reply.
*/
if (cr.cr_error) {
ct->ct_error.re_errno = cr.cr_error;
if (cr->cr_error) {
ct->ct_error.re_errno = cr->cr_error;
ct->ct_error.re_status = RPC_CANTRECV;
goto out;
}
@ -419,10 +460,10 @@ clnt_vc_call(
*/
mtx_unlock(&ct->ct_lock);
xdrmbuf_create(&xdrs, cr.cr_mrep, XDR_DECODE);
xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
ok = xdr_replymsg(&xdrs, &reply_msg);
XDR_DESTROY(&xdrs);
cr.cr_mrep = NULL;
cr->cr_mrep = NULL;
mtx_lock(&ct->ct_lock);
@ -466,10 +507,17 @@ clnt_vc_call(
if (mreq)
m_freem(mreq);
if (cr.cr_mrep)
m_freem(cr.cr_mrep);
if (cr->cr_mrep)
m_freem(cr->cr_mrep);
ct->ct_threads--;
if (ct->ct_closing)
wakeup(ct);
mtx_unlock(&ct->ct_lock);
free(cr, M_RPC);
return (ct->ct_error.re_status);
}
@ -628,6 +676,7 @@ static void
clnt_vc_destroy(CLIENT *cl)
{
struct ct_data *ct = (struct ct_data *) cl->cl_private;
struct ct_request *cr;
struct socket *so = NULL;
mtx_lock(&ct->ct_lock);
@ -639,8 +688,19 @@ clnt_vc_destroy(CLIENT *cl)
ct->ct_socket->so_rcv.sb_flags &= ~SB_UPCALL;
SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
KASSERT(!TAILQ_FIRST(&ct->ct_pending),
("Destroying RPC client with pending RPC requests"));
/*
* Abort any pending requests and wait until everyone
* has finished with clnt_vc_call.
*/
ct->ct_closing = TRUE;
TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
cr->cr_xid = 0;
cr->cr_error = ESHUTDOWN;
wakeup(cr);
}
while (ct->ct_threads)
msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
if (ct->ct_closeit) {
so = ct->ct_socket;
@ -732,7 +792,6 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
cr->cr_error = error;
wakeup(cr);
}
TAILQ_INIT(&ct->ct_pending);
mtx_unlock(&ct->ct_lock);
break;
}
@ -795,19 +854,14 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
if (cr->cr_xid == xid) {
/*
* This one
* matches. We snip it
* out of the pending
* list and leave the
* reply mbuf in
* matches. We leave
* the reply mbuf in
* cr->cr_mrep. Set
* the XID to zero so
* that clnt_vc_call
* can know not to
* repeat the
* TAILQ_REMOVE.
* that we will ignore
* any duplicated
* replies.
*/
TAILQ_REMOVE(&ct->ct_pending,
cr, cr_link);
cr->cr_xid = 0;
cr->cr_mrep = ct->ct_record;
cr->cr_error = 0;

View File

@ -132,6 +132,15 @@ svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
struct sockaddr* sa;
int error;
if (so->so_state & SS_ISCONNECTED) {
error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
if (error)
return (NULL);
xprt = svc_vc_create_conn(pool, so, sa);
free(sa, M_SONAME);
return (xprt);
}
xprt = mem_alloc(sizeof(SVCXPRT));
mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
xprt->xp_pool = pool;
@ -180,8 +189,32 @@ svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
SVCXPRT *xprt = NULL;
struct cf_conn *cd = NULL;
struct sockaddr* sa = NULL;
struct sockopt opt;
int one = 1;
int error;
bzero(&opt, sizeof(struct sockopt));
opt.sopt_dir = SOPT_SET;
opt.sopt_level = SOL_SOCKET;
opt.sopt_name = SO_KEEPALIVE;
opt.sopt_val = &one;
opt.sopt_valsize = sizeof(one);
error = sosetopt(so, &opt);
if (error)
return (NULL);
if (so->so_proto->pr_protocol == IPPROTO_TCP) {
bzero(&opt, sizeof(struct sockopt));
opt.sopt_dir = SOPT_SET;
opt.sopt_level = IPPROTO_TCP;
opt.sopt_name = TCP_NODELAY;
opt.sopt_val = &one;
opt.sopt_valsize = sizeof(one);
error = sosetopt(so, &opt);
if (error)
return (NULL);
}
cd = mem_alloc(sizeof(*cd));
cd->strm_stat = XPRT_IDLE;
@ -306,8 +339,6 @@ svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
struct socket *so = NULL;
struct sockaddr *sa = NULL;
struct sockopt opt;
int one = 1;
int error;
/*
@ -351,16 +382,6 @@ svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg)
sa = 0;
error = soaccept(so, &sa);
if (!error) {
bzero(&opt, sizeof(struct sockopt));
opt.sopt_dir = SOPT_SET;
opt.sopt_level = IPPROTO_TCP;
opt.sopt_name = TCP_NODELAY;
opt.sopt_val = &one;
opt.sopt_valsize = sizeof(one);
error = sosetopt(so, &opt);
}
if (error) {
/*
* XXX not sure if I need to call sofree or soclose here.
@ -374,7 +395,9 @@ svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg)
* svc_vc_create_conn will call xprt_register - we don't need
* to do anything with the new connection.
*/
svc_vc_create_conn(xprt->xp_pool, so, sa);
if (!svc_vc_create_conn(xprt->xp_pool, so, sa))
soclose(so);
free(sa, M_SONAME);
return (FALSE); /* there is never an rpc msg to be processed */

View File

@ -227,6 +227,7 @@ typedef __pid_t pid_t;
#define F_FLOCK 0x020 /* Use flock(2) semantics for lock */
#define F_POSIX 0x040 /* Use POSIX semantics for lock */
#define F_REMOTE 0x080 /* Lock owner is remote NFS client */
#define F_NOINTR 0x100 /* Ignore signals when waiting */
#endif
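
A kernel caller that must not have its lock wait aborted by a signal, for instance while reclaiming locks after a server restart, might combine the new flag with a blocking request. A hypothetical call; the lock owner id and the exact flag mix are illustrative only:

struct flock fl;
int error;

fl.l_start = 0;
fl.l_len = 0;			/* whole file */
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
/* F_WAIT blocks; F_NOINTR makes the sleep uninterruptible. */
error = VOP_ADVLOCK(vp, id, F_SETLK, &fl, F_POSIX | F_WAIT | F_NOINTR);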
/*

View File

@ -40,6 +40,7 @@
#include <sys/_lock.h>
#include <sys/_sx.h>
struct flock;
struct vop_advlock_args;
struct vop_advlockasync_args;
@ -118,9 +119,13 @@ struct lockf {
};
LIST_HEAD(lockf_list, lockf);
typedef int lf_iterator(struct vnode *, struct flock *, void *);
int lf_advlock(struct vop_advlock_args *, struct lockf **, u_quad_t);
int lf_advlockasync(struct vop_advlockasync_args *, struct lockf **, u_quad_t);
void lf_purgelocks(struct vnode *vp, struct lockf **statep);
int lf_iteratelocks_sysid(int sysid, lf_iterator *, void *);
int lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *, void *);
int lf_countlocks(int sysid);
void lf_clearremotesys(int sysid);
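
The iterator interface generalizes walkers like lf_clearremotesys: the callback sees each matching lock rendered as a struct flock and can abort the walk by returning non-zero. A hypothetical consumer that counts a remote host's active locks:

/*
 * Hypothetical lf_iterator callback: count locks for one sysid.
 */
static int
count_one(struct vnode *vp, struct flock *flp, void *arg)
{
	int *countp = arg;

	(*countp)++;
	return (0);		/* non-zero would stop the walk */
}

static int
count_host_locks(int sysid)
{
	int count = 0;

	lf_iteratelocks_sysid(sysid, count_one, &count);
	return (count);
}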

View File

@ -57,7 +57,7 @@
* is created, otherwise 1.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 800039 /* Master, propagated to newvers */
#define __FreeBSD_version 800040 /* Master, propagated to newvers */
#ifndef LOCORE
#include <sys/types.h>

View File

@ -31,6 +31,7 @@
#ifdef __FreeBSD__
#include <sys/mount.h>
#endif
#include <sys/stat.h>
#include <sys/wait.h>
#include <err.h>
@ -56,16 +57,28 @@
int verbose = 0;
static int
make_file(const char *dir, off_t sz)
make_file(const char *pathname, off_t sz)
{
struct stat st;
const char *template = "/flocktempXXXXXX";
size_t len;
char *filename;
int fd;
len = strlen(dir) + strlen(template) + 1;
if (stat(pathname, &st) == 0) {
if (S_ISREG(st.st_mode)) {
fd = open(pathname, O_RDWR);
if (fd < 0)
err(1, "open(%s)", pathname);
if (ftruncate(fd, sz) < 0)
err(1, "ftruncate");
return (fd);
}
}
len = strlen(pathname) + strlen(template) + 1;
filename = malloc(len);
strcpy(filename, dir);
strcpy(filename, pathname);
strcat(filename, template);
fd = mkstemp(filename);
if (fd < 0)
@ -84,6 +97,24 @@ ignore_alarm(int __unused sig)
{
}
static int
safe_waitpid(pid_t pid)
{
int save_errno;
int status;
save_errno = errno;
errno = 0;
while (waitpid(pid, &status, 0) != pid) {
if (errno == EINTR)
continue;
err(1, "waitpid");
}
errno = save_errno;
return (status);
}
#define FAIL(test) \
do { \
if (test) { \
@ -103,7 +134,7 @@ ignore_alarm(int __unused sig)
* except for the lock type which is set to F_UNLCK.
*/
static int
test1(int fd)
test1(int fd, __unused int argc, const __unused char **argv)
{
struct flock fl1, fl2;
@ -128,24 +159,6 @@ test1(int fd)
SUCCEED;
}
static int
safe_waitpid(pid_t pid)
{
int save_errno;
int stat;
save_errno = errno;
errno = 0;
while (waitpid(pid, &stat, 0) != pid) {
if (errno == EINTR)
continue;
err(1, "waitpid");
}
errno = save_errno;
return (stat);
}
/*
* Test 2 - F_SETLK on locked region
*
@ -153,7 +166,7 @@ safe_waitpid(pid_t pid)
* immediately with EACCES or EAGAIN.
*/
static int
test2(int fd)
test2(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -224,7 +237,7 @@ test2(int fd)
* in FreeBSD's client (and server) lockd implementation.
*/
static int
test3(int fd)
test3(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -294,7 +307,7 @@ test3(int fd)
* Get the first lock that blocks the lock.
*/
static int
test4(int fd)
test4(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -371,7 +384,7 @@ test4(int fd)
* EDEADLK is returned.
*/
static int
test5(int fd)
test5(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -426,8 +439,11 @@ test5(int fd)
sleep(1);
/*
* fcntl should immediately return -1 with errno set to EDEADLK.
* fcntl should immediately return -1 with errno set to
* EDEADLK. If the alarm fires, we failed to detect the
* deadlock.
*/
alarm(1);
printf("5 - F_SETLKW simple deadlock: ");
fl.l_start = 1;
@ -444,6 +460,11 @@ test5(int fd)
if (fcntl(fd, F_SETLK, &fl) < 0)
err(1, "F_UNLCK");
/*
* Cancel the alarm to avoid confusing later tests.
*/
alarm(0);
SUCCEED;
}
@ -457,7 +478,7 @@ test5(int fd)
* (due to C2's blocking attempt to lock byte zero).
*/
static int
test6(int fd)
test6(int fd, __unused int argc, const __unused char **argv)
{
/*
* Because our test relies on the child process being blocked
@ -560,7 +581,7 @@ test6(int fd)
* immediately with EACCES or EAGAIN.
*/
static int
test7(int fd)
test7(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -632,7 +653,7 @@ test7(int fd)
* it.
*/
static int
test8(int fd)
test8(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -709,7 +730,7 @@ test8(int fd)
* immediately with EACCES or EAGAIN.
*/
static int
test9(int fd)
test9(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -781,7 +802,7 @@ test9(int fd)
* system ID of the system that owns that process
*/
static int
test10(int fd)
test10(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -854,7 +875,7 @@ test10(int fd)
* is added.
*/
static int
test11(int fd)
test11(int fd, __unused int argc, const __unused char **argv)
{
#ifdef F_SETLK_REMOTE
struct flock fl;
@ -934,7 +955,7 @@ test11(int fd)
* process waits until the request can be satisfied.
*/
static int
test12(int fd)
test12(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -1011,7 +1032,7 @@ test12(int fd)
* process waits until the request can be satisfied.
*/
static int
test13(int fd)
test13(int fd, __unused int argc, const __unused char **argv)
{
/*
* We create a child process to hold the lock which we will
@ -1096,14 +1117,14 @@ test13(int fd)
* Test 14 - soak test
*/
static int
test14(int fd)
test14(int fd, int argc, const char **argv)
{
#define CHILD_COUNT 20
/*
* We create a set of child processes and let each one run
* through a random sequence of locks and unlocks.
*/
int i, j, id;
int i, j, id, id_base;
int pids[CHILD_COUNT], pid;
char buf[128];
char tbuf[128];
@ -1113,11 +1134,13 @@ test14(int fd)
struct itimerval itv;
int status;
id_base = 0;
if (argc >= 2)
id_base = strtol(argv[1], NULL, 0);
printf("14 - soak test: ");
fflush(stdout);
memset(buf, 255, sizeof(buf));
pwrite(fd, buf, sizeof(buf), 0);
for (i = 0; i < 128; i++)
map[i] = F_UNLCK;
@ -1137,8 +1160,8 @@ test14(int fd)
/*
* Child - do some work and exit.
*/
id = getpid();
srandom(id);
id = id_base + i;
srandom(getpid());
for (j = 0; j < 50; j++) {
int start, end, len;
@ -1277,8 +1300,109 @@ test14(int fd)
SUCCEED;
}
/*
* Test 15 - flock(2) semantics
*
* When a lock holder has a shared lock and attempts to upgrade that
* shared lock to exclusive, it must drop the shared lock before
* blocking on the exclusive lock.
*
* To test this, we first arrange for two shared locks on the file,
* and then attempt to upgrade one of them to exclusive. This should
* drop one of the shared locks and block. We interrupt the blocking
* lock request and examine the lock state of the file after dropping
* the other shared lock - there should be no active locks at this
* point.
*/
static int
test15(int fd, __unused int argc, const __unused char **argv)
{
#ifdef LOCK_EX
/*
* We create a child process to hold the lock which we will
* test. We use a pipe to communicate with the child.
*
* Since we only have one file descriptor and lock ownership
* for flock(2) goes with the file descriptor, we use fcntl to
* set the child's shared lock.
*/
int pid;
int pfd[2];
int fd2;
struct flock fl;
char ch;
int res;
if (pipe(pfd) < 0)
err(1, "pipe");
pid = fork();
if (pid < 0)
err(1, "fork");
if (pid == 0) {
/*
* We are the child. We set a shared lock and then
* write one byte back to the parent to tell it. The
* parent will kill us when it's done.
*/
fl.l_start = 0;
fl.l_len = 0;
fl.l_type = F_RDLCK;
fl.l_whence = SEEK_SET;
if (fcntl(fd, F_SETLK, &fl) < 0)
err(1, "fcntl(F_SETLK) (child)");
if (write(pfd[1], "a", 1) < 0)
err(1, "writing to pipe (child)");
pause();
exit(0);
}
/*
* Wait until the child has set its lock and then perform the
* test.
*/
if (read(pfd[0], &ch, 1) != 1)
err(1, "reading from pipe (child)");
fd2 = dup(fd);
if (flock(fd, LOCK_SH) < 0)
err(1, "flock shared");
/*
* flock should wait until the alarm and then return -1 with
* errno set to EINTR.
*/
printf("15 - flock(2) semantics: ");
alarm(1);
flock(fd, LOCK_EX);
/*
* Kill the child to force it to drop its locks.
*/
kill(pid, SIGTERM);
safe_waitpid(pid);
fl.l_start = 0;
fl.l_len = 0;
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
res = fcntl(fd, F_GETLK, &fl);
close(pfd[0]);
close(pfd[1]);
FAIL(res != 0);
FAIL(fl.l_type != F_UNLCK);
SUCCEED;
#else
return 0;
#endif
}
struct test {
int (*testfn)(int); /* function to perform the test */
int (*testfn)(int, int, const char **); /* function to perform the test */
int num; /* test number */
int intr; /* non-zero if the test interrupts a lock */
};
@ -1298,6 +1422,7 @@ struct test tests[] = {
{ test12, 12, 0 },
{ test13, 13, 1 },
{ test14, 14, 0 },
{ test15, 15, 1 },
};
int test_count = sizeof(tests) / sizeof(tests[0]);
@ -1309,16 +1434,23 @@ main(int argc, const char *argv[])
int nointr;
int i;
struct sigaction sa;
int test_argc;
const char **test_argv;
if (argc < 2 || argc > 3) {
errx(1, "usage: flock <directory> [test number]");
if (argc < 2) {
errx(1, "usage: flock <directory> [test number] ...");
}
fd = make_file(argv[1], 1024);
if (argc == 3)
if (argc >= 3) {
testnum = strtol(argv[2], NULL, 0);
else
test_argc = argc - 2;
test_argv = argv + 2;
} else {
testnum = 0;
test_argc = 0;
test_argv = 0;
}
sa.sa_handler = ignore_alarm;
sigemptyset(&sa.sa_mask);
@ -1326,11 +1458,11 @@ main(int argc, const char *argv[])
sigaction(SIGALRM, &sa, 0);
nointr = 0;
#ifdef __FreeBSD__
#if defined(__FreeBSD__) && __FreeBSD_version < 800040
{
/*
* FreeBSD can't interrupt a blocked lock request on
* an NFS mounted filesystem.
* FreeBSD with userland NLM can't interrupt a blocked
* lock request on an NFS mounted filesystem.
*/
struct statfs st;
fstatfs(fd, &st);
@ -1342,7 +1474,7 @@ main(int argc, const char *argv[])
if (tests[i].intr && nointr)
continue;
if (!testnum || tests[i].num == testnum)
tests[i].testfn(fd);
tests[i].testfn(fd, test_argc, test_argv);
}
return 0;

View File

@ -80,6 +80,7 @@ int _rpcsvcdirty = 0;
int grace_expired;
int nsm_state;
int kernel_lockd;
int kernel_lockd_client;
pid_t client_pid;
struct mon mon_host;
char **hosts, *svcport_str = NULL;
@ -175,6 +176,7 @@ main(int argc, char **argv)
}
kernel_lockd = FALSE;
kernel_lockd_client = FALSE;
if (modfind("nfslockd") < 0) {
if (kldload("nfslockd") < 0) {
fprintf(stderr, "Can't find or load kernel support for rpc.lockd - using non-kernel implementation\n");
@ -184,6 +186,10 @@ main(int argc, char **argv)
} else {
kernel_lockd = TRUE;
}
if (kernel_lockd) {
if (getosreldate() >= 800040)
kernel_lockd_client = TRUE;
}
(void)rpcb_unset(NLM_PROG, NLM_SM, NULL);
(void)rpcb_unset(NLM_PROG, NLM_VERS, NULL);
@ -245,41 +251,42 @@ main(int argc, char **argv)
}
if (kernel_lockd) {
/*
* For the kernel lockd case, we run a cut-down RPC
* service on a local-domain socket. The kernel's RPC
* server will pass what it can't handle (mainly
* client replies) down to us. This can go away
* entirely if/when we move the client side of NFS
* locking into the kernel.
*/
struct sockaddr_un sun;
int fd, oldmask;
SVCXPRT *xprt;
if (!kernel_lockd_client) {
/*
* For the case where we have a kernel lockd but it
* doesn't provide client locking, we run a cut-down
* RPC service on a local-domain socket. The kernel's
* RPC server will pass what it can't handle (mainly
* client replies) down to us.
*/
struct sockaddr_un sun;
int fd, oldmask;
SVCXPRT *xprt;
memset(&sun, 0, sizeof sun);
sun.sun_family = AF_LOCAL;
unlink(_PATH_RPCLOCKDSOCK);
strcpy(sun.sun_path, _PATH_RPCLOCKDSOCK);
sun.sun_len = SUN_LEN(&sun);
fd = socket(AF_LOCAL, SOCK_STREAM, 0);
if (!fd) {
err(1, "Can't create local lockd socket");
}
oldmask = umask(S_IXUSR|S_IRWXG|S_IRWXO);
if (bind(fd, (struct sockaddr *) &sun, sun.sun_len) < 0) {
err(1, "Can't bind local lockd socket");
}
umask(oldmask);
if (listen(fd, SOMAXCONN) < 0) {
err(1, "Can't listen on local lockd socket");
}
xprt = svc_vc_create(fd, RPC_MAXDATASIZE, RPC_MAXDATASIZE);
if (!xprt) {
err(1, "Can't create transport for local lockd socket");
}
if (!svc_reg(xprt, NLM_PROG, NLM_VERS4, nlm_prog_4, NULL)) {
err(1, "Can't register service for local lockd socket");
memset(&sun, 0, sizeof sun);
sun.sun_family = AF_LOCAL;
unlink(_PATH_RPCLOCKDSOCK);
strcpy(sun.sun_path, _PATH_RPCLOCKDSOCK);
sun.sun_len = SUN_LEN(&sun);
fd = socket(AF_LOCAL, SOCK_STREAM, 0);
if (!fd) {
err(1, "Can't create local lockd socket");
}
oldmask = umask(S_IXUSR|S_IRWXG|S_IRWXO);
if (bind(fd, (struct sockaddr *) &sun, sun.sun_len) < 0) {
err(1, "Can't bind local lockd socket");
}
umask(oldmask);
if (listen(fd, SOMAXCONN) < 0) {
err(1, "Can't listen on local lockd socket");
}
xprt = svc_vc_create(fd, RPC_MAXDATASIZE, RPC_MAXDATASIZE);
if (!xprt) {
err(1, "Can't create transport for local lockd socket");
}
if (!svc_reg(xprt, NLM_PROG, NLM_VERS4, nlm_prog_4, NULL)) {
err(1, "Can't register service for local lockd socket");
}
}
/*
@ -342,17 +349,27 @@ main(int argc, char **argv)
}
if (kernel_lockd) {
init_nsm();
client_pid = client_request();
if (!kernel_lockd_client) {
init_nsm();
client_pid = client_request();
/*
* Create a child process to enter the kernel and then
* wait for RPCs on our local domain socket.
*/
if (!fork())
/*
* Create a child process to enter the kernel and then
* wait for RPCs on our local domain socket.
*/
if (!fork())
nlm_syscall(debug_level, grace_period,
naddrs, addrs);
else
svc_run();
} else {
/*
* The kernel lockd implementation provides
* both client and server so we don't need to
* do anything else.
*/
nlm_syscall(debug_level, grace_period, naddrs, addrs);
else
svc_run();
}
} else {
grace_expired = 0;
alarm(grace_period);

View File

@ -36,6 +36,7 @@
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
@ -78,8 +79,11 @@ HostInfo *find_host(char *hostname, int create)
HostInfo *hp;
HostInfo *spare_slot = NULL;
HostInfo *result = NULL;
struct addrinfo *ai1, *ai2;
int i;
if (getaddrinfo(hostname, NULL, NULL, &ai1) != 0)
ai1 = NULL;
for (i = 0, hp = status_info->hosts; i < status_info->noOfHosts; i++, hp++)
{
if (!strncasecmp(hostname, hp->hostname, SM_MAXSTRLEN))
@ -87,9 +91,35 @@ HostInfo *find_host(char *hostname, int create)
result = hp;
break;
}
if (hp->hostname[0] &&
getaddrinfo(hp->hostname, NULL, NULL, &ai2) != 0)
ai2 = NULL;
if (ai1 && ai2)
{
struct addrinfo *p1, *p2;
for (p1 = ai1; !result && p1; p1 = p1->ai_next)
{
for (p2 = ai2; !result && p2; p2 = p2->ai_next)
{
if (p1->ai_family == p2->ai_family
&& p1->ai_addrlen == p2->ai_addrlen
&& !memcmp(p1->ai_addr, p2->ai_addr, p1->ai_addrlen))
{
result = hp;
break;
}
}
}
if (result)
break;
}
if (ai2)
freeaddrinfo(ai2);
if (!spare_slot && !hp->monList && !hp->notifyReqd)
spare_slot = hp;
}
if (ai1)
freeaddrinfo(ai1);
/* Return if entry found, or if not asked to create one. */
if (result || !create) return (result);