freebsd-dev/sys/nlm/sm_inter_xdr.c

Add the new kernel-mode NFS Lock Manager. To use it instead of the
user-mode lock manager, build a kernel with the NFSLOCKD option and
add '-k' to 'rpc_lockd_flags' in rc.conf.

Highlights include:

* Thread-safe kernel RPC client - many threads can use the same RPC
  client handle safely, with replies being de-multiplexed at the socket
  upcall (typically driven directly by the NIC interrupt) and handed off
  to whichever thread matches the reply. For UDP sockets, many RPC
  clients can share the same socket, which allows a single privileged
  UDP port number to talk to an arbitrary number of remote hosts.

* Single-threaded kernel RPC server. Adding support for a
  multi-threaded server would be relatively straightforward and would
  follow approximately the Solaris KPI. A single thread should be
  sufficient for the NLM since it should rarely block in normal
  operation.

* Kernel-mode NLM server supporting cancel requests and granted
  callbacks. I've tested the NLM server reasonably extensively - it
  passes both my own tests and the NFS Connectathon locking tests
  running on Solaris, Mac OS X and Ubuntu Linux.

* Userland NLM client supported. While the NLM server doesn't have
  support for the local NFS client's locking needs, it does have to
  field async replies and granted callbacks from remote NLMs that the
  local client has contacted. We relay these replies to the userland
  rpc.lockd over a local domain RPC socket.

* Robust deadlock detection for the local lock manager. In particular,
  it will detect deadlocks caused by a lock request that covers more
  than one blocking request. As required by the NLM protocol, all
  deadlock detection happens synchronously - a user is guaranteed that
  if a lock request isn't rejected immediately, the lock will
  eventually be granted. The old system allowed for a 'deferred
  deadlock' condition where a blocked lock request could wake up and
  find that some other deadlock-causing lock owner had beaten it to
  the lock.

* Since both local and remote locks are managed by the same kernel
  locking code, local and remote processes can safely use file locks
  for mutual exclusion. Local processes have no fairness advantage
  over remote processes when contending to lock a region that has just
  been unlocked - the local lock manager enforces a strict first-come,
  first-served model for both local and remote lockers.

Sponsored by: Isilon Systems
PR: 95247 107555 115524 116679
MFC after: 2 weeks

2008-03-26 15:23:12 +00:00
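
The file below is rpcgen output for the NSM (rpc.statd) wire structures
that the kernel NLM exchanges with the status monitor. As a rough
illustration of how these routines are consumed - a minimal userland
sketch, not anything shipped in this commit - the fragment here
round-trips an SM_MON request through an XDR memory stream. It assumes
the equivalent userland declarations from <rpcsvc/sm_inter.h> and
librpcsvc's copies of these xdr_*() routines; the host names and the
callback procedure number are invented for the example.

#include <rpc/rpc.h>
#include <rpcsvc/sm_inter.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sketch - not part of sm_inter_xdr.c. */
static bool_t
roundtrip_mon(void)
{
	char buf[512];
	XDR enc, dec;
	struct mon out, in;

	memset(&out, 0, sizeof(out));
	memset(&in, 0, sizeof(in));	/* NULL strings let decode allocate */
	out.mon_id.mon_name = "client.example.com";	/* host to watch */
	out.mon_id.my_id.my_name = "server.example.com";
	out.mon_id.my_id.my_prog = 100021;	/* the NLM program number */
	out.mon_id.my_id.my_vers = 4;
	out.mon_id.my_id.my_proc = 16;	/* hypothetical callback proc */

	/* Encode the request into buf as it would appear on the wire. */
	xdrmem_create(&enc, buf, sizeof(buf), XDR_ENCODE);
	if (!xdr_mon(&enc, &out))
		return (FALSE);

	/* Decode it back out of buf into the second, zeroed structure. */
	xdrmem_create(&dec, buf, sizeof(buf), XDR_DECODE);
	if (!xdr_mon(&dec, &in))
		return (FALSE);
	printf("monitoring %s\n", in.mon_id.mon_name);

	/* Decoding allocated the strings; release them. */
	xdr_free((xdrproc_t)xdr_mon, (char *)&in);
	return (TRUE);
}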
/*
* Please do not edit this file.
* It was generated using rpcgen.
*/
#include <nlm/sm_inter.h>
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
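
/*
 * sm_name carries the name of a host to be monitored; it is the
 * argument of the status monitor's SM_STAT procedure.
 */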
bool_t
xdr_sm_name(XDR *xdrs, sm_name *objp)
{
if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
return (FALSE);
return (TRUE);
}
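
/*
 * my_id identifies the RPC program, version and procedure that the
 * status monitor should call back when a monitored host's state
 * changes.
 */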
bool_t
xdr_my_id(XDR *xdrs, my_id *objp)
{
if (!xdr_string(xdrs, &objp->my_name, SM_MAXSTRLEN))
return (FALSE);
if (!xdr_int(xdrs, &objp->my_prog))
return (FALSE);
if (!xdr_int(xdrs, &objp->my_vers))
return (FALSE);
if (!xdr_int(xdrs, &objp->my_proc))
return (FALSE);
return (TRUE);
}
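
/*
 * mon_id pairs the name of the host to monitor with the caller's
 * callback identity.
 */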
bool_t
xdr_mon_id(XDR *xdrs, mon_id *objp)
{
if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
return (FALSE);
if (!xdr_my_id(xdrs, &objp->my_id))
return (FALSE);
return (TRUE);
}
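
/*
 * mon is the SM_MON argument: a mon_id plus 16 bytes of opaque
 * private data that are echoed back in the status callback.
 */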
bool_t
xdr_mon(XDR *xdrs, mon *objp)
{
if (!xdr_mon_id(xdrs, &objp->mon_id))
return (FALSE);
if (!xdr_opaque(xdrs, objp->priv, 16))
return (FALSE);
return (TRUE);
}
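
/*
 * stat_chge is the SM_NOTIFY argument: the name of a host that has
 * restarted and its new state number.
 */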
bool_t
xdr_stat_chge(XDR *xdrs, stat_chge *objp)
{
if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
return (FALSE);
if (!xdr_int(xdrs, &objp->state))
return (FALSE);
return (TRUE);
}
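
/*
 * sm_stat holds a bare state number, as returned by SM_UNMON and
 * SM_UNMON_ALL.
 */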
bool_t
xdr_sm_stat(XDR *xdrs, sm_stat *objp)
{
if (!xdr_int(xdrs, &objp->state))
return (FALSE);
return (TRUE);
}
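
/*
 * sm_res is the monitor's accept/reject result (stat_succ or
 * stat_fail); since the type is itself an enum, it is encoded
 * directly through objp.
 */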
bool_t
xdr_sm_res(XDR *xdrs, sm_res *objp)
{
if (!xdr_enum(xdrs, (enum_t *)objp))
return (FALSE);
return (TRUE);
}
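
/*
 * sm_stat_res is the SM_STAT/SM_MON result: the accept/reject status
 * plus the monitor's current state number.
 */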
bool_t
xdr_sm_stat_res(XDR *xdrs, sm_stat_res *objp)
{
if (!xdr_sm_res(xdrs, &objp->res_stat))
return (FALSE);
if (!xdr_int(xdrs, &objp->state))
return (FALSE);
return (TRUE);
}
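
/*
 * sm_status is delivered to the my_id callback procedure when a
 * monitored host changes state: its name, its new state number and
 * the private cookie from the original mon request.
 */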
bool_t
xdr_sm_status(XDR *xdrs, sm_status *objp)
{
if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
return (FALSE);
if (!xdr_int(xdrs, &objp->state))
return (FALSE);
if (!xdr_opaque(xdrs, objp->priv, 16))
return (FALSE);
return (TRUE);
}