Add kern.rpc.gss.client_hash tunable, to make it possible to bump it
easily.  This can lower the load on gssd(8) on large NFS servers.

Submitted by:	Per Andersson <pa at chalmers dot se>
Reviewed by:	rmacklem@
MFC after:	2 weeks
Sponsored by:	Chalmers University of Technology
This commit is contained in:
Edward Tomasz Napierala 2019-02-19 10:17:49 +00:00
parent aa43298309
commit 52eb49951a

View File

@@ -172,6 +172,7 @@ struct svc_rpc_gss_cookedcred {
#define CLIENT_HASH_SIZE 256
#define CLIENT_MAX 128
u_int svc_rpc_gss_client_max = CLIENT_MAX;
u_int svc_rpc_gss_client_hash_size = CLIENT_HASH_SIZE;
SYSCTL_NODE(_kern, OID_AUTO, rpc, CTLFLAG_RW, 0, "RPC");
SYSCTL_NODE(_kern_rpc, OID_AUTO, gss, CTLFLAG_RW, 0, "GSS");
@@ -180,12 +181,16 @@ SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_max, CTLFLAG_RW,
&svc_rpc_gss_client_max, 0,
"Max number of rpc-gss clients");
SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_hash, CTLFLAG_RDTUN,
&svc_rpc_gss_client_hash_size, 0,
"Size of rpc-gss client hash table");
static u_int svc_rpc_gss_client_count;
SYSCTL_UINT(_kern_rpc_gss, OID_AUTO, client_count, CTLFLAG_RD,
&svc_rpc_gss_client_count, 0,
"Number of rpc-gss clients");
struct svc_rpc_gss_client_list svc_rpc_gss_client_hash[CLIENT_HASH_SIZE];
struct svc_rpc_gss_client_list *svc_rpc_gss_client_hash;
struct svc_rpc_gss_client_list svc_rpc_gss_clients;
static uint32_t svc_rpc_gss_next_clientid = 1;
@@ -194,7 +199,8 @@ svc_rpc_gss_init(void *arg)
{
int i;
for (i = 0; i < CLIENT_HASH_SIZE; i++)
svc_rpc_gss_client_hash = mem_alloc(sizeof(struct svc_rpc_gss_client_list) * svc_rpc_gss_client_hash_size);
for (i = 0; i < svc_rpc_gss_client_hash_size; i++)
TAILQ_INIT(&svc_rpc_gss_client_hash[i]);
TAILQ_INIT(&svc_rpc_gss_clients);
svc_auth_reg(RPCSEC_GSS, svc_rpc_gss, rpc_gss_svc_getcred);
@@ -530,7 +536,7 @@ svc_rpc_gss_find_client(struct svc_rpc_gss_clientid *id)
if (id->ci_hostid != hostid || id->ci_boottime != boottime.tv_sec)
return (NULL);
list = &svc_rpc_gss_client_hash[id->ci_id % CLIENT_HASH_SIZE];
list = &svc_rpc_gss_client_hash[id->ci_id % svc_rpc_gss_client_hash_size];
sx_xlock(&svc_rpc_gss_lock);
TAILQ_FOREACH(client, list, cl_link) {
if (client->cl_id.ci_id == id->ci_id) {
@@ -569,7 +575,7 @@ svc_rpc_gss_create_client(void)
getboottime(&boottime);
client->cl_id.ci_boottime = boottime.tv_sec;
client->cl_id.ci_id = svc_rpc_gss_next_clientid++;
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % CLIENT_HASH_SIZE];
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size];
sx_xlock(&svc_rpc_gss_lock);
TAILQ_INSERT_HEAD(list, client, cl_link);
TAILQ_INSERT_HEAD(&svc_rpc_gss_clients, client, cl_alllink);
@@ -635,7 +641,7 @@ svc_rpc_gss_forget_client_locked(struct svc_rpc_gss_client *client)
struct svc_rpc_gss_client_list *list;
sx_assert(&svc_rpc_gss_lock, SX_XLOCKED);
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % CLIENT_HASH_SIZE];
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size];
TAILQ_REMOVE(list, client, cl_link);
TAILQ_REMOVE(&svc_rpc_gss_clients, client, cl_alllink);
svc_rpc_gss_client_count--;
@@ -650,7 +656,7 @@ svc_rpc_gss_forget_client(struct svc_rpc_gss_client *client)
struct svc_rpc_gss_client_list *list;
struct svc_rpc_gss_client *tclient;
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % CLIENT_HASH_SIZE];
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % svc_rpc_gss_client_hash_size];
sx_xlock(&svc_rpc_gss_lock);
TAILQ_FOREACH(tclient, list, cl_link) {
/*