Normalize spelling and grammar in TCP hostcache comments.

Robert Watson 2007-05-27 19:39:26 +00:00
parent 87066f04c6
commit e487a5e2a0


@ -30,34 +30,33 @@
*/
/*
* The tcp_hostcache moves the tcp specific cached metrics from the routing
* table into a dedicated structure indexed by the remote IP address. It
* keeps information on the measured tcp parameters of past tcp sessions
* to have better initial start values for following connections from the
* same source. Depending on the network parameters (delay, bandwidth, max
* MTU, congestion window) between local and remote site this can lead to
* significant speedups for new tcp connections after the first one.
* The tcp_hostcache moves the tcp-specific cached metrics from the routing
* table to a dedicated structure indexed by the remote IP address. It keeps
* information on the measured TCP parameters of past TCP sessions to allow
* better initial start values to be used with later connections to/from the
* same source. Depending on the network parameters (delay, bandwidth, max
* MTU, congestion window) between local and remote sites, this can lead to
* significant speed-ups for new TCP connections after the first one.
*
* Due to this new tcp_hostcache all tcp specific metrics information in
* the routing table has been removed. The INPCB no longer keeps a pointer
* to the routing entry and protocol initiated route cloning has been
* removed as well. With these changes the routing table has gone back
* to being more lightwight and only carries information related to packet
* forwarding.
* Due to the tcp_hostcache, all TCP-specific metrics information in the
* routing table has been removed. The inpcb no longer keeps a pointer to
* the routing entry, and protocol-initiated route cloning has been removed
* as well. With these changes, the routing table has gone back to being
* more lightwight and only carries information related to packet forwarding.
*
* Tcp_hostcache is designed for multiple concurrent access in SMP
* environments and high contention. All bucket rows have their own
* lock and thus multiple lookups and modifies can be done at the same
* time as long as they are in different bucket rows. If a request for
* insertion of a new record can't be satisfied it simply returns an
* empty structure. Nobody and nothing shall ever point directly to
* any entry in tcp_hostcache. All communication is done in an object
* oriented way and only funtions of tcp_hostcache will manipulate hostcache
* entries. Otherwise we are unable to achieve good behaviour in concurrent
* access situations. Since tcp_hostcache is only caching information there
* are no fatal consequences if we either can't satisfy any particular request
* or have to drop/overwrite an existing entry because of bucket limit
* memory constrains.
* tcp_hostcache is designed for multiple concurrent access in SMP
* environments and high contention. All bucket rows have their own lock and
* thus multiple lookups and modifies can be done at the same time as long as
* they are in different bucket rows. If a request for insertion of a new
* record can't be satisfied, it simply returns an empty structure. Nobody
* and nothing outside of tcp_hostcache.c will ever point directly to any
* entry in the tcp_hostcache. All communication is done in an
* object-oriented way and only functions of tcp_hostcache will manipulate
* hostcache entries. Otherwise, we are unable to achieve good behaviour in
* concurrent access situations. Since tcp_hostcache is only caching
* information, there are no fatal consequences if we either can't satisfy
* any particular request or have to drop/overwrite an existing entry because
* of bucket limit memory constrains.
*/
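For illustration only (not part of this commit), a minimal caller-side sketch of the access model the comment describes: consumers never hold a pointer into the cache, they only go through the external tcp_hc_* functions. The wrapper function and its name below are hypothetical; tcp_hc_getmtu() and tcp_hc_updatemtu() appear further down in this file.

    /*
     * Hypothetical caller, for illustration of the access model only.
     */
    static u_long
    example_path_mtu(struct in_conninfo *inc, u_long discovered_mtu)
    {
            u_long mtu;

            /* Ask the hostcache for the remembered path MTU; 0 = not cached. */
            mtu = tcp_hc_getmtu(inc);
            if (mtu == 0 || discovered_mtu < mtu) {
                    /*
                     * Push the newly learned MTU back; the cache may create
                     * or recycle an entry, or silently drop the update.
                     */
                    tcp_hc_updatemtu(inc, discovered_mtu);
                    mtu = discovered_mtu;
            }
            return (mtu);
    }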
/*
@ -112,7 +111,7 @@ struct hc_metrics {
struct hc_head *rmx_head; /* head of bucket tail queue */
struct in_addr ip4; /* IP address */
struct in6_addr ip6; /* IP6 address */
/* endpoint specific values for tcp */
/* endpoint specific values for TCP */
u_long rmx_mtu; /* MTU for this path */
u_long rmx_ssthresh; /* outbound gateway buffer limit */
u_long rmx_rtt; /* estimated round trip time */
@ -121,7 +120,7 @@ struct hc_metrics {
u_long rmx_cwnd; /* congestion window */
u_long rmx_sendpipe; /* outbound delay-bandwidth product */
u_long rmx_recvpipe; /* inbound delay-bandwidth product */
/* tcp hostcache internal data */
/* TCP hostcache internal data */
int rmx_expire; /* lifetime for object */
u_long rmx_hits; /* number of hits */
u_long rmx_updates; /* number of updates */
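The fields above belong to the internal per-entry structure, which callers never see directly; tcp_hc_get() (below) copies the metrics into a caller-owned struct hc_metrics_lite. As a sketch only — the authoritative definition lives in netinet/tcp_var.h and carries more fields than these hunks show — the caller-visible copy looks roughly like this:

    /*
     * Sketch: caller-owned copy filled in by tcp_hc_get(); not the
     * authoritative definition, and limited to the fields visible above.
     */
    struct hc_metrics_lite_sketch {
            u_long  rmx_mtu;        /* MTU for this path */
            u_long  rmx_ssthresh;   /* outbound gateway buffer limit */
            u_long  rmx_rtt;        /* estimated round trip time */
            u_long  rmx_cwnd;       /* congestion window */
            u_long  rmx_sendpipe;   /* outbound delay-bandwidth product */
            u_long  rmx_recvpipe;   /* inbound delay-bandwidth product */
            /* ... plus the remaining rmx_* metrics elided by the hunks above */
    };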
@ -201,7 +200,7 @@ tcp_hc_init(void)
int i;
/*
* Initialize hostcache structures
* Initialize hostcache structures.
*/
tcp_hostcache.cache_count = 0;
tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
@ -223,14 +222,14 @@ tcp_hc_init(void)
tcp_hostcache.hashmask = tcp_hostcache.hashsize - 1;
/*
* Allocate the hash table
* Allocate the hash table.
*/
tcp_hostcache.hashbase = (struct hc_head *)
malloc(tcp_hostcache.hashsize * sizeof(struct hc_head),
M_HOSTCACHE, M_WAITOK | M_ZERO);
/*
* Initialize the hash buckets
* Initialize the hash buckets.
*/
for (i = 0; i < tcp_hostcache.hashsize; i++) {
TAILQ_INIT(&tcp_hostcache.hashbase[i].hch_bucket);
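An aside on the hashmask idiom above (illustration, not part of the change): keeping hashsize a power of two makes hashmask = hashsize - 1 a bit mask, so a bucket row can be picked with an AND instead of a modulo. The hash value itself comes from the HOSTCACHE_HASH()/HOSTCACHE_HASH6() macros, whose definitions are not shown in this diff and which may already fold the mask into the value they return.

    /*
     * With hashsize a power of two, masking equals a modulo:
     *   hash & tcp_hostcache.hashmask  ==  hash % tcp_hostcache.hashsize
     * and selects the bucket row:
     */
    hc_head = &tcp_hostcache.hashbase[hash & tcp_hostcache.hashmask];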
@ -254,7 +253,7 @@ tcp_hc_init(void)
}
/*
* Internal function: lookup an entry in the hostcache or return NULL.
* Internal function: look up an entry in the hostcache or return NULL.
*
* If an entry has been returned, the caller becomes responsible for
* unlocking the bucket row after he is done reading/modifying the entry.
@ -279,14 +278,14 @@ tcp_hc_lookup(struct in_conninfo *inc)
hc_head = &tcp_hostcache.hashbase[hash];
/*
* aquire lock for this bucket row
* we release the lock if we don't find an entry,
* otherwise the caller has to unlock after he is done
* Acquire lock for this bucket row; we release the lock if we don't
* find an entry, otherwise the caller has to unlock after he is
* done.
*/
THC_LOCK(&hc_head->hch_mtx);
/*
* circle through entries in bucket row looking for a match
* Iterate through entries in bucket row looking for a match.
*/
TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
if (inc->inc_isipv6) {
@ -301,15 +300,15 @@ tcp_hc_lookup(struct in_conninfo *inc)
}
/*
* We were unsuccessful and didn't find anything
* We were unsuccessful and didn't find anything.
*/
THC_UNLOCK(&hc_head->hch_mtx);
return NULL;
}
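A short sketch (hypothetical caller, for illustration) of the convention just described: when tcp_hc_lookup() finds an entry, it returns with the bucket row still locked, so the caller touches the entry and then releases the row itself.

    hc_entry = tcp_hc_lookup(inc);
    if (hc_entry != NULL) {
            /* The bucket row is locked here; read/modify the entry ... */
            hc_entry->rmx_hits++;
            /* ... then the caller, not tcp_hc_lookup(), drops the lock. */
            THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
    }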
/*
* Internal function: insert an entry into the hostcache or return NULL
* if unable to allocate a new one.
* Internal function: insert an entry into the hostcache or return NULL if
* unable to allocate a new one.
*
* If an entry has been returned, the caller becomes responsible for
* unlocking the bucket row after he is done reading/modifying the entry.
@ -324,7 +323,7 @@ tcp_hc_insert(struct in_conninfo *inc)
KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));
/*
* Hash the foreign ip address
* Hash the foreign ip address.
*/
if (inc->inc_isipv6)
hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
@ -334,24 +333,24 @@ tcp_hc_insert(struct in_conninfo *inc)
hc_head = &tcp_hostcache.hashbase[hash];
/*
* aquire lock for this bucket row
* we release the lock if we don't find an entry,
* otherwise the caller has to unlock after he is done
* Acquire lock for this bucket row; we release the lock if we don't
* find an entry, otherwise the caller has to unlock after he is
* done.
*/
THC_LOCK(&hc_head->hch_mtx);
/*
* If the bucket limit is reached reuse the least used element
* If the bucket limit is reached, reuse the least-used element.
*/
if (hc_head->hch_length >= tcp_hostcache.bucket_limit ||
tcp_hostcache.cache_count >= tcp_hostcache.cache_limit) {
hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
/*
* At first we were dropping the last element, just to
* reaquire it in the next two lines again which ain't
* very efficient. Instead just reuse the least used element.
* Maybe we drop something that is still "in-use" but we can
* be "lossy".
* reacquire it in the next two lines again, which isn't very
* efficient. Instead just reuse the least used element.
* We may drop something that is still "in-use" but we can be
* "lossy".
*/
TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
tcp_hostcache.hashbase[hash].hch_length--;
@ -362,7 +361,7 @@ tcp_hc_insert(struct in_conninfo *inc)
#endif
} else {
/*
* Allocate a new entry, or balk if not possible
* Allocate a new entry, or balk if not possible.
*/
hc_entry = uma_zalloc(tcp_hostcache.zone, M_NOWAIT);
if (hc_entry == NULL) {
@ -372,7 +371,7 @@ tcp_hc_insert(struct in_conninfo *inc)
}
/*
* Initialize basic information of hostcache entry
* Initialize basic information of hostcache entry.
*/
bzero(hc_entry, sizeof(*hc_entry));
if (inc->inc_isipv6)
@ -383,7 +382,7 @@ tcp_hc_insert(struct in_conninfo *inc)
hc_entry->rmx_expire = tcp_hostcache.expire;
/*
* Put it upfront
* Put it upfront.
*/
TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
tcp_hostcache.hashbase[hash].hch_length++;
@ -394,9 +393,9 @@ tcp_hc_insert(struct in_conninfo *inc)
}
/*
* External function: lookup an entry in the hostcache and fill out the
* supplied tcp metrics structure. Fills in null when no entry was found
* or a value is not set.
* External function: look up an entry in the hostcache and fill out the
* supplied TCP metrics structure. Fills in NULL when no entry was found or
* a value is not set.
*/
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
@ -404,12 +403,12 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
struct hc_metrics *hc_entry;
/*
* Find the right bucket
* Find the right bucket.
*/
hc_entry = tcp_hc_lookup(inc);
/*
* If we don't have an existing object
* If we don't have an existing object.
*/
if (hc_entry == NULL) {
bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
@ -428,14 +427,14 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;
/*
* unlock bucket row
* Unlock bucket row.
*/
THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
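For context, a hedged sketch of how a consumer outside this file might use the copy it gets back: zero-valued fields simply mean "not cached", so each one is tested before use. The tcpcb pointer tp and the two seed_* helpers are invented names for illustration, not existing KPIs.

    struct hc_metrics_lite metrics;

    tcp_hc_get(inc, &metrics);      /* metrics is all zeroes on a miss */
    if (metrics.rmx_mtu != 0)
            seed_mss_from_cached_mtu(tp, metrics.rmx_mtu);  /* hypothetical */
    if (metrics.rmx_rtt != 0)
            seed_srtt_from_cached_rtt(tp, metrics.rmx_rtt); /* hypothetical */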
/*
* External function: lookup an entry in the hostcache and return the
* discovered path mtu. Returns null if no entry is found or value is not
* External function: look up an entry in the hostcache and return the
* discovered path MTU. Returns NULL if no entry is found or value is not
* set.
*/
u_long
@ -457,7 +456,7 @@ tcp_hc_getmtu(struct in_conninfo *inc)
}
/*
* External function: update the mtu value of an entry in the hostcache.
* External function: update the MTU value of an entry in the hostcache.
* Creates a new entry if none was found.
*/
void
@ -466,12 +465,12 @@ tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
struct hc_metrics *hc_entry;
/*
* Find the right bucket
* Find the right bucket.
*/
hc_entry = tcp_hc_lookup(inc);
/*
* If we don't have an existing object try to insert a new one
* If we don't have an existing object, try to insert a new one.
*/
if (hc_entry == NULL) {
hc_entry = tcp_hc_insert(inc);
@ -484,19 +483,19 @@ tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
hc_entry->rmx_mtu = mtu;
/*
* put it upfront so we find it faster next time
* Put it upfront so we find it faster next time.
*/
TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
/*
* unlock bucket row
* Unlock bucket row.
*/
THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
/*
* External function: update the tcp metrics of an entry in the hostcache.
* External function: update the TCP metrics of an entry in the hostcache.
* Creates a new entry if none was found.
*/
void
@ -638,8 +637,8 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
}
/*
* Expire and purge (old|all) entries in the tcp_hostcache. Runs periodically
* from the callout.
* Expire and purge (old|all) entries in the tcp_hostcache. Runs
* periodically from the callout.
*/
static void
tcp_hc_purge(void *arg)
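As a closing illustration of the "runs periodically from the callout" remark (the callout variable, interval, and handler names below are assumptions, not the ones in this file), a periodic purge of this kind is normally driven by rearming its own callout each time it runs:

    static struct callout example_callout;
    static int example_purge_interval = 300;        /* seconds, assumed */

    static void
    example_purge_tick(void *arg)
    {
            /* ... walk the bucket rows and expire stale entries ... */

            /* Rearm so the purge keeps running periodically. */
            callout_reset(&example_callout, example_purge_interval * hz,
                example_purge_tick, NULL);
    }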