Make a pass over most of the comments.

alc 2013-03-04 07:11:10 +00:00
parent 475367da61
commit c3be5353b8


@@ -62,10 +62,9 @@
#endif
/*
- * Such sizes should permit to keep node children contained into a single
- * cache-line, or to at least not span many of those.
- * In particular, sparse tries should however be compressed properly and
- * then make some extra-levels not a big deal.
+ * These widths should allow the pointers to a node's children to fit within
+ * a single cache line. The extra levels from a narrow width should not be
+ * a problem thanks to path compression.
*/
#ifdef __LP64__
#define VM_RADIX_WIDTH 4
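As a rough illustration of the width trade-off described in the new comment (this sketch is not part of the commit; the SKETCH_* names are made up): with VM_RADIX_WIDTH 4 on a 64-bit kernel, each node holds 16 child pointers, i.e. 128 bytes of pointer storage, and a 64-bit index is consumed in at most 16 levels.

#include <stdio.h>

#define	SKETCH_WIDTH	4			/* VM_RADIX_WIDTH on __LP64__ */
#define	SKETCH_COUNT	(1 << SKETCH_WIDTH)	/* children per node */

int
main(void)
{
	/* 16 children x 8-byte pointers on LP64 = 128 bytes of pointers. */
	printf("children per node:   %d\n", SKETCH_COUNT);
	printf("child pointer array: %zu bytes\n",
	    SKETCH_COUNT * sizeof(void *));
	/* A 64-bit index split 4 bits per level needs at most 16 levels. */
	printf("max trie levels:     %d\n", 64 / SKETCH_WIDTH);
	return (0);
}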
@@ -97,8 +96,8 @@ struct vm_radix_node {
static uma_zone_t vm_radix_node_zone;
/*
- * Allocate a radix node. Pre-allocation ensures that the request will be
- * always successfully satisfied.
+ * Allocate a radix node. Pre-allocation should ensure that the request
+ * will always be satisfied.
*/
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
@@ -108,15 +107,15 @@ vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO);
/*
- * The required number of nodes might be already correctly
- * pre-allocated in vm_radix_init(). However, UMA can reserve
- * few nodes on per-cpu specific buckets, which will not be
- * accessible from the curcpu. The allocation could then
- * return NULL when the pre-allocation pool is close to be
- * exhausted. Anyway, in practice this should never be a
- * problem because a new node is not always required for
- * insert, thus the pre-allocation pool should already have
- * some extra-pages that indirectly deal with this situation.
+ * The required number of nodes should already be pre-allocated
+ * by vm_radix_prealloc(). However, UMA can hold a few nodes
+ * in per-CPU buckets, which will not be accessible by the
+ * current CPU. Thus, the allocation could return NULL when
+ * the pre-allocated pool is close to exhaustion. Anyway,
+ * in practice this should never occur because a new node
+ * is not always required for insert. Thus, the pre-allocated
+ * pool should have some extra pages that prevent this from
+ * becoming a problem.
*/
if (rnode == NULL)
panic("%s: uma_zalloc() returned NULL for a new node",
@@ -184,7 +183,7 @@ vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
/*
* Returns the associated page extracted from rnode if available,
- * NULL otherwise.
+ * and NULL otherwise.
*/
static __inline vm_page_t
vm_radix_node_page(struct vm_radix_node *rnode)
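For readers unfamiliar with the structure: a child slot can refer either to a page (a leaf) or to another node, and vm_radix_node_page() returns the page in the former case. One common way to implement that distinction, sketched below with made-up names (ISLEAF_FLAG, node_page_sketch), is to tag leaf pointers in their low-order bits; treat this as an assumption about the technique, not a copy of the kernel code.

#include <stddef.h>
#include <stdint.h>

#define	ISLEAF_FLAG	0x1	/* low bit set: pointer is really a page */

struct page;			/* stand-in for vm_page_t's target */

static inline struct page *
node_page_sketch(void *child)
{
	uintptr_t v = (uintptr_t)child;

	/* Strip the tag and return the page, or NULL for an internal node. */
	return ((v & ISLEAF_FLAG) != 0 ?
	    (struct page *)(v & ~(uintptr_t)ISLEAF_FLAG) : NULL);
}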
@@ -195,7 +194,7 @@ vm_radix_node_page(struct vm_radix_node *rnode)
}
/*
- * Adds the page as a child of provided node.
+ * Adds the page as a child of the provided node.
*/
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
@@ -229,7 +228,7 @@ vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
/*
* Returns TRUE if it can be determined that key does not belong to the
- * specified rnode. FALSE otherwise.
+ * specified rnode. Otherwise, returns FALSE.
*/
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
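The test the comment describes, deciding cheaply that a key cannot live under a given node, is in essence a masked prefix comparison. A standalone sketch of that idea follows, assuming a node records the index prefix it covers and how many low-order index bits are resolved below it; the names and parameters are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>

/*
 * TRUE if 'idx' cannot belong under a node owning prefix 'owner', where
 * 'lowbits' (< 64) is the number of index bits resolved below the node.
 */
static inline bool
keybarr_sketch(uint64_t owner, uint64_t idx, unsigned lowbits)
{
	uint64_t mask = ~(((uint64_t)1 << lowbits) - 1);

	/* The bits above the node's level must match the node's prefix. */
	return ((idx & mask) != (owner & mask));
}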
@@ -296,7 +295,7 @@ vm_radix_declev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
}
/*
- * Internal handwork for vm_radix_reclaim_allonodes() primitive.
+ * Internal helper for vm_radix_reclaim_allonodes().
* This function is recursive.
*/
static void
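The helper referred to above releases the whole trie with bounded recursion: children first, then the node itself, with the depth capped by the trie's fixed maximum height. The shape of that post-order walk is sketched below with an invented type (node_sketch) and a plain free(); the real code additionally distinguishes page (leaf) children, which are not nodes and are not freed here.

#include <stdlib.h>

#define	NCHILD_SKETCH	16	/* 1 << width */

struct node_sketch {
	struct node_sketch	*child[NCHILD_SKETCH];
};

static void
reclaim_sketch(struct node_sketch *n)
{
	int slot;

	/* Post-order: release the subtrees before the node itself. */
	for (slot = 0; slot < NCHILD_SKETCH; slot++)
		if (n->child[slot] != NULL)
			reclaim_sketch(n->child[slot]);
	free(n);
}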
@@ -364,7 +363,7 @@ vm_radix_init(void)
}
/*
- * Inserts the key-value pair in to the trie.
+ * Inserts the key-value pair into the trie.
* Panics if the key already exists.
*/
void
@@ -452,7 +451,7 @@ vm_radix_insert(struct vm_radix *rtree, vm_pindex_t index, vm_page_t page)
}
/*
- * Returns the value stored at the index. If the index is not present
+ * Returns the value stored at the index. If the index is not present,
* NULL is returned.
*/
vm_page_t
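Taken together, the two comments above give the basic contract: vm_radix_insert() panics on a duplicate index and vm_radix_lookup() returns NULL on a miss. A hypothetical caller-side sketch (header list approximate; locking and the surrounding object code omitted):

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

static void
usage_sketch(struct vm_radix *rtree, vm_pindex_t pindex, vm_page_t m)
{
	/* Panics if a page is already present at pindex. */
	vm_radix_insert(rtree, pindex, m);

	/* A hit returns the stored page; a miss returns NULL. */
	KASSERT(vm_radix_lookup(rtree, pindex) == m,
	    ("usage_sketch: lookup after insert failed"));
}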
@@ -480,7 +479,7 @@ vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
}
/*
- * Look up any entry at a position bigger than or equal to index.
+ * Look up the nearest entry at a position bigger than or equal to index.
*/
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
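Because it returns the nearest entry at or above the given index, vm_radix_lookup_ge() supports ordered forward traversal. A sketch of that idiom follows; the field m->pindex is how vm_page is normally indexed, but the loop itself is illustrative and not taken from the commit.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

static void
iterate_sketch(struct vm_radix *rtree, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m;

	/* Visit every page with start <= pindex < end, in index order. */
	for (m = vm_radix_lookup_ge(rtree, start);
	    m != NULL && m->pindex < end;
	    m = vm_radix_lookup_ge(rtree, m->pindex + 1)) {
		/* ... operate on m here ... */
	}
}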
@@ -553,7 +552,7 @@ restart:
}
/*
- * If a valid page or edge, bigger than the search slot, is
+ * If a valid page or edge bigger than the search slot is
* found in the traversal, skip to the next higher-level key.
*/
if (slot == (VM_RADIX_COUNT - 1) &&
@@ -569,7 +568,7 @@ restart:
}
/*
- * Look up any entry at a position less than or equal to index.
+ * Look up the nearest entry at a position less than or equal to index.
*/
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
@@ -643,7 +642,7 @@ restart:
}
/*
- * If a valid page or edge, smaller than the search slot, is
+ * If a valid page or edge smaller than the search slot is
* found in the traversal, skip to the next higher-level key.
*/
if (slot == 0 && (rnode->rn_child[slot] == NULL || m != NULL)) {
@@ -710,7 +709,7 @@ vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
/*
* Remove and free all the nodes from the radix tree.
- * This function is recrusive but there is a tight control on it as the
+ * This function is recursive but there is a tight control on it as the
* maximum depth of the tree is fixed.
*/
void