sys/vm: minor spelling fixes in comments.

No functional change.
pfg 2016-05-02 20:16:29 +00:00
parent 1940241a72
commit 8327dbfe4d
9 changed files with 16 additions and 16 deletions


@@ -965,7 +965,7 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
/*
* Free left over swap blocks in source.
*
- * We have to revert the type to OBJT_DEFAULT so we do not accidently
+ * We have to revert the type to OBJT_DEFAULT so we do not accidentally
* double-remove the object from the swap queues.
*/
if (destroysource) {
@@ -2623,7 +2623,7 @@ swapongeom_ev(void *arg, int flags)
cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
g_attach(cp, pp);
/*
- * XXX: Everytime you think you can improve the margin for
+ * XXX: Every time you think you can improve the margin for
* footshooting, somebody depends on the ability to do so:
* savecore(8) wants to write to our swapdev so we cannot
* set an exclusive count :-(


@@ -31,7 +31,7 @@
*
* This allocator is intended to replace the multitude of similar object caches
* in the standard FreeBSD kernel. The intent is to be flexible as well as
- * effecient. A primary design goal is to return unused memory to the rest of
+ * efficient. A primary design goal is to return unused memory to the rest of
* the system. This will make the system as a whole more flexible due to the
* ability to move memory to subsystems which most need it instead of leaving
* pools of reserved memory unused.
@@ -531,7 +531,7 @@ zone_timeout(uma_zone_t zone)
* hash A new hash structure with the old hash size in uh_hashsize
*
* Returns:
- * 1 on sucess and 0 on failure.
+ * 1 on success and 0 on failure.
*/
static int
hash_alloc(struct uma_hash *hash)
@@ -2257,7 +2257,7 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
/*
* Now lets just fill a bucket and put it on the free list. If that
- * works we'll restart the allocation from the begining and it
+ * works we'll restart the allocation from the beginning and it
* will use the just filled bucket.
*/
bucket = zone_alloc_bucket(zone, udata, flags);
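
A side note for readers of this hunk: the comment describes UMA's slow path, in which an empty cache is restocked by allocating a whole bucket and the allocation is then restarted so the normal fast path can pop from the freshly filled bucket. The following is a minimal, self-contained C sketch of that refill-and-retry control flow; it is an illustration only, not the kernel code, and the names bucket_alloc() and refill_bucket() are invented here.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define BUCKET_SIZE     8

struct bucket {
        void   *items[BUCKET_SIZE];
        size_t  count;
};

static struct bucket cache;             /* stand-in for a zone's filled bucket */

/* Slow path: stock the bucket with freshly allocated items. */
static bool
refill_bucket(size_t item_size)
{

        while (cache.count < BUCKET_SIZE) {
                void *p = malloc(item_size);

                if (p == NULL)
                        return (cache.count != 0);      /* a partial fill is still usable */
                cache.items[cache.count++] = p;
        }
        return (true);
}

/* Mirrors the comment: try the bucket, otherwise refill it and restart. */
static void *
bucket_alloc(size_t item_size)
{

        for (;;) {
                if (cache.count > 0)                    /* fast path: pop from the bucket */
                        return (cache.items[--cache.count]);
                if (!refill_bucket(item_size))          /* fill a bucket, then retry from the top */
                        return (NULL);
        }
}

int
main(void)
{
        void *obj = bucket_alloc(64);                   /* first call takes the refill path */

        free(obj);
        return (0);
}

The retry loop is the point of the comment: the refill path never hands an object out directly, it only restocks the cache, so there is a single fast-path exit.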


@@ -415,7 +415,7 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
/*
* The following two functions may be defined by architecture specific code
- * if they can provide more effecient allocation functions. This is useful
+ * if they can provide more efficient allocation functions. This is useful
* for using direct mapped addresses.
*/
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,


@@ -149,7 +149,7 @@ kernacc(addr, len, rw)
* the associated vm_map_entry range. It does not determine whether the
* contents of the memory is actually readable or writable. vmapbuf(),
* vm_fault_quick(), or copyin()/copout()/su*()/fu*() functions should be
- * used in conjuction with this call.
+ * used in conjunction with this call.
*/
int
useracc(addr, len, rw)
@@ -665,7 +665,7 @@ vm_forkproc(td, p2, td2, vm2, flags)
}
/*
- * Called after process has been wait(2)'ed apon and is being reaped.
+ * Called after process has been wait(2)'ed upon and is being reaped.
* The idea is to reclaim resources that we could not reclaim while
* the process was still executing.
*/


@@ -3519,7 +3519,7 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
return (KERN_NO_SPACE);
/*
- * If we can't accomodate max_ssize in the current mapping, no go.
+ * If we can't accommodate max_ssize in the current mapping, no go.
* However, we need to be aware that subsequent user mappings might
* map into the space we have reserved for stack, and currently this
* space is not protected.


@@ -2111,7 +2111,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
/*
* If prev_object was charged, then this mapping,
- * althought not charged now, may become writable
+ * although not charged now, may become writable
* later. Non-NULL cred in the object would prevent
* swap reservation during enabling of the write
* access, so reserve swap now. Failed reservation


@@ -141,7 +141,7 @@ struct vm_page {
vm_object_t object; /* which object am I in (O,P) */
vm_pindex_t pindex; /* offset into object (O,P) */
vm_paddr_t phys_addr; /* physical address of page */
- struct md_page md; /* machine dependant stuff */
+ struct md_page md; /* machine dependent stuff */
u_int wire_count; /* wired down maps refs (P) */
volatile u_int busy_lock; /* busy owners lock */
uint16_t hold_count; /* page hold count (P) */


@@ -447,7 +447,7 @@ vm_pageout_cluster(vm_page_t m)
++pageout_count;
++ib;
/*
- * alignment boundry, stop here and switch directions. Do
+ * alignment boundary, stop here and switch directions. Do
* not clear ib.
*/
if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
@@ -477,7 +477,7 @@ vm_pageout_cluster(vm_page_t m)
/*
* If we exhausted our forward scan, continue with the reverse scan
- * when possible, even past a page boundry. This catches boundry
+ * when possible, even past a page boundary. This catches boundary
* conditions.
*/
if (ib && pageout_count < vm_pageout_page_count)
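
A side note on the arithmetic in these two hunks: the backward scan counts pages off with ib and stops as soon as the page just added sits on a vm_pageout_page_count alignment boundary, so clusters stay aligned; the scan is only resumed past that point if the forward scan could not fill the cluster. Below is a small, self-contained C sketch of just the boundary test; it is an illustration with an invented backward_scan_limit() helper, not the kernel routine.

#include <stdio.h>

#define CLUSTER 16                      /* stand-in for vm_pageout_page_count */

/*
 * Walk backward from page index 'pindex' and return the lowest page
 * index the cluster reaches before the alignment test stops the scan.
 */
static unsigned long
backward_scan_limit(unsigned long pindex)
{
        unsigned long ib;

        if (pindex == 0)
                return (0);
        for (ib = 1; ib < pindex; ib++) {
                /* Page pindex - ib was just added; stop on an aligned index. */
                if ((pindex - ib) % CLUSTER == 0)
                        break;
        }
        return (pindex - ib);
}

int
main(void)
{

        /* Starting at page 37 with a cluster size of 16, the scan stops at 32. */
        printf("backward scan stops at page %lu\n", backward_scan_limit(37));
        return (0);
}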


@@ -819,7 +819,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
/*
* A sparse file can be encountered only for a single page request,
- * which may not be preceeded by call to vm_pager_haspage().
+ * which may not be preceded by call to vm_pager_haspage().
*/
if (bp->b_blkno == -1) {
KASSERT(count == 1,
@@ -1139,7 +1139,7 @@ vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
* own vnodes if they fail to implement VOP_PUTPAGES.
*
* This is typically called indirectly via the pageout daemon and
- * clustering has already typically occured, so in general we ask the
+ * clustering has already typically occurred, so in general we ask the
* underlying filesystem to write the data out asynchronously rather
* then delayed.
*/
@@ -1182,7 +1182,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
/*
* If the page-aligned write is larger then the actual file we
- * have to invalidate pages occuring beyond the file EOF. However,
+ * have to invalidate pages occurring beyond the file EOF. However,
* there is an edge case where a file may not be page-aligned where
* the last page is partially invalid. In this case the filesystem
* may not properly clear the dirty bits for the entire page (which