sys/kern: spelling fixes in comments.
No functional change.
commit e3043798aa
parent a061fea6ee
@@ -121,7 +121,7 @@ METHOD void probe_nomatch {
  * @param _child	the child device whose instance variable is
  *			being read
  * @param _index	the instance variable to read
- * @param _result	a loction to recieve the instance variable
+ * @param _result	a location to receive the instance variable
  *			value
  *
  * @retval 0		success
@@ -389,7 +389,7 @@ METHOD int release_resource {
  *			triggers
  * @param _arg		a value to use as the single argument in calls
  *			to @p _intr
- * @param _cookiep	a pointer to a location to recieve a cookie
+ * @param _cookiep	a pointer to a location to receive a cookie
  *			value that may be used to remove the interrupt
  *			handler
  */
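As context for the _cookiep parameter corrected above, here is a minimal, hypothetical driver sketch; the mydev_* names and softc layout are illustrative, not from this commit. The cookie received through _cookiep is what bus_teardown_intr() later needs to remove the handler.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/bus.h>
    #include <sys/rman.h>
    #include <machine/resource.h>

    struct mydev_softc {
            struct resource *irq_res;       /* allocated interrupt line */
            int              irq_rid;
            void            *irq_cookie;    /* receives the cookie value */
    };

    static void
    mydev_intr(void *arg)
    {
            struct mydev_softc *sc = arg;   /* the single argument (_arg) */

            (void)sc;                       /* ... service the interrupt ... */
    }

    static int
    mydev_attach(device_t dev)
    {
            struct mydev_softc *sc = device_get_softc(dev);

            sc->irq_rid = 0;
            sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                &sc->irq_rid, RF_ACTIVE);
            if (sc->irq_res == NULL)
                    return (ENXIO);
            /* On success, sc->irq_cookie holds the value written via _cookiep. */
            return (bus_setup_intr(dev, sc->irq_res,
                INTR_TYPE_MISC | INTR_MPSAFE, NULL, mydev_intr, sc,
                &sc->irq_cookie));
    }

    static int
    mydev_detach(device_t dev)
    {
            struct mydev_softc *sc = device_get_softc(dev);

            /* The cookie identifies which handler to remove. */
            bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);
            bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res);
            return (0);
    }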
@@ -460,9 +460,9 @@ METHOD int set_resource {
  * @param _child	the device which owns the resource
  * @param _type		the type of resource
  * @param _rid		the resource identifier
- * @param _start	the address of a location to recieve the start
+ * @param _start	the address of a location to receive the start
  *			index of the resource range
- * @param _count	the address of a location to recieve the size
+ * @param _count	the address of a location to receive the size
  *			of the resource range
  */
 METHOD int get_resource {
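For the _start/_count semantics above, a brief sketch using the C-callable bus_get_resource(9) wrapper; mydev_print_mem is an illustrative name, and the rman_res_t types reflect newer trees (older branches used u_long).

    static void
    mydev_print_mem(device_t dev)
    {
            rman_res_t start, count;        /* locations that receive the values */

            if (bus_get_resource(dev, SYS_RES_MEMORY, 0, &start, &count) == 0)
                    device_printf(dev, "memory at 0x%jx, size 0x%jx\n",
                        (uintmax_t)start, (uintmax_t)count);
    }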
@@ -700,7 +700,7 @@ imgact_binmisc_exec(struct image_params *imgp)
 			break;
 
 		case ' ':
-			/* Replace space with NUL to seperate arguments. */
+			/* Replace space with NUL to separate arguments. */
 			*d++ = '\0';
 			break;
 
@@ -206,7 +206,7 @@ extern void kzipfree (void*);
     end-of-block.  Note however that the static length tree defines
     288 codes just to fill out the Huffman codes.  Codes 286 and 287
     cannot be used though, since there is no length base or extra bits
-    defined for them.  Similarily, there are up to 30 distance codes.
+    defined for them.  Similarly, there are up to 30 distance codes.
     However, static trees define 32 codes (all 5 bits) to fill out the
     Huffman codes, but the last two had better not show up in the data.
  7. Unzip can check dynamic Huffman blocks for complete code sets.
@@ -335,7 +335,7 @@ static const ush mask[] = {
     where NEEDBITS makes sure that b has at least j bits in it, and
     DUMPBITS removes the bits from b.  The macros use the variable k
     for the number of bits in b.  Normally, b and k are register
-    variables for speed, and are initialized at the begining of a
+    variables for speed, and are initialized at the beginning of a
     routine that uses these macros from a global bit buffer and count.
 
     In order to not ask for more bits than there are in the compressed
@@ -156,7 +156,7 @@ _cv_wait(struct cv *cvp, struct lock_object *lock)
 
 /*
  * Wait on a condition variable.  This function differs from cv_wait by
- * not aquiring the mutex after condition variable was signaled.
+ * not acquiring the mutex after condition variable was signaled.
  */
 void
 _cv_wait_unlock(struct cv *cvp, struct lock_object *lock)
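A hedged illustration of the difference the corrected sentence describes (the gate_* names are hypothetical): cv_wait() returns with the mutex re-acquired, while cv_wait_unlock() returns with it released, so no predicate re-check loop is possible after the latter.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/condvar.h>

    static struct mtx gate_mtx;
    static struct cv gate_cv;
    static int gate_open;

    static void
    wait_with_reacquire(void)
    {
            mtx_lock(&gate_mtx);
            while (!gate_open)
                    cv_wait(&gate_cv, &gate_mtx);   /* mutex held again here */
            /* ... may safely touch state protected by gate_mtx ... */
            mtx_unlock(&gate_mtx);
    }

    static void
    wait_without_reacquire(void)
    {
            mtx_lock(&gate_mtx);
            cv_wait_unlock(&gate_cv, &gate_mtx);
            /* gate_mtx is NOT held here; do not touch protected state. */
    }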
@@ -1530,7 +1530,7 @@ fdgrowtable_exp(struct filedesc *fdp, int nfd)
 }
 
 /*
- * Grow the file table to accomodate (at least) nfd descriptors.
+ * Grow the file table to accommodate (at least) nfd descriptors.
 */
static void
fdgrowtable(struct filedesc *fdp, int nfd)
@@ -1544,7 +1544,7 @@ fdgrowtable(struct filedesc *fdp, int nfd)
 
 	/*
 	 * If lastfile is -1 this struct filedesc was just allocated and we are
-	 * growing it to accomodate for the one we are going to copy from. There
+	 * growing it to accommodate for the one we are going to copy from. There
 	 * is no need to have a lock on this one as it's not visible to anyone.
 	 */
 	if (fdp->fd_lastfile != -1)
@@ -1709,7 +1709,7 @@ fdallocn(struct thread *td, int minfd, int *fds, int n)
 }
 
 /*
- * Create a new open file structure and allocate a file decriptor for the
+ * Create a new open file structure and allocate a file descriptor for the
  * process that refers to it.  We add one reference to the file for the
  * descriptor table and one reference for resultfp. This is to prevent us
  * being preempted and the entry in the descriptor table closed after we
@@ -2535,7 +2535,7 @@ fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
  *
  * File's rights will be checked against the capability rights mask.
  *
- * If an error occured the non-zero error is returned and *fpp is set to
+ * If an error occurred the non-zero error is returned and *fpp is set to
  * NULL.  Otherwise *fpp is held and set and zero is returned.  Caller is
  * responsible for fdrop().
  */
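The corrected sentence states the fget family's contract. A hedged sketch of a caller honoring it, using the public fget(9) interface (example_use_fd is an illustrative name):

    static int
    example_use_fd(struct thread *td, int fd)
    {
            cap_rights_t rights;
            struct file *fp;
            int error;

            error = fget(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
            if (error != 0)
                    return (error);         /* fp was set to NULL */
            /* ... operate on the held file ... */
            fdrop(fp, td);                  /* caller is responsible for fdrop() */
            return (0);
    }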
@@ -413,7 +413,7 @@ do_execve(td, args, mac_p)
 
 	/*
 	 * Translate the file name. namei() returns a vnode pointer
-	 *	in ni_vp amoung other things.
+	 *	in ni_vp among other things.
 	 *
 	 * XXXAUDIT: It would be desirable to also audit the name of the
 	 * interpreter if this is an interpreted binary.
@@ -918,7 +918,7 @@ fork1(struct thread *td, struct fork_req *fr)
 		/*
 		 * The swap reservation failed.  The accounting
 		 * from the entries of the copied vm2 will be
-		 * substracted in vmspace_free(), so force the
+		 * subtracted in vmspace_free(), so force the
 		 * reservation there.
 		 */
 		swap_reserve_force(mem_charged);
@@ -4039,7 +4039,7 @@ prison_priv_check(struct ucred *cred, int priv)
 		return (0);
 
 		/*
-		 * Allow jailed root to set certian IPv4/6 (option) headers.
+		 * Allow jailed root to set certain IPv4/6 (option) headers.
 		 */
 	case PRIV_NETINET_SETHDROPTS:
 		return (0);
@@ -4280,7 +4280,7 @@ SYSCTL_UINT(_security_jail, OID_AUTO, jail_max_af_ips, CTLFLAG_RW,
 #endif
 
 /*
- * Default parameters for jail(2) compatability.  For historical reasons,
+ * Default parameters for jail(2) compatibility.  For historical reasons,
  * the sysctl names have varying similarity to the parameter names.  Prisons
  * just see their own parameters, and can't change them.
  */
@@ -949,7 +949,7 @@ linker_debug_search_symbol_name(caddr_t value, char *buf, u_int buflen,
  *
  * Note that we do not obey list locking protocols here.  We really don't need
  * DDB to hang because somebody's got the lock held.  We'll take the chance
- * that the files list is inconsistant instead.
+ * that the files list is inconsistent instead.
  */
 #ifdef DDB
 int
@@ -2037,7 +2037,7 @@ linker_load_dependencies(linker_file_t lf)
 	int ver, error = 0, count;
 
 	/*
-	 * All files are dependant on /kernel.
+	 * All files are dependent on /kernel.
 	 */
 	sx_assert(&kld_sx, SA_XLOCKED);
 	if (linker_kernel_file) {
@@ -281,7 +281,7 @@ wakeupshlk(struct lock *lk, const char *file, int line)
 		 * exclusive waiters bit anyway.
 		 * Please note that lk_exslpfail count may be lying about
 		 * the real number of waiters with the LK_SLEEPFAIL flag on
-		 * because they may be used in conjuction with interruptible
+		 * because they may be used in conjunction with interruptible
 		 * sleeps so lk_exslpfail might be considered an 'upper limit'
 		 * bound, including the edge cases.
 		 */
@@ -1058,7 +1058,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			 * Please note that lk_exslpfail count may be lying
 			 * about the real number of waiters with the
 			 * LK_SLEEPFAIL flag on because they may be used in
-			 * conjuction with interruptible sleeps so
+			 * conjunction with interruptible sleeps so
 			 * lk_exslpfail might be considered an 'upper limit'
 			 * bound, including the edge cases.
 			 */
@@ -1171,7 +1171,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				 * Please note that lk_exslpfail count may be
 				 * lying about the real number of waiters with
 				 * the LK_SLEEPFAIL flag on because they may
-				 * be used in conjuction with interruptible
+				 * be used in conjunction with interruptible
 				 * sleeps so lk_exslpfail might be considered
 				 * an 'upper limit' bound, including the edge
 				 * cases.
@@ -362,7 +362,7 @@ lf_free_lock(struct lockf_entry *lock)
 	struct lock_owner *lo = lock->lf_owner;
 	if (lo) {
 		KASSERT(LIST_EMPTY(&lock->lf_outedges),
-		    ("freeing lock with dependancies"));
+		    ("freeing lock with dependencies"));
 		KASSERT(LIST_EMPTY(&lock->lf_inedges),
 		    ("freeing lock with dependants"));
 		sx_xlock(&lf_lock_owners_lock);
@@ -827,7 +827,7 @@ lf_purgelocks(struct vnode *vp, struct lockf **statep)
 
 		/*
 		 * We can just free all the active locks since they
-		 * will have no dependancies (we removed them all
+		 * will have no dependencies (we removed them all
 		 * above). We don't need to bother locking since we
 		 * are the last thread using this state structure.
 		 */
@@ -1112,7 +1112,7 @@ lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
 
 /*
  * Wake up a sleeping lock and remove it from the pending list now
- * that all its dependancies have been resolved. The caller should
+ * that all its dependencies have been resolved. The caller should
  * arrange for the lock to be added to the active list, adjusting any
  * existing locks for the same owner as needed.
  */
@@ -1137,9 +1137,9 @@ lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
 }
 
 /*
- * Re-check all dependant locks and remove edges to locks that we no
+ * Re-check all dependent locks and remove edges to locks that we no
  * longer block. If 'all' is non-zero, the lock has been removed and
- * we must remove all the dependancies, otherwise it has simply been
+ * we must remove all the dependencies, otherwise it has simply been
  * reduced but remains active. Any pending locks which have been been
  * unblocked are added to 'granted'
 */
@@ -1165,7 +1165,7 @@ lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
 }
 
 /*
- * Set the start of an existing active lock, updating dependancies and
+ * Set the start of an existing active lock, updating dependencies and
  * adding any newly woken locks to 'granted'.
  */
 static void
@@ -1181,7 +1181,7 @@ lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
 }
 
 /*
- * Set the end of an existing active lock, updating dependancies and
+ * Set the end of an existing active lock, updating dependencies and
  * adding any newly woken locks to 'granted'.
  */
 static void
@@ -1204,7 +1204,7 @@ lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
  * pending locks as a result of downgrading/unlocking. We simply
  * activate the newly granted locks by looping.
  *
- * Since the new lock already has its dependancies set up, we always
+ * Since the new lock already has its dependencies set up, we always
  * add it to the list (unless its an unlock request). This may
  * fragment the lock list in some pathological cases but its probably
  * not a real problem.
@@ -1332,7 +1332,7 @@ lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
 	 * may allow some other pending lock to become
 	 * active.  Consider this case:
 	 *
-	 * Owner	Action		Result		Dependancies
+	 * Owner	Action		Result		Dependencies
 	 *
 	 * A:		lock [0..0]	succeeds
 	 * B:		lock [2..2]	succeeds
@@ -1840,7 +1840,7 @@ lf_split(struct lockf *state, struct lockf_entry *lock1,
 	/*
 	 * This cannot cause a deadlock since any edges we would add
 	 * to splitlock already exist in lock1. We must be sure to add
-	 * necessary dependancies to splitlock before we reduce lock1
+	 * necessary dependencies to splitlock before we reduce lock1
 	 * otherwise we may accidentally grant a pending lock that
 	 * was blocked by the tail end of lock1.
 	 */
@@ -92,7 +92,7 @@ __FBSDID("$FreeBSD$");
  *
  * Whenever an object is allocated from the underlying global
  * memory pool it gets pre-initialized with the _zinit_ functions.
- * When the Keg's are overfull objects get decomissioned with
+ * When the Keg's are overfull objects get decommissioned with
  * _zfini_ functions and free'd back to the global memory pool.
  *
  */
@@ -39,7 +39,7 @@
  *
  * Disadvantages:
  *	- should generally only be used as leaf mutexes.
- *	- pool/pool dependancy ordering cannot be depended on.
+ *	- pool/pool dependency ordering cannot be depended on.
  *	- possible L1 cache mastersip contention between cpus.
  */
 
@@ -164,7 +164,7 @@ sys_getpgrp(struct thread *td, struct getpgrp_args *uap)
 	return (0);
 }
 
-/* Get an arbitary pid's process group id */
+/* Get an arbitrary pid's process group id */
 #ifndef _SYS_SYSPROTO_H_
 struct getpgid_args {
 	pid_t	pid;
@@ -195,7 +195,7 @@ sys_getpgid(struct thread *td, struct getpgid_args *uap)
 }
 
 /*
- * Get an arbitary pid's session id.
+ * Get an arbitrary pid's session id.
 */
#ifndef _SYS_SYSPROTO_H_
struct getsid_args {
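The two corrected comments document the kernel side of getpgid(2) and getsid(2). A small userland illustration of the calls:

    #include <sys/types.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid = getpid();   /* an arbitrary pid may be passed, or 0 for self */

            printf("pgid: %d\n", (int)getpgid(pid));
            printf("sid:  %d\n", (int)getsid(pid));
            return (0);
    }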
@@ -444,7 +444,7 @@ rctl_pcpu_available(const struct proc *p) {
 
 	/*
 	 * Return slightly less than actual value of the available
-	 * %cpu resource.  This makes %cpu throttling more agressive
+	 * %cpu resource.  This makes %cpu throttling more aggressive
 	 * and lets us act sooner than the limits are already exceeded.
 	 */
 	if (limit != 0) {
@@ -375,7 +375,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	}
 
 	/*
-	 * We allow readers to aquire a lock even if a writer is blocked if
+	 * We allow readers to acquire a lock even if a writer is blocked if
 	 * the lock is recursive and the reader already holds the lock.
 	 */
 	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
@@ -359,7 +359,7 @@ sysctl_register_oid(struct sysctl_oid *oidp)
 	 *
 	 * NOTE: DO NOT change the starting value here, change it in
 	 * <sys/sysctl.h>, and make sure it is at least 256 to
-	 * accomodate e.g. net.inet.raw as a static sysctl node.
+	 * accommodate e.g. net.inet.raw as a static sysctl node.
 	 */
 	if (oid_number < 0) {
 		static int newoid;
@@ -494,7 +494,7 @@ sysctl_ctx_free(struct sysctl_ctx_list *clist)
 	}
 	/*
 	 * Restore deregistered entries, either from the end,
-	 * or from the place where error occured.
+	 * or from the place where error occurred.
 	 * e contains the entry that was not unregistered
 	 */
 	if (error)
@@ -1898,7 +1898,7 @@ inittimecounter(void *dummy)
 	 * Set the initial timeout to
 	 * max(1, <approx. number of hardclock ticks in a millisecond>).
 	 * People should probably not use the sysctl to set the timeout
-	 * to smaller than its inital value, since that value is the
+	 * to smaller than its initial value, since that value is the
 	 * smallest reasonable one.  If they want better timestamps they
 	 * should use the non-"get"* functions.
 	 */
@@ -1446,7 +1446,7 @@ _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
  * which set the timer can do the maintanence the timer was for as close
  * as possible to the originally intended time.  Testing this code for a
  * week showed that resuming from a suspend resulted in 22 to 25 timers
- * firing, which seemed independant on whether the suspend was 2 hours or
+ * firing, which seemed independent on whether the suspend was 2 hours or
  * 2 days.  Your milage may vary. - Ken Key <key@cs.utk.edu>
  */
 void
@@ -1128,7 +1128,7 @@ relocate_file(elf_file_t ef)
 	}
 
 	/*
-	 * Only clean SHN_FBSD_CACHED for successfull return.  If we
+	 * Only clean SHN_FBSD_CACHED for successful return.  If we
 	 * modified symbol table for the object but found an
 	 * unresolved symbol, there is no reason to roll back.
 	 */
@@ -89,7 +89,7 @@ METHOD int lookup_set {
 };
 
 #
-# Unload a file, releasing dependancies and freeing storage.
+# Unload a file, releasing dependencies and freeing storage.
 #
 METHOD void unload {
 	linker_file_t file;
@@ -1395,7 +1395,7 @@ sched_add(struct thread *td, int flags)
 	 * or kicking off another CPU as it won't help and may hinder.
 	 * In the YIEDLING case, we are about to run whoever is being
 	 * put in the queue anyhow, and in the OURSELF case, we are
-	 * puting ourself on the run queue which also only happens
+	 * putting ourself on the run queue which also only happens
 	 * when we are about to yield.
 	 */
 	if ((flags & SRQ_YIELDING) == 0) {
@@ -57,8 +57,8 @@
  * The non-blocking features of the blist code are used in the swap code
  * (vm/swap_pager.c).
  *
- * LAYOUT: The radix tree is layed out recursively using a
- * linear array. Each meta node is immediately followed (layed out
+ * LAYOUT: The radix tree is laid out recursively using a
+ * linear array. Each meta node is immediately followed (laid out
  * sequentially in memory) by BLIST_META_RADIX lower level nodes. This
  * is a recursive structure but one that can be easily scanned through
  * a very simple 'skip' calculation. In order to support large radixes,
@@ -4699,7 +4699,7 @@ root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
 }
 
 /*
- * If we get here, assume that the device is permanant and really is
+ * If we get here, assume that the device is permanent and really is
  * present in the system.  Removable bus drivers are expected to intercept
  * this call long before it gets here.  We return -1 so that drivers that
  * really care can check vs -1 or some ERRNO returned higher in the food
@@ -389,7 +389,7 @@ sysctl_devstat(SYSCTL_HANDLER_ARGS)
 	 * XXX devstat_generation should really be "volatile" but that
 	 * XXX freaks out the sysctl macro below.  The places where we
 	 * XXX change it and inspect it are bracketed in the mutex which
-	 * XXX guarantees us proper write barriers.  I don't belive the
+	 * XXX guarantees us proper write barriers.  I don't believe the
 	 * XXX compiler is allowed to optimize mygen away across calls
 	 * XXX to other functions, so the following is belived to be safe.
 	 */
@@ -705,7 +705,7 @@ intr_isrc_assign_cpu(void *arg, int cpu)
 	 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or
 	 * re-balance it to another CPU or enable it on more CPUs. However,
 	 * PIC is expected to change isrc_cpu appropriately to keep us well
-	 * informed if the call is successfull.
+	 * informed if the call is successful.
 	 */
 	if (irq_assign_cpu) {
 		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
@@ -1032,7 +1032,7 @@ intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
 
 #ifdef INTR_SOLO
 	/*
-	 * Standard handling is done thru MI interrupt framework. However,
+	 * Standard handling is done through MI interrupt framework. However,
 	 * some interrupts could request solely own special handling. This
 	 * non standard handling can be used for interrupt controllers without
 	 * handler (filter only), so in case that interrupt controllers are
@@ -288,7 +288,7 @@ mbp_ext_free(struct mbuf *m, void *buf, void *arg)
 }
 
 /*
- * Free all buffers that are marked as beeing on the card
+ * Free all buffers that are marked as being on the card
 */
void
mbp_card_free(struct mbpool *p)
@@ -102,7 +102,7 @@ mb_fixhdr(struct mbchain *mbp)
 /*
  * Check if object of size 'size' fit to the current position and
  * allocate new mbuf if not. Advance pointers and increase length of mbuf(s).
- * Return pointer to the object placeholder or NULL if any error occured.
+ * Return pointer to the object placeholder or NULL if any error occurred.
  * Note: size should be <= MLEN
 */
caddr_t
@@ -50,7 +50,7 @@ static u_int msgbuf_cksum(struct msgbuf *mbp);
 
 /*
  * Timestamps in msgbuf are useful when trying to diagnose when core dumps
- * or other actions occured.
+ * or other actions occurred.
 */
static int msgbuf_show_timestamp = 0;
SYSCTL_INT(_kern, OID_AUTO, msgbuf_show_timestamp, CTLFLAG_RWTUN,
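The SYSCTL_INT in this hunk exposes the knob as kern.msgbuf_show_timestamp. A hedged userland equivalent of running "sysctl kern.msgbuf_show_timestamp=1":

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <err.h>

    int
    main(void)
    {
            int on = 1;

            /* Subsequent kernel message buffer lines will carry timestamps. */
            if (sysctlbyname("kern.msgbuf_show_timestamp", NULL, NULL,
                &on, sizeof(on)) != 0)
                    err(1, "sysctlbyname");
            return (0);
    }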
@@ -269,7 +269,7 @@ kmstartup(dummy)
 	 * without much risk of reducing the profiling times below what they
 	 * would be when profiling is not configured.  Abbreviate:
 	 *	ab = minimum time between MC1 and MC3
-	 *	a  = minumum time between MC1 and MC2
+	 *	a  = minimum time between MC1 and MC2
 	 *	b  = minimum time between MC2 and MC3
 	 *	cd = minimum time between ME1 and ME3
 	 *	c  = minimum time between ME1 and ME2
@@ -603,7 +603,7 @@ doswitch:
 			 * z', but treats `a-a' as `the letter a, the
 			 * character -, and the letter a'.
 			 *
-			 * For compatibility, the `-' is not considerd
+			 * For compatibility, the `-' is not considered
 			 * to define a range if the character following
 			 * it is either a close bracket (required by ANSI)
 			 * or is not numerically greater than the character
@@ -466,7 +466,7 @@ copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
 	*addr = round_page((vm_offset_t)vms->vm_daddr +
 	    lim_max(td, RLIMIT_DATA));
 
-	/* round size up to page boundry */
+	/* round size up to page boundary */
 	size = (vm_size_t)round_page(sz);
 
 	error = vm_mmap(&vms->vm_map, addr, size, VM_PROT_READ | VM_PROT_WRITE,
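For the corrected comment: on FreeBSD, round_page() rounds up to the next multiple of PAGE_SIZE, conventionally expanding to ((x) + PAGE_MASK) & ~PAGE_MASK. A tiny sketch of the arithmetic (round_up_to_page is an illustrative name):

    static vm_size_t
    round_up_to_page(size_t sz)
    {
            /* With 4 KiB pages: round_page(1) == 4096, round_page(4096) == 4096. */
            vm_size_t size = (vm_size_t)round_page(sz);

            KASSERT((size & PAGE_MASK) == 0, ("size not page aligned"));
            return (size);
    }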
@@ -2971,7 +2971,7 @@ witness_lock_order_add(struct witness *parent, struct witness *child)
 	return (1);
 }
 
-/* Call this whenver the structure of the witness graph changes. */
+/* Call this whenever the structure of the witness graph changes. */
 static void
 witness_increment_graph_generation(void)
 {
@@ -1143,7 +1143,7 @@ sys_semop(struct thread *td, struct semop_args *uap)
 	if ((error = sem_prison_cansee(rpr, semakptr)) != 0)
 		goto done2;
 	/*
-	 * Initial pass thru sops to see what permissions are needed.
+	 * Initial pass through sops to see what permissions are needed.
 	 * Also perform any checks that don't need repeating on each
 	 * attempt to satisfy the request vector.
 	 */
@@ -388,7 +388,7 @@ tty_wait_background(struct tty *tp, struct thread *td, int sig)
 	PROC_LOCK(p);
 	/*
 	 * The process should only sleep, when:
-	 * - This terminal is the controling terminal
+	 * - This terminal is the controlling terminal
 	 * - Its process group is not the foreground process
 	 *   group
 	 * - The parent process isn't waiting for the child to
@@ -124,7 +124,7 @@ ptsdev_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
 		/*
 		 * Implement packet mode. When packet mode is turned on,
 		 * the first byte contains a bitmask of events that
-		 * occured (start, stop, flush, window size, etc).
+		 * occurred (start, stop, flush, window size, etc).
 		 */
 		if (psc->pts_flags & PTS_PKT && psc->pts_pkt) {
 			pkt = psc->pts_pkt;
@@ -143,7 +143,7 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp)
 	 * Ideally, the requirement should only be (iii).
 	 *
 	 * If we're writable, we're sure we're writable, because the ref. count
-	 * cannot increase from 1, as that would require posession of mbuf
+	 * cannot increase from 1, as that would require possession of mbuf
 	 * n by someone else (which is impossible). However, if we're _not_
 	 * writable, we may eventually become writable )if the ref. count drops
 	 * to 1), but we'll fail to notice it unless we re-evaluate
@@ -194,7 +194,7 @@ VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
 /*
  * Limit on the number of connections in the listen queue waiting
  * for accept(2).
- * NB: The orginal sysctl somaxconn is still available but hidden
+ * NB: The original sysctl somaxconn is still available but hidden
  * to prevent confusion about the actual purpose of this number.
  */
 static u_int somaxconn = SOMAXCONN;
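The limit described here silently clamps the backlog passed to listen(2); on trees of this era the visible sysctl name is kern.ipc.soacceptqueue, with kern.ipc.somaxconn kept as the hidden alias noted above (the exact names are an assumption, not part of this diff). A userland sketch:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <err.h>

    int
    main(void)
    {
            struct sockaddr_in sin = {
                    .sin_len = sizeof(sin),
                    .sin_family = AF_INET,  /* port 0 / INADDR_ANY */
            };
            int s;

            if ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0)
                    err(1, "socket");
            if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0)
                    err(1, "bind");
            /* Request a huge backlog; the kernel caps it at the limit. */
            if (listen(s, 1 << 20) < 0)
                    err(1, "listen");
            return (0);
    }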
@@ -1164,7 +1164,7 @@ sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
 	}
 	/*
 	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
-	 * of date.  We could have recieved a reset packet in an interrupt or
+	 * of date.  We could have received a reset packet in an interrupt or
 	 * maybe we slept while doing page faults in uiomove() etc.  We could
 	 * probably recheck again inside the locking protection here, but
 	 * there are probably other places that this also happens.  We must
@@ -1347,7 +1347,7 @@ restart:
 		}
 		/*
 		 * XXX all the SBS_CANTSENDMORE checks previously
-		 * done could be out of date.  We could have recieved
+		 * done could be out of date.  We could have received
 		 * a reset packet in an interrupt or maybe we slept
 		 * while doing page faults in uiomove() etc.  We
 		 * could probably recheck again inside the locking
@@ -150,7 +150,7 @@ struct namecache_ts {
  */
 
 /*
- * Structures associated with name cacheing.
+ * Structures associated with name caching.
 */
#define	NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
@@ -485,7 +485,7 @@ cache_zap(struct namecache *ncp)
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
- * (negative cacheing), a status of ENOENT is returned. If the lookup
+ * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned. If the directory vnode is
 * recycled out from under us due to a forced unmount, a status of
 * ENOENT is returned.
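A hedged sketch of a caller interpreting the three status values described above; the cache_lookup() signature with timestamp arguments matches this era's vfs_cache.c as best recalled, and both example_* names are hypothetical:

    static int example_real_lookup(struct vnode *, struct vnode **,
        struct componentname *);    /* hypothetical fallback path */

    static int
    example_cached_lookup(struct vnode *dvp, struct vnode **vpp,
        struct componentname *cnp)
    {
            switch (cache_lookup(dvp, vpp, cnp, NULL, NULL)) {
            case -1:        /* hit: *vpp holds the vnode */
                    return (0);
            case ENOENT:    /* negative caching: name known not to exist */
                    return (ENOENT);
            default:        /* miss (status 0): fall back to a real lookup */
                    return (example_real_lookup(dvp, vpp, cnp));
            }
    }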
@@ -563,7 +563,7 @@ cluster_callback(bp)
 	int error = 0;
 
 	/*
-	 * Must propogate errors to all the components.
+	 * Must propagate errors to all the components.
 	 */
 	if (bp->b_ioflags & BIO_ERROR)
 		error = bp->b_error;
@@ -1154,7 +1154,7 @@ NDFREE(struct nameidata *ndp, const u_int flags)
 * Determine if there is a suitable alternate filename under the specified
 * prefix for the specified path.  If the create flag is set, then the
 * alternate prefix will be used so long as the parent directory exists.
- * This is used by the various compatiblity ABIs so that Linux binaries prefer
+ * This is used by the various compatibility ABIs so that Linux binaries prefer
 * files under /compat/linux for example.  The chosen path (whether under
 * the prefix or under /) is returned in a kernel malloc'd buffer pointed
 * to by pathbuf.  The caller is responsible for free'ing the buffer from
@@ -79,7 +79,7 @@ __FBSDID("$FreeBSD$");
 *
 * If the environment variable vfs.root.mountfrom is a space separated list,
 * each list element is tried in turn and the root filesystem will be mounted
- * from the first one that suceeds.
+ * from the first one that succeeds.
 *
 * The environment variable vfs.root.mountfrom.options is a comma delimited
 * set of string mount options.  These mount options must be parseable
@@ -534,7 +534,7 @@ vfs_busy(struct mount *mp, int flags)
 	MNT_ILOCK(mp);
 	MNT_REF(mp);
 	/*
-	 * If mount point is currenly being unmounted, sleep until the
+	 * If mount point is currently being unmounted, sleep until the
 	 * mount point fate is decided.  If thread doing the unmounting fails,
 	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
 	 * that this mount point has survived the unmount attempt and vfs_busy
@@ -830,7 +830,7 @@ vattr_null(struct vattr *vap)
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
- * desireable to reuse such vnodes.  These conditions may cause the
+ * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
@@ -3945,7 +3945,7 @@ destroy_vpollinfo(struct vpollinfo *vi)
 }
 
 /*
- * Initalize per-vnode helper structure to hold poll-related state.
+ * Initialize per-vnode helper structure to hold poll-related state.
 */
void
v_addpollinfo(struct vnode *vp)
@@ -4356,7 +4356,7 @@ extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
 
 #ifdef DEBUG_VFS_LOCKS
 /*
- * This only exists to supress warnings from unlocked specfs accesses.  It is
+ * This only exists to suppress warnings from unlocked specfs accesses.  It is
 * no longer ok to have an unlocked VFS.
 */
#define	IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \
@@ -35,7 +35,7 @@
 # is a specification of the locking protocol used by each vop call.
 # The first column is the name of the variable, the remaining three
 # columns are in, out and error respectively.  The "in" column defines
-# the lock state on input, the "out" column defines the state on succesful
+# the lock state on input, the "out" column defines the state on successful
 # return, and the "error" column defines the locking state on error exit.
 #
 # The locking value can take the following values:
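To make the column description concrete, an illustrative entry in this file's own notation, recalled from vnode_if.src of roughly this era (treat the exact lock states as an assumption). The variable name comes first, then the in, out, and error lock states:

    %% lookup	dvp	L L L
    %% lookup	vpp	- L -

    vop_lookup {
    	IN struct vnode *dvp;
    	INOUT struct vnode **vpp;
    	IN struct componentname *cnp;
    };

Here dvp must be locked on entry and stays locked on both successful and error return, while vpp is returned locked only on success.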