Fix improper use of "it's" where "its" is meant.

Sponsored by:	Dell EMC Isilon
Bryan Drewery 2016-11-08 23:59:41 +00:00
parent b5b4f379e0
commit 28323add09
42 changed files with 47 additions and 47 deletions

View File

@ -84,7 +84,7 @@ TAILQ_HEAD(superblocks, superblock);
* Description of the PF rule structure.
*/
enum {
BARRIER, /* the presence of the field puts the rule in it's own block */
BARRIER, /* the presence of the field puts the rule in its own block */
BREAK, /* the field may not differ between rules in a superblock */
NOMERGE, /* the field may not differ between rules when combined */
COMBINED, /* the field may itself be combined with other rules */
@ -104,7 +104,7 @@ static struct pf_rule_field {
/*
* The presence of these fields in a rule put the rule in it's own
* The presence of these fields in a rule put the rule in its own
* superblock. Thus it will not be optimized. It also prevents the
* rule from being re-ordered at all.
*/

View File

@ -859,7 +859,7 @@ vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
/*
* Logical mode: match each APIC that has a bit set
* in it's LDR that matches a bit in the ldest.
* in its LDR that matches a bit in the ldest.
*/
CPU_ZERO(dmask);
amask = vm_active_cpus(vm);

View File

@ -89,7 +89,7 @@ typedef u_long fptrdiff_t;
__asm__("ldmfd sp!, {r0-r3, lr}"); \
/* \
* Return to the caller. Loading lr and pc in one instruction \
* is deprecated on ARMv7 so we need this on it's own. \
* is deprecated on ARMv7 so we need this on its own. \
*/ \
__asm__("ldmfd sp!, {pc}");
void bintr(void);

View File

@ -1033,7 +1033,7 @@ omap4_clk_get_arm_fclk_freq(struct ti_clock_dev *clkdev,
* The USB clocking setup seems to be a bit more tricky than the other modules,
* to start with the clocking diagram for the HS host module shows 13 different
* clocks. So to try and make it easier to follow the clocking activation
* and deactivation is handled in it's own set of callbacks.
* and deactivation is handled in its own set of callbacks.
*
* LOCKING:
* Inherits the locks from the omap_prcm driver, no internal locking.

View File

@ -205,7 +205,7 @@ print_cpu_features(u_int cpu)
* https://lkml.org/lkml/2016/8/4/722
*/
/*
* XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 on it's own also
* XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 on its own also
* triggers on pass 2.0+.
*/
if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&

View File

@ -290,7 +290,7 @@ typedef enum {
/* SIM ready to take more commands */
CAM_RELEASE_SIMQ = 0x100,
/* SIM has this command in it's queue */
/* SIM has this command in its queue */
CAM_SIM_QUEUED = 0x200,
/* Quality of service data is valid */

View File

@ -3104,7 +3104,7 @@ arc_buf_size(arc_buf_t *buf)
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on it's state prior to entering this
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost

View File

@ -186,7 +186,7 @@ dtrace_trap(struct trapframe *frame, u_int type)
/*
* A trap can occur while DTrace executes a probe. Before
* executing the probe, DTrace blocks re-scheduling and sets
* a flag in it's per-cpu flags to indicate that it doesn't
* a flag in its per-cpu flags to indicate that it doesn't
* want to fault. On returning from the probe, the no-fault
* flag is cleared and finally re-scheduling is enabled.
*

View File

@ -186,7 +186,7 @@ dtrace_trap(struct trapframe *frame, u_int type)
/*
* A trap can occur while DTrace executes a probe. Before
* executing the probe, DTrace blocks re-scheduling and sets
* a flag in it's per-cpu flags to indicate that it doesn't
* a flag in its per-cpu flags to indicate that it doesn't
* want to fault. On returning from the probe, the no-fault
* flag is cleared and finally re-scheduling is enabled.
*

View File

@ -189,7 +189,7 @@ dtrace_trap(struct trapframe *frame, u_int type)
/*
* A trap can occur while DTrace executes a probe. Before
* executing the probe, DTrace blocks re-scheduling and sets
* a flag in it's per-cpu flags to indicate that it doesn't
* a flag in its per-cpu flags to indicate that it doesn't
* want to fault. On returning from the probe, the no-fault
* flag is cleared and finally re-scheduling is enabled.
*

View File

@ -4397,7 +4397,7 @@ ar9300_eeprom_restore(struct ath_hal *ah)
#endif
/*
* At this point, mptr points to the eeprom data structure
* in it's "default" state. If this is big endian, swap the
* in its "default" state. If this is big endian, swap the
* data structures back to "little endian" form.
*/
if (ar9300_eeprom_restore_internal(ah, mptr, mdata_size) >= 0) {

View File

@ -1806,7 +1806,7 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
** suspend completion and reset the resume state machine.
**
** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
** resume completion is in it's 'done' state whenever
** resume completion is in its 'done' state whenever
** videcore is running. Therfore, the VC_RESUME_IDLE state
** implies that videocore is suspended.
** Hence, any thread which needs to wait until videocore is

View File

@ -1446,7 +1446,7 @@ p_command_xfer:
test SSTAT0, SDONE jnz . + 2;
test SSTAT1, PHASEMIS jz . - 1;
/*
* Wait for our ACK to go-away on it's own
* Wait for our ACK to go-away on its own
* instead of being killed by SCSIEN getting cleared.
*/
test SCSISIGI, ACKI jnz .;

View File

@ -270,7 +270,7 @@ ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
* fixed/lowest transmit rate. Note that the interface
* mtu does not include the 802.11 overhead so we must
* tack that on (ath_hal_computetxtime includes the
* preamble and plcp in it's calculation).
* preamble and plcp in its calculation).
*/
tdma = vap->iv_tdma;
if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)

View File

@ -130,7 +130,7 @@ block devices (e.g. disks).
@image html Use_Case_Diagram__SATI__SATI_-_SBC.jpg "SCSI Block Command Translation Use Cases"
The SCSI-to-ATA Translation (SAT) specification defines a few of it's own
The SCSI-to-ATA Translation (SAT) specification defines a few of its own
commands, parameter data, and log pages. This use case diagram, however, only
captures the SAT specific commands being translated.

View File

@ -90,7 +90,7 @@ typedef enum _SCI_BASE_CONTROLLER_STATES
/**
* This state indicates that the controller is reset. The memory for
* the controller is in it's initial state, but the controller requires
* the controller is in its initial state, but the controller requires
* initialization.
* This state is entered from the INITIAL state.
* This state is entered from the RESETTING state.

View File

@ -1092,7 +1092,7 @@ efx_mcdi_read_assertion(
/*
* Before we attempt to chat to the MC, we should verify that the MC
* isn't in it's assertion handler, either due to a previous reboot,
* isn't in its assertion handler, either due to a previous reboot,
* or because we're reinitializing due to an eec_exception().
*
* Use GET_ASSERTS to read any assertion state that may be present.

View File

@ -2382,7 +2382,7 @@ hdaa_audio_ctl_source_volume(struct hdaa_pcm_devinfo *pdevinfo,
}
/* If widget has own ossdev - not traverse it.
It will be traversed on it's own. */
It will be traversed on its own. */
if (w->ossdev >= 0 && depth > 0)
return;
@ -4550,7 +4550,7 @@ hdaa_audio_ctl_source_amp(struct hdaa_devinfo *devinfo, nid_t nid, int index,
}
/* If widget has own ossdev - not traverse it.
It will be traversed on it's own. */
It will be traversed on its own. */
if (w->ossdev >= 0 && depth > 0)
return (found);

View File

@ -426,7 +426,7 @@ hdac_reset(struct hdac_softc *sc, int wakeup)
/*
* Wait for codecs to finish their own reset sequence. The delay here
* should be of 250us but for some reasons, on it's not enough on my
* should be of 250us but for some reasons, it's not enough on my
* computer. Let's use twice as much as necessary to make sure that
* it's reset properly.
*/

View File

@ -649,7 +649,7 @@ null_lock(struct vop_lock1_args *ap)
nn = VTONULL(vp);
/*
* If we're still active we must ask the lower layer to
* lock as ffs has special lock considerations in it's
* lock as ffs has special lock considerations in its
* vop lock.
*/
if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
@ -662,7 +662,7 @@ null_lock(struct vop_lock1_args *ap)
* the lowervp's vop_lock routine. When we vgone we will
* drop our last ref to the lowervp, which would allow it
* to be reclaimed. The lowervp could then be recycled,
* in which case it is not legal to be sleeping in it's VOP.
* in which case it is not legal to be sleeping in its VOP.
* We prevent it from being recycled by holding the vnode
* here.
*/

View File

@ -1626,7 +1626,7 @@ g_raid_md_ddf_start_disk(struct g_raid_disk *disk, struct g_raid_volume *vol)
vmeta = &pv->pv_meta;
gmeta = &mdi->mdio_meta;
/* Find disk position in metadata by it's reference. */
/* Find disk position in metadata by its reference. */
disk_pos = ddf_meta_find_disk(vmeta, reference,
&md_disk_bvd, &md_disk_pos);
md_pde_pos = ddf_meta_find_pd(gmeta, NULL, reference);

View File

@ -923,7 +923,7 @@ g_raid_md_intel_start_disk(struct g_raid_disk *disk)
pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
olddisk = NULL;
/* Find disk position in metadata by it's serial. */
/* Find disk position in metadata by its serial. */
disk_pos = intel_meta_find_disk(meta, pd->pd_disk_meta.serial);
if (disk_pos < 0) {
G_RAID_DEBUG1(1, sc, "Unknown, probably new or stale disk");

View File

@ -434,7 +434,7 @@ g_raid_md_jmicron_start_disk(struct g_raid_disk *disk)
pd = (struct g_raid_md_jmicron_perdisk *)disk->d_md_data;
olddisk = NULL;
/* Find disk position in metadata by it's serial. */
/* Find disk position in metadata by its serial. */
if (pd->pd_meta != NULL)
disk_pos = jmicron_meta_find_disk(meta, pd->pd_disk_id);
else

View File

@ -441,7 +441,7 @@ g_raid_md_nvidia_start_disk(struct g_raid_disk *disk)
pd = (struct g_raid_md_nvidia_perdisk *)disk->d_md_data;
olddisk = NULL;
/* Find disk position in metadata by it's serial. */
/* Find disk position in metadata by its serial. */
if (pd->pd_meta != NULL) {
disk_pos = pd->pd_meta->disk_number;
if (disk_pos >= meta->total_disks || mdi->mdio_started)

View File

@ -673,7 +673,7 @@ g_raid_md_promise_start_disk(struct g_raid_disk *disk, int sdn,
meta = pv->pv_meta;
if (sdn >= 0) {
/* Find disk position in metadata by it's serial. */
/* Find disk position in metadata by its serial. */
md_disk_pos = promise_meta_find_disk(meta, pd->pd_meta[sdn]->disk.id);
/* For RAID0+1 we need to translate order. */
disk_pos = promise_meta_translate_disk(vol, md_disk_pos);

View File

@ -489,7 +489,7 @@ g_raid_md_sii_start_disk(struct g_raid_disk *disk)
pd = (struct g_raid_md_sii_perdisk *)disk->d_md_data;
olddisk = NULL;
/* Find disk position in metadata by it's serial. */
/* Find disk position in metadata by its serial. */
if (pd->pd_meta != NULL)
disk_pos = sii_meta_disk_pos(meta, pd->pd_meta);
else

View File

@ -117,7 +117,7 @@ struct e3_statistics {
/*
* Attach/detach the protocol to the channel.
* The protocol is given by it's name, char[8].
* The protocol is given by its name, char[8].
* For example "async", "hdlc", "cisco", "fr", "ppp".
*/
#define SERIAL_GETPROTO _IOR ('x', 1, char [8])
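
As a usage illustration only (editor's sketch, not part of this commit), here is a minimal userland program that queries the attached protocol through the ioctl defined above; the /dev/ce0 device path is a hypothetical placeholder:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <err.h>

#define SERIAL_GETPROTO	_IOR('x', 1, char [8])	/* as defined in the header above */

int
main(void)
{
	char proto[8];
	int fd;

	fd = open("/dev/ce0", O_RDWR);		/* hypothetical channel device node */
	if (fd == -1)
		err(1, "open");
	if (ioctl(fd, SERIAL_GETPROTO, proto) == -1)
		err(1, "SERIAL_GETPROTO");
	/* The name is a fixed char[8] and may not be NUL-terminated. */
	printf("attached protocol: %.8s\n", proto);
	return (0);
}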

View File

@ -793,7 +793,7 @@ start_init(void *dummy)
}
/*
* Like kproc_create(), but runs in it's own address space.
* Like kproc_create(), but runs in its own address space.
* We do this early to reserve pid 1.
*
* Note special case - do not make it runnable yet. Other work
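
For comparison with the special case described above, a minimal sketch of the ordinary kproc_create() pattern, which runs in the shared kernel address space; this is an editor's illustration with hypothetical names, not part of this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>

static struct proc *example_proc;

/* Hypothetical worker; unlike init, it shares the kernel address space. */
static void
example_worker(void *arg)
{

	for (;;)
		pause("exwork", hz);
}

static void
example_init(void *arg)
{
	int error;

	error = kproc_create(example_worker, NULL, &example_proc,
	    0, 0, "example_worker");
	if (error != 0)
		printf("example: kproc_create failed: %d\n", error);
}
SYSINIT(example, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, example_init, NULL);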

View File

@ -236,7 +236,7 @@ link_elf_ctf_get(linker_file_t lf, linker_ctf_t *lc)
}
/*
* Allocate memory to buffer the CTF data in it's decompressed
* Allocate memory to buffer the CTF data in its decompressed
* form.
*/
ctftab = malloc(sz, M_LINKER, M_WAITOK);

View File

@ -229,7 +229,7 @@ firmware_unregister(const char *imagename)
/*
* It is ok for the lookup to fail; this can happen
* when a module is unloaded on last reference and the
* module unload handler unregister's each of it's
* module unload handler unregister's each of its
* firmware images.
*/
err = 0;
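
The situation the comment describes is a firmware image module whose unload handler unregisters what it registered at load time. A minimal firmware(9) sketch of that pattern follows (editor's illustration with hypothetical names and data, not part of this commit):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/firmware.h>

/* Hypothetical embedded image data. */
static const unsigned char example_fw_data[] = { 0x00, 0x01, 0x02, 0x03 };

static int
example_fw_modevent(module_t mod, int type, void *unused)
{

	switch (type) {
	case MOD_LOAD:
		if (firmware_register("example_fw", example_fw_data,
		    sizeof(example_fw_data), 1, NULL) == NULL)
			return (ENXIO);
		return (0);
	case MOD_UNLOAD:
		/* Unregister each image this module registered at load time. */
		return (firmware_unregister("example_fw"));
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_fw_moddata = {
	"example_fw",
	example_fw_modevent,
	NULL
};
DECLARE_MODULE(example_fw, example_fw_moddata, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(example_fw, 1);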

View File

@ -1336,7 +1336,7 @@ m_defrag(struct mbuf *m0, int how)
/*
* Defragment an mbuf chain, returning at most maxfrags separate
* mbufs+clusters. If this is not possible NULL is returned and
* the original mbuf chain is left in it's present (potentially
* the original mbuf chain is left in its present (potentially
* modified) state. We use two techniques: collapsing consecutive
* mbufs and replacing consecutive mbufs by a cluster.
*
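
Drivers rely on exactly this contract when bounding transmit fragments; a minimal sketch of the usual pattern around m_collapse(), with a hypothetical fragment limit and function name (editor's illustration, not part of this commit):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>

#define	EXAMPLE_MAX_TX_FRAGS	8	/* hypothetical DMA segment limit */

static int
example_encap(struct mbuf **m_head)
{
	struct mbuf *m;

	/* Try to squeeze the chain into at most EXAMPLE_MAX_TX_FRAGS mbufs. */
	m = m_collapse(*m_head, M_NOWAIT, EXAMPLE_MAX_TX_FRAGS);
	if (m == NULL) {
		/*
		 * The original (possibly modified) chain is still owned by
		 * the caller and must be freed here.
		 */
		m_freem(*m_head);
		*m_head = NULL;
		return (ENOBUFS);
	}
	*m_head = m;
	/* ... proceed with DMA mapping and hand the chain to hardware ... */
	return (0);
}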

View File

@ -36,7 +36,7 @@
* IEEE80211_INACT_WAIT seconds to handle "inactivity processing".
* This is used to do node inactivity processing when operating
* as an AP, adhoc or mesh mode. For inactivity processing each node
* has a timeout set in it's ni_inact field that is decremented
* has a timeout set in its ni_inact field that is decremented
* on each timeout and the node is reclaimed when the counter goes
* to zero. We use different inactivity timeout values depending
* on whether the node is associated and authorized (either by

View File

@ -1179,7 +1179,7 @@ ng_destroy_hook(hook_p hook)
/*
* Set the peer to point to ng_deadhook
* from this moment on we are effectively independent it.
* send it an rmhook message of it's own.
* send it an rmhook message of its own.
*/
peer->hk_peer = &ng_deadhook; /* They no longer know us */
hook->hk_peer = &ng_deadhook; /* Nor us, them */
@ -3005,7 +3005,7 @@ void
ng_free_item(item_p item)
{
/*
* The item may hold resources on it's own. We need to free
* The item may hold resources on its own. We need to free
* these before we can free the item. What they are depends upon
* what kind of item it is. it is important that nodes zero
* out pointers to resources that they remove from the item
@ -3577,7 +3577,7 @@ ng_address_hook(node_p here, item_p item, hook_p hook, ng_ID_t retaddr)
ITEM_DEBUG_CHECKS;
/*
* Quick sanity check..
* Since a hook holds a reference on it's node, once we know
* Since a hook holds a reference on its node, once we know
* that the peer is still connected (even if invalid,) we know
* that the peer node is present, though maybe invalid.
*/

View File

@ -725,7 +725,7 @@ METHOD void align_superpage {
/**
* @brief Bootstrap the VM system. At the completion of this routine, the
* kernel will be running in it's own address space with full control over
* kernel will be running in its own address space with full control over
* paging.
*
* @param _start start of reserved memory (obsolete ???)

View File

@ -435,7 +435,7 @@ mac_netinet_firewall_send(struct mbuf *m)
/*
* These functions really should be referencing the syncache structure
* instead of the label. However, due to some of the complexities associated
* with exposing this syncache structure we operate directly on it's label
* with exposing this syncache structure we operate directly on its label
* pointer. This should be OK since we aren't making any access control
* decisions within this code directly, we are merely allocating and copying
* label storage so we can properly initialize mbuf labels for any packets

View File

@ -1410,7 +1410,7 @@ vm_pageout_oom(int shortage)
/*
* We keep the process bigproc locked once we find it to keep anyone
* from messing with it; however, there is a possibility of
* deadlock if process B is bigproc and one of it's child processes
* deadlock if process B is bigproc and one of its child processes
* attempts to propagate a signal to B while we are waiting for A's
* lock while walking this list. To avoid this, we don't block on
* the process lock but just skip a process if it is already locked.

View File

@ -940,7 +940,7 @@ xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
/*
* NB: checking if the event channel is already masked is
* needed because the event channel user-space device
* masks event channels on it's filter as part of it's
* masks event channels on its filter as part of its
* normal operation, and those shouldn't be automatically
* unmasked by the generic interrupt code. The event channel
* device will unmask them when needed.

View File

@ -421,7 +421,7 @@ linkchk(FTSENT *p)
if (le->dev == st->st_dev && le->ino == st->st_ino) {
/*
* Save memory by releasing an entry when we've seen
* all of it's links.
* all of its links.
*/
if (--le->links <= 0) {
if (le->previous != NULL)

View File

@ -909,7 +909,7 @@ gettlen(const char *cp, const char **epp)
}
/*
* Search a type by it's type string.
* Search a type by its type string.
*/
static u_short
findtype(const char *cp, size_t len, int h)

View File

@ -908,7 +908,7 @@ main(int argc, char *argv[])
sci_init(ctx);
/*
* Exit if a device emulation finds an error in it's initilization
* Exit if a device emulation finds an error in its initilization
*/
if (init_pci(ctx) != 0)
exit(1);

View File

@ -875,7 +875,7 @@ begemotBridgeTpMaxAddresses OBJECT-TYPE
STATUS current
DESCRIPTION
"The maximum number of entires that this bridge can
learn in it's Forwarding Address Table and use for
learn in its Forwarding Address Table and use for
making forwarding decisions."
::= { begemotBridgeTpEntry 3 }

View File

@ -2062,7 +2062,7 @@ do_zipwork(struct zipwork_entry *zwork)
* Save information on any process we need to signal. Any single
* process may need to be sent different signal-values for different
* log files, but usually a single signal-value will cause the process
* to close and re-open all of it's log files.
* to close and re-open all of its log files.
*/
static struct sigwork_entry *
save_sigwork(const struct conf_entry *ent)
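
On the receiving end, a daemon usually treats that signal as a request to close and reopen its log file; a minimal userland sketch of such a handler, with a hypothetical daemon name and log path (editor's illustration, not part of this commit):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static FILE *logfp;
static volatile sig_atomic_t reopen_log;

static void
hup_handler(int sig)
{

	(void)sig;
	reopen_log = 1;		/* defer the real work to the main loop */
}

int
main(void)
{
	struct sigaction sa;

	logfp = fopen("/var/log/exampled.log", "a");	/* hypothetical path */
	if (logfp == NULL)
		exit(1);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = hup_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGHUP, &sa, NULL);

	for (;;) {
		if (reopen_log) {
			reopen_log = 0;
			fclose(logfp);
			logfp = fopen("/var/log/exampled.log", "a");
			if (logfp == NULL)
				exit(1);
		}
		fprintf(logfp, "still running\n");
		fflush(logfp);
		sleep(60);
	}
}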